Specifying the Kubernetes version for unmanaged EKS worker nodes in Terraform

We are creating an EKS cluster with worker nodes using the Terraform resources below. The aws_eks_cluster resource has an option to specify the Kubernetes version for the control plane (master), but when we set it, the worker nodes do not pick up the same version.
For example, with version set to 1.15 in the aws_eks_cluster resource, the worker nodes that join the cluster still appear to be running 1.13. We are aware that the worker node version can be specified with the aws_eks_node_group resource (sketched below for reference), but we cannot go with that approach since certain things do not suit our case.
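For reference, the managed node group route we are ruling out would look roughly like the sketch below. It reuses our existing variables; the resource name and sizes are illustrative and not part of our actual configuration:

resource "aws_eks_node_group" "node_group" {
  cluster_name    = "${aws_eks_cluster.eks_cluster.name}"
  node_group_name = "ng-${var.client}"
  node_role_arn   = "${var.iam_role_node}"
  subnet_ids      = ["${var.subnet_private1}", "${var.subnet_private2}"]
  version         = "1.15"

  scaling_config {
    desired_size = "${var.min_node_count}"
    max_size     = "${var.max_node_count}"
    min_size     = "${var.min_node_count}"
  }
}
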
Is there any way to specify the version for the worker nodes that join the cluster using the resources below?

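# EKS control plane; the "version" argument here sets the control-plane Kubernetes version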
resource "aws_eks_cluster" "eks_cluster" {
  name     = "${var.eks_cluster_name}"
  role_arn = "${var.iam_role_master}"
  version  = "1.15"

  vpc_config {
    security_group_ids      = ["${var.sg-eks-master}"]
    subnet_ids              = ["${var.subnet_private1}", "${var.subnet_private2}", "${var.subnet_public1}", "${var.subnet_public2}"]
    endpoint_private_access = true
    endpoint_public_access  = true
    public_access_cidrs     = ["${var.accessingip}"]
  }
}
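# User data that runs the EKS bootstrap script so each node registers with the cluster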
locals {
  iam-eks-node-userdata = <<USERDATA
#!/bin/bash
set -o xtrace
/etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.eks_cluster.endpoint}' --b64-cluster-ca '${aws_eks_cluster.eks_cluster.certificate_authority.0.data}' '${var.eks_cluster_name}'
USERDATA
}
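# Launch configuration for the self-managed worker nodes; the node AMI is supplied via var.image_id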
resource "aws_launch_configuration" "lc_eks" {
  iam_instance_profile        = "${var.instance_profile_node}"
  image_id                    = "${var.image_id}"
  instance_type               = "${var.instance_type}"
  name_prefix                 = "lc-${var.client}"
  security_groups             = ["${var.sg-eks-node}"]
  user_data_base64            = "${base64encode(local.iam-eks-node-userdata)}"
  lifecycle {
    create_before_destroy = true
  }
}
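# Auto Scaling group that launches the worker nodes into the cluster subnets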
resource "aws_autoscaling_group" "asg_eks" {
  desired_capacity     = "${var.min_node_count}"
  launch_configuration = "${aws_launch_configuration.lc_eks.id}"
  max_size             = "${var.max_node_count}"
  min_size             = "${var.min_node_count}"
  name                 = "asg-${var.client}"
  vpc_zone_identifier  = ["${var.subnet_private1}", "${var.subnet_private2}", "${var.subnet_public1}", "${var.subnet_public2}"]
  tag {
    key                 = "Name"
    value               = "${var.client}-terraform-tf-eks"
    propagate_at_launch = true
  }
  tag {
    key                 = "kubernetes.io/cluster/${var.eks_cluster_name}"
    value               = "owned"
    propagate_at_launch = true
  }
}
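# Fetches a cluster authentication token with aws-iam-authenticator for the Kubernetes provider below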
data "external" "aws_iam_authenticator" {
  program = ["/bin/bash", "-c", "aws-iam-authenticator token -i ${var.eks_cluster_name} | jq -c -r .status"]
}

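# Kubernetes provider pointed at the new cluster, authenticating with the token above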
provider "kubernetes" {
  host                   = "${aws_eks_cluster.eks_cluster.endpoint}"
  cluster_ca_certificate = "${base64decode(aws_eks_cluster.eks_cluster.certificate_authority.0.data)}"
  token                  = "${data.external.aws_iam_authenticator.result.token}"
  load_config_file       = false
  version                = "1.11.3"
}
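# aws-auth ConfigMap mapping the node IAM role so the worker nodes are allowed to join the cluster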
resource "kubernetes_config_map" "aws_auth" {
  metadata {
    name = "aws-auth"
    namespace = "kube-system"
  }
  data {
    mapRoles = <<EOF
- rolearn: "${var.iam_role_node}"
  username: system:node:{{EC2PrivateDNSName}}
  groups:
    - system:bootstrappers
    - system:nodes
EOF
  }
  depends_on = ["aws_eks_cluster.eks_cluster"]
}