Connection to task in a different group is refused via sidecar for count api example

Cannot get nomad job to work with sidecar using consul as service discovery.
My use case involves two tasks in two different groups that need to be able to talk to each other; it is pretty much what the count-api example demonstrates, but I just can't seem to make it work on my Nomad/Consul clusters. I created the Nomad and Consul clusters using the tutorial "Set up a Nomad cluster on AWS".

I don't see anything that stands out in the logs on either cluster — the UI seems to indicate the proxies are configured correctly. Any help debugging would be appreciated; I am new to Nomad and Consul, and so far it has been great working with them.

Here are my server config files:

cat /etc/nomad.d/nomad.hcl
data_dir  = "/opt/nomad/data"
bind_addr = "0.0.0.0"

# Enable the server
server {
  enabled          = true
  bootstrap_expect = 3
}

consul {
  address = "127.0.0.1:8500"
  token = "405b7550-7616-6b14-e542-8a8f447e2860"
}

acl {
  enabled = true
}

vault {
  enabled          = false
  address          = "http://active.vault.service.consul:8200"
  task_token_ttl   = "1h"
  create_from_role = "nomad-cluster"
  token            = ""
}
$ cat /etc/consul.d/consul.hcl
data_dir = "/opt/consul/data"
bind_addr = "0.0.0.0"
client_addr = "0.0.0.0"
advertise_addr = "172.31.49.171"

bootstrap_expect = 3

acl {
    enabled = true
    default_policy = "allow"
    down_policy = "extend-cache"
}

log_level = "INFO"

server = true
ui = true
retry_join = ["provider=aws tag_key=ConsulAutoJoin tag_value=auto-join"]

service {
    name = "consul"
}

connect {
  enabled = true
}

ports {
  grpc = 8502
}

Here is the client config:

cat /etc/consul.d/consul.hcl
ui = true
log_level = "INFO"
data_dir = "/opt/consul/data"
bind_addr = "0.0.0.0"
client_addr = "0.0.0.0"
advertise_addr = "172.31.30.79"
retry_join = ["provider=aws tag_key=ConsulAutoJoin tag_value=auto-join"]

acl {
    enabled = true
    default_policy = "allow"
    down_policy = "extend-cache"
}

connect {
  enabled = true
}
ports {
  grpc = 8502
}
$ cat /etc/nomad.d/nomad.hcl
data_dir  = "/opt/nomad/data"
bind_addr = "0.0.0.0"
datacenter = "dc1"

# Enable the client
client {
  enabled = true
  options {
    "driver.raw_exec.enable"    = "1"
    "docker.privileged.enabled" = "true"
  }
}

acl {
  enabled = true
}

consul {
  address = "127.0.0.1:8500"
  token = "hiddenxxxxx-xxxxx-xxxxx"
}

vault {
  enabled = true
  address = "http://active.vault.service.consul:8200"
}

FWIW, here is the Nomad job:

job "countdash" {
   datacenters = ["dc1"]
   group "api" {
     network {
       mode = "bridge"
     }

     service {
       name = "count-api"
       port = "9001"

       connect {
         sidecar_service {}
       }
     }

     task "web" {
       driver = "docker"
       config {
         image = "hashicorpnomad/counter-api:v1"
       }
     }
   }

   group "dashboard" {
     network {
       mode = "bridge"
       port "http" {
         static = 9002
         to     = 9002
       }
     }

     service {
       name = "count-dashboard"
       port = "9002"

       connect {
         sidecar_service {
           proxy {
             upstreams {
               destination_name = "count-api"
               local_bind_port = 8080
             }
           }
         }
       }
     }

     task "dashboard" {
       driver = "docker"
       env {
         COUNTING_SERVICE_URL = "http://${NOMAD_UPSTREAM_ADDR_count_api}"
       }
       config {
         image = "hashicorpnomad/counter-dashboard:v1"
       }
     }
   }
 }

Here is a UI snapshot of the allocation:

Did you set the intentions in Consul to allow the service to connect to the other service?