diff --git a/CHANGELOG.md b/CHANGELOG.md
index c2decc555c131949cd26286a96cf43da3840a9b2..b82a98b75186d5742d7a6aab4067dfc66b321635 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,14 @@ All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/).
 
+## 2.1.0 - 2023-08-03
+
+In the next major update all backwards-compatible code will be removed. Please migrate to the `cluster_machines` setup and set `controlplane_count` and `worker_count` to 0.
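+
+A minimal sketch of the migration (the machine names, counts, OS, and flavor are illustrative; the field names follow the `cluster_machines` variable in this module):
+
+```hcl
+# example only: machine names, counts, os and flavor are placeholders
+controlplane_count = 0
+worker_count       = 0
+
+cluster_machines = [
+  {
+    name  = "controlplane"
+    role  = "controlplane"
+    count = 3
+    os    = "ubuntu"
+  },
+  {
+    name   = "worker"
+    role   = "worker"
+    count  = 3
+    os     = "ubuntu"
+    flavor = "gp.medium"
+  }
+]
+```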
+
+### Changed
+
+- This adds backwards compatibility to the stack; you still need to define the cluster machines.
+
 ## 2.0.0 - 2023-06-28
 
 This is a breaking change. You will need to update your terraform code to use this new version. This is an example of the variable `cluster_machine`.
diff --git a/terraform/modules/argocd/argocd.tf b/terraform/modules/argocd/argocd.tf
index 9758116352d40cee3441b2487797ad291650f998..7ef5c623d58cea0ecc934f972377b866c392d9d8 100644
--- a/terraform/modules/argocd/argocd.tf
+++ b/terraform/modules/argocd/argocd.tf
@@ -1,13 +1,13 @@
 locals {
   cluster_argocd_url = "${var.rancher_url}/k8s/clusters/${var.cluster_kube_id}"
 
-  cluster = templatefile("${path.module}/templates/cluster.yaml.tmpl", {
+  argocd_cluster = templatefile("${path.module}/templates/cluster.yaml.tmpl", {
     cluster_name  = var.cluster_name
     cluster_url   = local.cluster_argocd_url
     rancher_token = var.rancher_token
   })
 
-  project = templatefile("${path.module}/templates/project.yaml.tmpl", {
+  argocd_cluster_project = templatefile("${path.module}/templates/project.yaml.tmpl", {
     cluster_name  = var.cluster_name
     cluster_url   = local.cluster_argocd_url
     admin_groups  = var.admin_groups
@@ -16,7 +16,7 @@ locals {
     member_users  = var.member_users
   })
 
-  app = templatefile("${path.module}/templates/argocd.yaml.tmpl", {
+  argocd_cluster_app = templatefile("${path.module}/templates/argocd.yaml.tmpl", {
     cluster_name                = var.cluster_name
     cluster_url                 = local.cluster_argocd_url
     cluster_kube_id             = var.cluster_kube_id
@@ -55,15 +55,15 @@ locals {
 # ----------------------------------------------------------------------
 resource "kubectl_manifest" "argocd_cluster" {
   count     = var.argocd_kube_id != "" ? 1 : 0
-  yaml_body = local.cluster
+  yaml_body = local.argocd_cluster
 }
 
-resource "kubectl_manifest" "argocd_project" {
+resource "kubectl_manifest" "argocd_cluster_project" {
   count     = var.argocd_kube_id != "" ? 1 : 0
-  yaml_body = local.project
+  yaml_body = local.argocd_cluster_project
 }
 
-resource "kubectl_manifest" "argocd_app" {
+resource "kubectl_manifest" "argocd_cluster_app" {
   count     = var.argocd_kube_id != "" ? 1 : 0
-  yaml_body = local.app
+  yaml_body = local.argocd_cluster_app
 }
diff --git a/terraform/modules/argocd/outputs.tf b/terraform/modules/argocd/outputs.tf
index 0f0103e138fe2207ac038a98276da214828a13a6..6440d0befa64e81d10e8b600a9ddca7cac5bb925 100644
--- a/terraform/modules/argocd/outputs.tf
+++ b/terraform/modules/argocd/outputs.tf
@@ -1,17 +1,17 @@
-output "cluster" {
+output "argocd_cluster" {
   description = "ArgoCD cluster definition"
   sensitive   = true
-  value       = local.cluster
+  value       = local.argocd_cluster
 }
 
-output "project" {
+output "argocd_cluster_project" {
   description = "ArgoCD project and permissions"
   sensitive   = true
-  value       = local.project
+  value       = local.argocd_cluster_project
 }
 
-output "app" {
+output "argocd_cluster_app" {
   description = "ArogCD app of apps"
   sensitive   = true
-  value       = local.app
+  value       = local.argocd_cluster_app
 }
diff --git a/terraform/modules/rke1/data.tf b/terraform/modules/rke1/data.tf
new file mode 100644
index 0000000000000000000000000000000000000000..f57b3ea104c773cfb7cce7d21c331cb216c9ea3c
--- /dev/null
+++ b/terraform/modules/rke1/data.tf
@@ -0,0 +1,22 @@
+# external network
+data "openstack_networking_network_v2" "ext_net" {
+  name = var.openstack_external_net
+}
+
+# boot image
+# DEPRECATED
+data "openstack_images_image_v2" "boot" {
+  name        = var.os
+  most_recent = true
+}
+
+# openstack project name (bbXX)
+data "openstack_identity_auth_scope_v3" "scope" {
+  name = "my_scope"
+}
+
+data "openstack_images_image_v2" "os_image" {
+  for_each    = var.openstack_os_image
+  name        = each.value
+  most_recent = true
+}
diff --git a/terraform/modules/rke1/key.tf b/terraform/modules/rke1/key.tf
new file mode 100644
index 0000000000000000000000000000000000000000..7bee3731b52c4aad74c958ed86485b5f905b94c0
--- /dev/null
+++ b/terraform/modules/rke1/key.tf
@@ -0,0 +1,12 @@
+# Each cluster will either have a shared key, or its own
+# unique key.
+resource "openstack_compute_keypair_v2" "key" {
+  #count      = 1 #var.openstack_ssh_key == "" ? 0 : 1
+  name = var.cluster_name
+}
+
+# Set a local variable to hold the final key, either newly
+# created or loaded from an existing keypair.
+locals {
+  key = var.cluster_name # var.openstack_ssh_key == "" ? var.cluster_name : var.openstack_ssh_key
+}
diff --git a/terraform/modules/rke1/machines.tf b/terraform/modules/rke1/machines.tf
deleted file mode 100644
index a1ff6ec0d0738a8da0c61ef1ad9196bbaacc1a15..0000000000000000000000000000000000000000
--- a/terraform/modules/rke1/machines.tf
+++ /dev/null
@@ -1,90 +0,0 @@
-locals {
-  images = {
-    "centos" = "CentOS-7-GenericCloud-Latest",
-    "ubuntu" = "Ubuntu Jammy (22.04) latest"
-  }
-
-  usernames = {
-    "centos" = "centos",
-    "ubuntu" = "ubuntu"
-  }
-
-  node_options = {
-    "controlplane" = "--address awspublic --internal-address awslocal --controlplane --etcd",
-    "worker"       = "--address awspublic --internal-address awslocal --worker"
-  }
-
-  machines = flatten([
-    for x in var.cluster_machines : [
-      for i in range(x.count == null ? 1 : x.count) : {
-        hostname    = format("%s-%s-%02d", var.cluster_name, x.name, (i + 1))
-        username    = lookup(local.usernames, x.os, "centos")
-        image_name  = lookup(local.images, x.os, "CentOS-7-GenericCloud-Latest")
-        flavor      = try(x.flavor, "gp.medium")
-        image_id    = data.openstack_images_image_v2.boot[try(x.os, "centos")].id
-        disk_size   = try(x.disk, 40)
-        zone        = try(x.zone, "nova")
-        role        = try(x.role, "worker")
-        floating_ip = try(x.floating_ip, can(x.role == "controlplane"))
-        labels      = flatten([x.name, try(x.labels, [])])
-      }
-    ]
-  ])
-
-  jumphost = [for vm in local.machines : vm.hostname if vm.floating_ip][0]
-}
-
-data "openstack_images_image_v2" "boot" {
-  for_each    = local.images
-  name        = each.value
-  most_recent = true
-}
-
-data "openstack_identity_auth_scope_v3" "scope" {
-  name = var.cluster_name
-}
-
-resource "openstack_compute_keypair_v2" "key" {
-  name = var.cluster_name
-}
-
-resource "openstack_compute_instance_v2" "machine" {
-  for_each          = { for vm in local.machines : vm.hostname => vm }
-  name              = each.value.hostname
-  image_name        = each.value.image_name
-  availability_zone = each.value.zone
-  flavor_name       = each.value.flavor
-  key_pair          = openstack_compute_keypair_v2.key.name
-  config_drive      = false
-
-  depends_on = [
-    openstack_networking_secgroup_rule_v2.same_security_group_ingress_tcp,
-  ]
-
-  security_groups = [
-    openstack_networking_secgroup_v2.cluster_security_group.name
-  ]
-
-  network {
-    port = openstack_networking_port_v2.machine_ip[each.key].id
-  }
-
-  block_device {
-    uuid                  = each.value.image_id
-    source_type           = "image"
-    volume_size           = each.value.disk_size
-    destination_type      = "volume"
-    delete_on_termination = true
-  }
-
-  user_data = base64encode(templatefile("${path.module}/templates/user_data.tmpl", {
-    private_key  = openstack_compute_keypair_v2.key.private_key
-    project_name = data.openstack_identity_auth_scope_v3.scope.project_name
-    cluster_name = var.cluster_name
-    username     = each.value.username
-    node_name    = each.value.hostname
-    node_command = rancher2_cluster.kube.cluster_registration_token.0.node_command
-    node_options = lookup(local.node_options, each.value.role, "--worker")
-    node_labels  = join(" ", [for l in each.value.labels : format("-l %s", replace(l, " ", "_"))])
-  }))
-}
diff --git a/terraform/modules/rke1/network.tf b/terraform/modules/rke1/network.tf
index 2b284e13907baec064c0f6f4c16fb63741b007d9..a20e8d21470b6ac4d9380cc9e842cc216bdc210c 100644
--- a/terraform/modules/rke1/network.tf
+++ b/terraform/modules/rke1/network.tf
@@ -2,14 +2,10 @@
 # private ip spaces of 192.168.0.0/21. Each of the machines will
 # have a fixed ip address in this private IP space.
 #
-# For the worker machines, there will be a set of floating IP addresses
+# For the worker nodes, there will be a set of floating IP addresses
 # that can be given to a load balancer (using for example metallb).
 #
 
-data "openstack_networking_network_v2" "ext_net" {
-  name = var.openstack_external_net
-}
-
 # ----------------------------------------------------------------------
 # setup network, subnet and router
 # ----------------------------------------------------------------------
@@ -85,4 +81,43 @@ resource "openstack_networking_floatingip_v2" "machine_ip" {
   port_id     = openstack_networking_port_v2.machine_ip[each.key].id
 }
 
+# ----------------------------------------------------------------------
+# control plane
+# DEPRECATED
+# ----------------------------------------------------------------------
 
+resource "openstack_networking_port_v2" "controlplane_ip" {
+  count              = var.controlplane_count
+  name               = local.controlplane[count.index]
+  network_id         = openstack_networking_network_v2.cluster_net.id
+  security_group_ids = [openstack_networking_secgroup_v2.cluster_security_group.id]
+  depends_on         = [openstack_networking_router_interface_v2.kube_gateway]
+}
+
+resource "openstack_networking_floatingip_v2" "controlplane_ip" {
+  count       = var.controlplane_count
+  description = format("%s-controlplane-%d", var.cluster_name, count.index + 1)
+  pool        = data.openstack_networking_network_v2.ext_net.name
+  port_id     = element(openstack_networking_port_v2.controlplane_ip.*.id, count.index)
+}
+
+# ----------------------------------------------------------------------
+# worker nodes
+# DEPRECATED
+# ----------------------------------------------------------------------
+
+# Create the worker ports; these can also carry traffic for the
+# floating IPs (via allowed address pairs).
+resource "openstack_networking_port_v2" "worker_ip" {
+  count              = var.worker_count
+  name               = local.worker[count.index]
+  network_id         = openstack_networking_network_v2.cluster_net.id
+  security_group_ids = [openstack_networking_secgroup_v2.cluster_security_group.id]
+  depends_on         = [openstack_networking_router_interface_v2.kube_gateway]
+  dynamic "allowed_address_pairs" {
+    for_each = openstack_networking_port_v2.floating_ip.*.all_fixed_ips.0
+    content {
+      ip_address = allowed_address_pairs.value
+    }
+  }
+}
diff --git a/terraform/modules/rke1/nodes.tf b/terraform/modules/rke1/nodes.tf
new file mode 100644
index 0000000000000000000000000000000000000000..55c819fb85b3a8517059e79b77cbc9587bb58ccb
--- /dev/null
+++ b/terraform/modules/rke1/nodes.tf
@@ -0,0 +1,193 @@
+locals {
+  usernames = {
+    "centos" = "centos",
+    "ubuntu" = "ubuntu"
+  }
+
+  node_options = {
+    "controlplane" = "--address awspublic --internal-address awslocal --controlplane --etcd",
+    "worker"       = "--address awspublic --internal-address awslocal --worker"
+  }
+
+  machines = flatten([
+    for x in var.cluster_machines : [
+      for i in range(x.count == null ? 1 : x.count) : {
+        hostname    = format("%s-%s-%02d", var.cluster_name, x.name, (i + 1))
+        username    = lookup(local.usernames, x.os, "UNDEFINED")
+        image_name  = lookup(var.openstack_os_image, x.os, "UNDEFINED")
+        flavor      = try(x.flavor, "gp.medium")
+        image_id    = data.openstack_images_image_v2.os_image[try(x.os, "UNDEFINED")].id
+        disk_size   = try(x.disk, 40)
+        zone        = try(x.zone, "nova")
+        role        = try(x.role, "worker")
+        floating_ip = try(x.floating_ip, can(x.role == "controlplane"))
+        labels      = flatten([x.name, try(x.labels, [])])
+      }
+    ]
+  ])
+
+  jumphost = concat([for vm in local.machines : vm.hostname if vm.floating_ip], local.controlplane)[0]
+
+  # DEPRECATED
+  controlplane = [for l in range(var.controlplane_count) : var.old_hostnames ? format("%s-controlplane-%d", var.cluster_name, l) : format("%s-controlplane-%d", var.cluster_name, l + 1)]
+  worker       = [for l in range(var.worker_count) : var.old_hostnames ? format("%s-worker-%d", var.cluster_name, l) : format("%s-worker-%02d", var.cluster_name, l + 1)]
+}
+
+# ----------------------------------------------------------------------
+# cluster nodes
+# ----------------------------------------------------------------------
+resource "openstack_compute_instance_v2" "machine" {
+  for_each          = { for vm in local.machines : vm.hostname => vm }
+  name              = each.value.hostname
+  image_name        = each.value.image_name
+  availability_zone = each.value.zone
+  flavor_name       = each.value.flavor
+  key_pair          = openstack_compute_keypair_v2.key.name
+  config_drive      = false
+
+  depends_on = [
+    openstack_networking_secgroup_rule_v2.same_security_group_ingress_tcp,
+  ]
+
+  security_groups = [
+    openstack_networking_secgroup_v2.cluster_security_group.name
+  ]
+
+  network {
+    port = openstack_networking_port_v2.machine_ip[each.key].id
+  }
+
+  block_device {
+    uuid                  = each.value.image_id
+    source_type           = "image"
+    volume_size           = each.value.disk_size
+    destination_type      = "volume"
+    delete_on_termination = true
+  }
+
+  user_data = base64encode(templatefile("${path.module}/templates/user_data.tmpl", {
+    private_key  = openstack_compute_keypair_v2.key.private_key
+    project_name = data.openstack_identity_auth_scope_v3.scope.project_name
+    cluster_name = var.cluster_name
+    username     = each.value.username
+    node_name    = each.value.hostname
+    node_command = rancher2_cluster.kube.cluster_registration_token.0.node_command
+    node_options = lookup(local.node_options, each.value.role, "--worker")
+    node_labels  = join(" ", [for l in each.value.labels : format("-l %s", replace(l, " ", "_"))])
+  }))
+}
+
+# ----------------------------------------------------------------------
+# control-plane nodes
+# DEPRECATED
+# ----------------------------------------------------------------------
+resource "openstack_compute_instance_v2" "controlplane" {
+  count             = var.controlplane_count
+  name              = local.controlplane[count.index]
+  image_name        = var.os
+  availability_zone = var.openstack_zone
+  flavor_name       = var.controlplane_flavor
+  key_pair          = openstack_compute_keypair_v2.key.name
+  config_drive      = false
+
+  depends_on = [
+    openstack_networking_secgroup_rule_v2.same_security_group_ingress_tcp,
+  ]
+
+  security_groups = [
+    openstack_networking_secgroup_v2.cluster_security_group.name
+  ]
+
+  #echo "update hosts"
+  #%{ for ip in openstack_networking_port_v2.worker_ip[count.index].all_fixed_ips }
+  #echo "$${ip} $${node_name} $(hostname) $(hostname -f)"  >> /etc/hosts
+  #%{ endfor }
+
+  user_data = base64encode(templatefile("${path.module}/templates/user_data.tmpl", {
+    private_key  = openstack_compute_keypair_v2.key.private_key
+    project_name = data.openstack_identity_auth_scope_v3.scope.project_name
+    cluster_name = var.cluster_name
+    username     = "centos"
+    node_name    = local.controlplane[count.index]
+    node_command = rancher2_cluster.kube.cluster_registration_token.0.node_command
+    node_options = "--address awspublic --internal-address awslocal --controlplane --etcd"
+    node_labels  = ""
+  }))
+
+  block_device {
+    uuid                  = data.openstack_images_image_v2.boot.id
+    source_type           = "image"
+    volume_size           = var.controlplane_disksize
+    destination_type      = "volume"
+    delete_on_termination = true
+  }
+
+  network {
+    port = element(openstack_networking_port_v2.controlplane_ip.*.id, count.index)
+  }
+
+  lifecycle {
+    ignore_changes = [
+      key_pair,
+      block_device,
+      user_data,
+      availability_zone
+    ]
+  }
+}
+
+# ----------------------------------------------------------------------
+# worker nodes
+# DEPRECATED
+# ----------------------------------------------------------------------
+resource "openstack_compute_instance_v2" "worker" {
+  count             = var.worker_count
+  name              = local.worker[count.index]
+  image_name        = var.os
+  availability_zone = var.openstack_zone
+  flavor_name       = var.worker_flavor
+  key_pair          = local.key
+  config_drive      = false
+
+  depends_on = [
+    openstack_networking_secgroup_rule_v2.same_security_group_ingress_tcp
+  ]
+
+  security_groups = [
+    openstack_networking_secgroup_v2.cluster_security_group.name
+  ]
+
+  user_data = base64encode(templatefile("${path.module}/templates/user_data.tmpl", {
+    private_key  = openstack_compute_keypair_v2.key.private_key
+    project_name = data.openstack_identity_auth_scope_v3.scope.project_name
+    cluster_name = var.cluster_name
+    node_name    = local.worker[count.index]
+    username     = "centos"
+    node_command = rancher2_cluster.kube.cluster_registration_token.0.node_command
+    node_options = "--worker"
+    node_labels  = ""
+  }))
+
+  block_device {
+    uuid                  = data.openstack_images_image_v2.boot.id
+    source_type           = "image"
+    volume_size           = var.worker_disksize
+    destination_type      = "volume"
+    boot_index            = 0
+    delete_on_termination = true
+  }
+
+  network {
+    port = element(openstack_networking_port_v2.worker_ip.*.id, count.index)
+  }
+
+  lifecycle {
+    ignore_changes = [
+      key_pair,
+      block_device,
+      user_data,
+      availability_zone
+    ]
+  }
+}
+
diff --git a/terraform/modules/rke1/outputs.tf b/terraform/modules/rke1/outputs.tf
index 2e5a039e5e7b747f31eaefaa229d7442cc6bf686..4e33cd41bd034b6675ca280dca8de344233086b5 100644
--- a/terraform/modules/rke1/outputs.tf
+++ b/terraform/modules/rke1/outputs.tf
@@ -24,6 +24,15 @@ output "ssh_config" {
   value       = <<-EOT
 # Automatically created by terraform
 
+%{~for i, x in openstack_compute_instance_v2.controlplane.*}
+Host ${x.name}
+  HostName ${openstack_networking_floatingip_v2.controlplane_ip[i].address}
+  StrictHostKeyChecking no
+  UserKnownHostsFile=/dev/null
+  IdentityFile ${pathexpand("~/.ssh/${var.cluster_name}.pem")}
+  User centos
+%{~endfor}
+
 %{~for x in [for m in local.machines : m if m.floating_ip]}
 Host ${x.hostname}
   HostName ${openstack_networking_floatingip_v2.machine_ip[x.hostname].address}
@@ -33,6 +42,16 @@ Host ${x.hostname}
   User ${x.username}
 %{~endfor}
 
+%{~for x in openstack_compute_instance_v2.worker.*}
+Host ${x.name}
+  HostName ${x.network[0].fixed_ip_v4}
+  StrictHostKeyChecking no
+  ProxyJump ${local.jumphost}
+  UserKnownHostsFile=/dev/null
+  IdentityFile ${pathexpand("~/.ssh/${var.cluster_name}.pem")}
+  User centos
+%{~endfor}
+
 %{~for x in [for m in local.machines : m if !m.floating_ip]}
 Host ${x.hostname}
   ProxyJump ${local.jumphost}
diff --git a/terraform/modules/rke1/rancher.tf b/terraform/modules/rke1/rancher.tf
index 3ca849796eade2e81587940177d824d6f8071a9f..71cde4fd1db3ef732bc6dffb05fa1addc21d9d07 100644
--- a/terraform/modules/rke1/rancher.tf
+++ b/terraform/modules/rke1/rancher.tf
@@ -32,13 +32,20 @@ resource "rancher2_cluster" "kube" {
   }
 }
 
+# Create a rancher2 Cluster Sync that waits for the cluster to become active
+resource "rancher2_cluster_sync" "kube" {
+  depends_on    = [openstack_compute_instance_v2.controlplane[0]]
+  cluster_id    = rancher2_cluster.kube.id
+  wait_catalogs = false
+}
+
 # ----------------------------------------------------------------------
 # cluster access
 # ----------------------------------------------------------------------
 resource "rancher2_cluster_role_template_binding" "admin_users" {
   for_each          = var.admin_users
-  name              = "${rancher2_cluster.kube.id}-user-${replace(each.value, "_", "-")}"
-  cluster_id        = rancher2_cluster.kube.id
+  name              = "${rancher2_cluster_sync.kube.id}-user-${replace(each.value, "_", "-")}"
+  cluster_id        = rancher2_cluster_sync.kube.id
   role_template_id  = "cluster-owner"
   user_principal_id = "openldap_user://uid=${each.value},ou=People,dc=ncsa,dc=illinois,dc=edu"
   lifecycle {
@@ -52,8 +59,8 @@ resource "rancher2_cluster_role_template_binding" "admin_users" {
 
 resource "rancher2_cluster_role_template_binding" "admin_groups" {
   for_each          = var.admin_groups
-  name              = "${rancher2_cluster.kube.id}-group-${replace(each.value, "_", "-")}"
-  cluster_id        = rancher2_cluster.kube.id
+  name              = "${rancher2_cluster_sync.kube.id}-group-${replace(each.value, "_", "-")}"
+  cluster_id        = rancher2_cluster_sync.kube.id
   role_template_id  = "cluster-owner"
   user_principal_id = "openldap_group://cn=${each.value},ou=Groups,dc=ncsa,dc=illinois,dc=edu"
   lifecycle {
@@ -67,8 +74,8 @@ resource "rancher2_cluster_role_template_binding" "admin_groups" {
 
 resource "rancher2_cluster_role_template_binding" "member_users" {
   for_each          = var.member_users
-  name              = "${rancher2_cluster.kube.id}-user-${replace(each.value, "_", "-")}"
-  cluster_id        = rancher2_cluster.kube.id
+  name              = "${rancher2_cluster_sync.kube.id}-user-${replace(each.value, "_", "-")}"
+  cluster_id        = rancher2_cluster_sync.kube.id
   role_template_id  = "cluster-member"
   user_principal_id = "openldap_user://uid=${each.value},ou=People,dc=ncsa,dc=illinois,dc=edu"
   lifecycle {
@@ -82,8 +89,8 @@ resource "rancher2_cluster_role_template_binding" "member_users" {
 
 resource "rancher2_cluster_role_template_binding" "member_groups" {
   for_each          = var.member_groups
-  name              = "${rancher2_cluster.kube.id}-group-${replace(each.value, "_", "-")}"
-  cluster_id        = rancher2_cluster.kube.id
+  name              = "${rancher2_cluster_sync.kube.id}-group-${replace(each.value, "_", "-")}"
+  cluster_id        = rancher2_cluster_sync.kube.id
   role_template_id  = "cluster-member"
   user_principal_id = "openldap_group://cn=${each.value},ou=Groups,dc=ncsa,dc=illinois,dc=edu"
   lifecycle {
@@ -94,3 +101,57 @@ resource "rancher2_cluster_role_template_binding" "member_groups" {
     ]
   }
 }
+
+# ----------------------------------------------------------------------
+# longhorn storage
+# DEPRECATED
+# ----------------------------------------------------------------------
+resource "rancher2_app_v2" "longhorn-system" {
+  count      = var.longhorn_enabled ? 1 : 0
+  cluster_id = rancher2_cluster_sync.kube.cluster_id
+  name       = "longhorn"
+  namespace  = "longhorn-system"
+  repo_name  = "rancher-charts"
+  chart_name = "longhorn"
+  project_id = rancher2_cluster_sync.kube.system_project_id
+  values     = <<EOF
+defaultSettings:
+  backupTarget: nfs://radiant-nfs.ncsa.illinois.edu:/radiant/projects/${data.openstack_identity_auth_scope_v3.scope.project_name}/${var.cluster_name}/backup
+  defaultReplicaCount: ${var.longhorn_replicas}
+persistence:
+  defaultClass: false
+  defaultClassReplicaCount: ${var.longhorn_replicas}
+EOF
+  lifecycle {
+    ignore_changes = [
+      values
+    ]
+  }
+}
+
+# ----------------------------------------------------------------------
+# monitoring
+# DEPRECATED
+# ----------------------------------------------------------------------
+resource "rancher2_app_v2" "monitor" {
+  count      = var.monitoring_enabled ? 1 : 0
+  cluster_id = rancher2_cluster_sync.kube.cluster_id
+  name       = "rancher-monitoring"
+  namespace  = "cattle-monitoring-system"
+  repo_name  = "rancher-charts"
+  chart_name = "rancher-monitoring"
+  project_id = rancher2_cluster_sync.kube.system_project_id
+  //  values        = <<EOF
+  //prometheus:
+  //  resources:
+  //    core:
+  //      limits:
+  //        cpu: "4000m"
+  //        memory: "6144Mi"
+  //EOF
+  lifecycle {
+    ignore_changes = [
+      values
+    ]
+  }
+}
diff --git a/terraform/modules/rke1/security_group.tf b/terraform/modules/rke1/security_group.tf
index a365dbf06f4fea1e5974266782927f4660218c49..48629e157665af3627b9a4bd21b24f0dd1445fff 100644
--- a/terraform/modules/rke1/security_group.tf
+++ b/terraform/modules/rke1/security_group.tf
@@ -3,6 +3,26 @@ resource "openstack_networking_secgroup_v2" "cluster_security_group" {
   description = "${var.cluster_name} kubernetes cluster security group"
 }
 
+# ----------------------------------------------------------------------
+# Egress
+# ----------------------------------------------------------------------
+
+#Egress  IPv4  Any Any 0.0.0.0/0 - - 
+#resource "openstack_networking_secgroup_rule_v2" "egress_ipv4" {
+#  direction         = "egress"
+#  ethertype         = "IPv4"
+#  security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
+#  depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
+#}
+
+#Egress  IPv6  Any Any ::/0  - - 
+#resource "openstack_networking_secgroup_rule_v2" "egress_ipv6" {
+#  direction         = "egress"
+#  ethertype         = "IPv6"
+#  security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
+#  depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
+#}
+
 # ----------------------------------------------------------------------
 # Ingress
 # ----------------------------------------------------------------------
@@ -52,30 +72,32 @@ resource "openstack_networking_secgroup_rule_v2" "ingress_https" {
   depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
 }
 
-# Ingress IPv4  TCP 6443  141.142.0.0/16  - kube api  
+# Ingress IPv4  TCP 6443  rancher  - kube api
 resource "openstack_networking_secgroup_rule_v2" "ingress_kubeapi" {
-  description       = "kubeapi"
+  for_each          = var.openstack_security_kubernetes
+  description       = "kubeapi ${each.key}"
   direction         = "ingress"
   ethertype         = "IPv4"
   protocol          = "tcp"
   port_range_min    = 6443
   port_range_max    = 6443
-  remote_ip_prefix  = var.openstack_security_kubernetes
+  remote_ip_prefix  = each.value
   security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
   depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
 }
 
-# Ingress IPv4  TCP 30000 - 32767 0.0.0.0/0 - nodeport  
-# resource "openstack_networking_secgroup_rule_v2" "ingress_nodeport" {
-#   description       = "nodeport"
-#   direction         = "ingress"
-#   ethertype         = "IPv4"
-#   protocol          = "tcp"
-#   port_range_min    = 30000
-#   port_range_max    = 32767
-#   security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
-#   depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
-# }
+# Ingress IPv4  TCP 30000 - 32767 0.0.0.0/0 - nodeport
+# DEPRECATED
+resource "openstack_networking_secgroup_rule_v2" "ingress_nodeport" {
+  description       = "nodeport"
+  direction         = "ingress"
+  ethertype         = "IPv4"
+  protocol          = "tcp"
+  port_range_min    = 30000
+  port_range_max    = 32767
+  security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
+  depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
+}
 
 resource "openstack_networking_secgroup_rule_v2" "same_security_group_ingress_tcp" {
   direction         = "ingress"
@@ -95,7 +117,6 @@ resource "openstack_networking_secgroup_rule_v2" "same_security_group_ingress_ud
   depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
 }
 
-
 # Ingress IPv4  TCP 1 - 65535 192.168.100.0/24  - - 
 # Ingress IPv4  TCP 1 - 65535 141.142.216.0/21  - - 
 # Ingress IPv4  TCP 2376  141.142.0.0/16  - - 
diff --git a/terraform/modules/rke1/templates/user_data.tmpl b/terraform/modules/rke1/templates/user_data.tmpl
index 920d24882f6fb453d138307c2dde88ef257abfde..2f12a3c424ec9ca987b92afe88e08b36081655e0 100644
--- a/terraform/modules/rke1/templates/user_data.tmpl
+++ b/terraform/modules/rke1/templates/user_data.tmpl
@@ -44,16 +44,16 @@ write_files:
     else
       echo "Don't know how to install iscsi/nfs"
     fi
-    echo "mounting taiga"
-    mkdir /taiga
-    #mount -av
     echo "install docker"
-    curl https://releases.rancher.com/install-docker/20.10.sh | sh
+    curl https://releases.rancher.com/install-docker/24.0.sh | sh
     systemctl enable docker
     systemctl start docker
     usermod -aG docker ${username}
     echo "connect to rancher"
     ${node_command} ${node_options} ${node_labels}
+    echo "mounting taiga"
+    mkdir /taiga
+    #mount -av
     echo "all done"
 
 # run this command once the system is booted
diff --git a/terraform/modules/rke1/variables.tf b/terraform/modules/rke1/variables.tf
index 386bc3c933862e23af35edbb2bf19e2be35920bd..975fbd89ad5a1b65f90f700613625964afff643f 100644
--- a/terraform/modules/rke1/variables.tf
+++ b/terraform/modules/rke1/variables.tf
@@ -24,6 +24,28 @@ variable "cluster_machines" {
   default     = []
 }
 
+# ----------------------------------------------------------------------
+# APPLICATIONS
+# ----------------------------------------------------------------------
+
+variable "monitoring_enabled" {
+  type        = bool
+  description = "Enable monitoring in rancher"
+  default     = true
+}
+
+variable "longhorn_enabled" {
+  type        = bool
+  description = "Enable longhorn storage"
+  default     = true
+}
+
+variable "longhorn_replicas" {
+  type        = string
+  description = "Number of replicas"
+  default     = 3
+}
+
 # ----------------------------------------------------------------------
 # RANCHER
 # ----------------------------------------------------------------------
@@ -44,6 +66,7 @@ variable "rancher_token" {
 variable "rke1_version" {
   type        = string
   description = "Version of rke1 to install."
+  default     = "v1.21.14-rancher1-1"
 }
 
 # ----------------------------------------------------------------------
@@ -81,7 +104,7 @@ variable "member_groups" {
 variable "openstack_url" {
   type        = string
   description = "OpenStack URL"
-  default     = "https://radiant.ncsa.illinois.edu:5000"
+  default     = "https://radiant.ncsa.illinois.edu"
 }
 
 variable "openstack_credential_id" {
@@ -102,10 +125,97 @@ variable "openstack_external_net" {
   default     = "ext-net"
 }
 
+# DEPRECATED, new key will always be created
+variable "openstack_ssh_key" {
+  type        = string
+  description = "existing SSH key to use, leave blank for a new one"
+  default     = ""
+}
+
+# DEPRECATED
+variable "openstack_zone" {
+  type        = string
+  description = "default zone to use for openstack nodes"
+  default     = "nova"
+}
+
 variable "openstack_security_kubernetes" {
+  type        = map(any)
+  description = "IP address to allow connections to kube api port, default is rancher nodes"
+  default = {
+    "rancher-1" : "141.142.218.167/16"
+    "rancher-2" : "141.142.217.171/16"
+    "rancher-3" : "141.142.217.184/16"
+  }
+}
+
+variable "openstack_os_image" {
+  type        = map(any)
+  description = "Map from short OS name to image"
+  default = {
+    "centos" = "CentOS-7-GenericCloud-Latest"
+    "ubuntu" = "Ubuntu Jammy (22.04) latest"
+  }
+}
+
+# ----------------------------------------------------------------------
+# OPENSTACK NODES
+# ----------------------------------------------------------------------
+
+# DEPRECATED
+variable "old_hostnames" {
+  type        = bool
+  description = "should old hostname be used (base 0)"
+  default     = false
+}
+
+# DEPRECATED
+variable "os" {
+  type        = string
+  description = "Base image to use for the OS"
+  default     = "CentOS-7-GenericCloud-Latest"
+}
+
+# DEPRECATED
+variable "controlplane_count" {
+  type        = string
+  description = "Desired quantity of control-plane nodes"
+  default     = 1
+}
+
+# DEPRECATED
+variable "controlplane_flavor" {
+  type        = string
+  description = "Desired flavor of control-plane nodes"
+  default     = "m1.medium"
+}
+
+# DEPRECATED
+variable "controlplane_disksize" {
+  type        = string
+  description = "Desired disksize of control-plane nodes"
+  default     = 40
+}
+
+# DEPRECATED
+variable "worker_count" {
+  type        = string
+  description = "Desired quantity of worker nodes"
+  default     = 1
+}
+
+# DEPRECATED
+variable "worker_flavor" {
+  type        = string
+  description = "Desired flavor of worker nodes"
+  default     = "m1.large"
+}
+
+# DEPRECATED
+variable "worker_disksize" {
   type        = string
-  description = "IP address to allow connections to kube api port"
-  default     = "141.142.0.0/16"
+  description = "Desired disksize of worker nodes"
+  default     = 40
 }
 
 # ----------------------------------------------------------------------