diff --git a/CHANGELOG.md b/CHANGELOG.md
index f4900330bd0bafe11a4bd6976df27a4779f6741c..8214f3b280ff839bbd3ed953395be6966f984104 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,24 @@ All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/).
 
+## 3.2.0 - 2024-08-04
+
+This allows you to create a cluster that is RKE2 or K3S as well as RKE1. RKE1 is deprecated and will no longer be supported after July 31st, 2025. If you want to use either RKE2 or K3S you will need to change the `network_plugin`.
+
+In version 3.5.0 the default network plugin for RKE1 will be set to canal; please make sure to either upgrade or explicitly specify weave.
+In version 4.0.0 RKE1 will be removed.
+
+### Added
+- can use RKE2 or K3S clusters by setting `kubernetes_version` (leave blank to create an RKE1 cluster)
+- can specify the key to use for the cluster, and not create a new key for each cluster (`openstack_ssh_key`)
+
+### Changed
+- renamed the rke1 module to the cluster module; until version 4.0.0 the rke1 module will be published alongside the cluster module.
+- added commands to clean up default chrony sources
+
+### Removed
+- removed rke2 module, this is now part of cluster module
+
 ## 3.1.2 - 2024-07-03
 
 ### Changed
diff --git a/README.md b/README.md
index bbe7a25f1dc1923da7109605383184b467c093b4..af29b58493718457194c9b9f30cadb486b8b9920 100644
--- a/README.md
+++ b/README.md
@@ -14,41 +14,45 @@ Currently the only supported (and used) modules are RKE1 and ArgoCD.
 
 Creates a project/cluster/app in the managed argocd setup. The users with access to the cluster will also have access to this project in argocd. The app installed will point to the charts/apps folder which installs most of the components of the kubernetes cluster. Many of thse can be turn and off using the variables.
 
-If all options are enabled this will install:
-- ingress controller (traefik v2)
+The following modules are enabled by default:
+- ingress controller (traefik by default)
 - nfs provisioner (connected to taiga)
 - cinder provisioner (volumes as storage in openstack)
-- metallb (loadbalancer using floating ips from RKE1 module)
+- metallb (loadbalancer using floating ips from cluster module)
 - sealed secrets
+- monitoring
+- raw kubernetes
+The following modules are disabled by default:
+- longhorn
 - healthmonitor (expects a secret, so sealed secrets should be enabled)
-- raw kubernetes (used by metallb)
 
-## RKE1 (terraform/modules/rke1)
+## Cluster (terraform/modules/cluster)
 
 Creates a cluster using rancher and openstack. This will create the following pieces for the
 cluster:
 - private network
 - floating IP for load balancer
 - security group
-  - port 22 open to the world
+  - port 22 open to NCSA
   - port 80/443 open to the world
-  - ports 30000 - 32000 open to the world
-  - port 6443 open to NCSA
+  - port 6443 open to all 3 rancher machines
   - all ports open to hosts in same network
-- ssh key
-- rancher managed RKE1 cluster 
-  - monitoring if requested
-  - longhorn if requested
+- ssh key (see note below)
+- rancher managed cluster 
   - admin/normal users using ldap
 - control nodes, with a floating IP for external access
   - iscsi (longhorn) and nfs installed
-  - docker installed
+  - docker installed in case of RKE1 cluster
   - connected to rancher
 - worker nodes, private network
   - iscsi (longhorn) and nfs installed
-  - docker installed
+  - docker installed in case of RKE1 cluster
   - connected to rancher
 
+### SSH key
+
+If multiple people run `terraform` it is critical that **everybody** has the same public key in openstack! This can be done by sharing the public key and asking people to import the public key in openstack and naming it the same as the cluster. If this is not done each user will create their own public/private key pair and you end up with a mix of keys that are injected in the cluster.
+
 ### Definition of machines
 
 Create a file named `cluster.json` following the example in `cluster.example.json` and customize to define the desired set of nodes. The global cluster name is combined with the `name` value and the index of the machine to generate the individual hostnames, where the index ranges from `start_index` to `start_index + count - 1`. The `start_index` spec allows you to avoid name collisions while having multiple machine configurations following the same sequential naming convention. 
@@ -64,16 +68,6 @@ k8s-worker-02      (gp.xlarge, 60GB disk)
 k8s-worker-03      (m1.xlarge, 60GB disk)
 ```
 
-
-
-## RKE2 (terraform/modules/rke2)
-
-This module is not supported yet, will create an RKE2 cluster
-
-## compute/openstack and rancher
-
-No longer supported
-
 # ArgoCD (argocd)
 
 This is the scripts used to bootstrap argocd in one cluster. This installation is used in the argocd terraform module to create projects/cluster.
diff --git a/terraform/modules/rke1/cluster.example.json b/terraform/modules/cluster/cluster.example.json
similarity index 100%
rename from terraform/modules/rke1/cluster.example.json
rename to terraform/modules/cluster/cluster.example.json
diff --git a/terraform/modules/rke1/data.tf b/terraform/modules/cluster/data.tf
similarity index 100%
rename from terraform/modules/rke1/data.tf
rename to terraform/modules/cluster/data.tf
diff --git a/terraform/modules/cluster/key.tf b/terraform/modules/cluster/key.tf
new file mode 100644
index 0000000000000000000000000000000000000000..c9967011af1ea7737834a75c8704ecf9e3559c9c
--- /dev/null
+++ b/terraform/modules/cluster/key.tf
@@ -0,0 +1,18 @@
+# Each cluster will either have a shared key, or their own
+# unique key.
+resource "openstack_compute_keypair_v2" "key" {
+  count = var.openstack_ssh_key == "" ? 1 : 0
+  name  = var.cluster_name
+}
+
+data "openstack_compute_keypair_v2" "key" {
+  count = var.openstack_ssh_key == "" ? 0 : 1
+  name  = var.openstack_ssh_key
+}
+
+# set local variable to hold final key, either created or
+# loaded.
+locals {
+  key_name    = var.openstack_ssh_key == "" ? openstack_compute_keypair_v2.key[0].name : data.openstack_compute_keypair_v2.key[0].name
+  private_key = var.openstack_ssh_key == "" ? openstack_compute_keypair_v2.key[0].private_key : ""
+}
diff --git a/terraform/modules/rke1/network.tf b/terraform/modules/cluster/network.tf
similarity index 100%
rename from terraform/modules/rke1/network.tf
rename to terraform/modules/cluster/network.tf
diff --git a/terraform/modules/rke1/nodes.tf b/terraform/modules/cluster/nodes.tf
similarity index 90%
rename from terraform/modules/rke1/nodes.tf
rename to terraform/modules/cluster/nodes.tf
index 9c24aa440a734dde762064dae78954eb7428214b..94e56d0b36cc2e3a7421a9e5b3c2c02e5a86f2a3 100644
--- a/terraform/modules/rke1/nodes.tf
+++ b/terraform/modules/cluster/nodes.tf
@@ -33,7 +33,7 @@ resource "openstack_compute_instance_v2" "machine" {
   image_name        = each.value.image_name
   availability_zone = each.value.zone
   flavor_name       = each.value.flavor
-  key_pair          = openstack_compute_keypair_v2.key.name
+  key_pair          = local.key_name
   config_drive      = false
 
   depends_on = [
@@ -57,17 +57,16 @@ resource "openstack_compute_instance_v2" "machine" {
   }
 
   user_data = base64encode(templatefile("${path.module}/templates/user_data.tmpl", {
-    private_key    = openstack_compute_keypair_v2.key.private_key
     project_name   = data.openstack_identity_auth_scope_v3.scope.project_name
     cluster_name   = var.cluster_name
     username       = each.value.username
     node_name      = each.value.hostname
-    node_command   = rancher2_cluster.kube.cluster_registration_token.0.node_command
+    node_command   = local.kube.cluster_registration_token.0.node_command
     node_options   = lookup(local.node_options, each.value.role, "--worker")
     node_labels    = join(" ", [for l in each.value.labels : format("-l %s", replace(l, " ", "_"))])
     ncsa_security  = var.ncsa_security
     taiga_enabled  = var.taiga_enabled
-    install_docker = var.install_docker
+    install_docker = local.rke1 && var.install_docker
   }))
 
   lifecycle {
@@ -75,7 +74,9 @@ resource "openstack_compute_instance_v2" "machine" {
       key_pair,
       block_device,
       user_data,
-      availability_zone
+      availability_zone,
+      flavor_name,
+      image_name
     ]
   }
 }
diff --git a/terraform/modules/rke1/outputs.tf b/terraform/modules/cluster/outputs.tf
similarity index 77%
rename from terraform/modules/rke1/outputs.tf
rename to terraform/modules/cluster/outputs.tf
index 2e5a039e5e7b747f31eaefaa229d7442cc6bf686..d085bf758d8ce264ed787bd47d198bb1935e42f0 100644
--- a/terraform/modules/rke1/outputs.tf
+++ b/terraform/modules/cluster/outputs.tf
@@ -10,13 +10,18 @@ output "machines" {
 
 output "node_command" {
   description = "Command to join?"
-  value       = rancher2_cluster.kube.cluster_registration_token[0].node_command
+  value       = local.kube.cluster_registration_token[0].node_command
 }
 
 output "private_key_ssh" {
   description = "Private SSH key"
   sensitive   = true
-  value       = openstack_compute_keypair_v2.key.private_key
+  value       = local.private_key
+}
+
+output "key_name" {
+  description = "SSH key name"
+  value       = local.key_name
 }
 
 output "ssh_config" {
@@ -29,7 +34,7 @@ Host ${x.hostname}
   HostName ${openstack_networking_floatingip_v2.machine_ip[x.hostname].address}
   StrictHostKeyChecking no
   UserKnownHostsFile=/dev/null
-  IdentityFile ${pathexpand("~/.ssh/${var.cluster_name}.pem")}
+  IdentityFile ${pathexpand("~/.ssh/${local.key_name}.pem")}
   User ${x.username}
 %{~endfor}
 
@@ -39,7 +44,7 @@ Host ${x.hostname}
   HostName ${openstack_networking_port_v2.machine_ip[x.hostname].all_fixed_ips[0]}
   StrictHostKeyChecking no
   UserKnownHostsFile=/dev/null
-  IdentityFile ${pathexpand("~/.ssh/${var.cluster_name}.pem")}
+  IdentityFile ${pathexpand("~/.ssh/${local.key_name}.pem")}
   User ${x.username}
 %{~endfor}
 EOT
@@ -48,12 +53,12 @@ EOT
 output "kubeconfig" {
   description = "KUBECONFIG file"
   sensitive   = true
-  value       = rancher2_cluster.kube.kube_config
+  value       = local.kube.kube_config
 }
 
 output "kube_id" {
-  description = "OpenStack project name"
-  value       = rancher2_cluster.kube.id
+  description = "ID of rancher cluster"
+  value       = local.kube_id
 }
 
 output "floating_ip" {
diff --git a/terraform/modules/rke1/providers.tf b/terraform/modules/cluster/providers.tf
similarity index 100%
rename from terraform/modules/rke1/providers.tf
rename to terraform/modules/cluster/providers.tf
diff --git a/terraform/modules/cluster/rancher.tf b/terraform/modules/cluster/rancher.tf
new file mode 100644
index 0000000000000000000000000000000000000000..3d325aaaf2ecce944003aac2cff5662b65ab6fad
--- /dev/null
+++ b/terraform/modules/cluster/rancher.tf
@@ -0,0 +1,156 @@
+locals {
+  rke1    = var.kubernetes_version == ""
+  kube    = local.rke1 ? rancher2_cluster.kube[0] : rancher2_cluster_v2.kube[0]
+  kube_id = local.rke1 ? rancher2_cluster.kube[0].id : rancher2_cluster_v2.kube[0].cluster_v1_id
+}
+
+# ----------------------------------------------------------------------
+# cluster definition
+# ----------------------------------------------------------------------
+resource "rancher2_cluster_v2" "kube" {
+  count              = local.rke1 ? 0 : 1
+  name               = var.cluster_name
+  kubernetes_version = var.kubernetes_version
+  rke_config {
+    machine_global_config = yamlencode({
+      cni : var.network_plugin
+      disable : [
+        # K3S
+        #- coredns
+        "servicelb",
+        "traefik",
+        "local-storage",
+        #- metrics-server
+        # RKE2
+        #- rke2-coredns
+        "rke2-ingress-nginx"
+        #- rke2-metrics-server
+      ]
+    })
+    upgrade_strategy {
+      control_plane_concurrency = "1"
+      worker_concurrency        = "1"
+      control_plane_drain_options {
+        delete_empty_dir_data                = true
+        disable_eviction                     = false
+        enabled                              = true
+        force                                = false
+        grace_period                         = 120
+        ignore_daemon_sets                   = true
+        ignore_errors                        = false
+        skip_wait_for_delete_timeout_seconds = 0
+        timeout                              = 0
+      }
+      worker_drain_options {
+        delete_empty_dir_data                = true
+        disable_eviction                     = false
+        enabled                              = true
+        force                                = false
+        grace_period                         = 120
+        ignore_daemon_sets                   = true
+        ignore_errors                        = false
+        skip_wait_for_delete_timeout_seconds = 0
+        timeout                              = 0
+      }
+    }
+  }
+}
+
+resource "rancher2_cluster" "kube" {
+  count       = local.rke1 ? 1 : 0
+  name        = var.cluster_name
+  description = var.cluster_description
+  driver      = "rancherKubernetesEngine"
+
+  cluster_auth_endpoint {
+    enabled = false
+  }
+
+  rke_config {
+    kubernetes_version = var.rke1_version
+    enable_cri_dockerd = true
+    network {
+      plugin = var.network_plugin
+    }
+    ingress {
+      provider = "none"
+    }
+    upgrade_strategy {
+      drain = true
+      drain_input {
+        delete_local_data  = true
+        ignore_daemon_sets = true
+        timeout            = 120
+      }
+      max_unavailable_controlplane = 1
+      max_unavailable_worker       = 1
+    }
+  }
+}
+
+# ----------------------------------------------------------------------
+# cluster access
+# ----------------------------------------------------------------------
+resource "rancher2_cluster_role_template_binding" "admin_users" {
+  for_each          = var.admin_users
+  name              = "${local.kube_id}-user-${replace(each.value, "_", "-")}"
+  cluster_id        = local.kube_id
+  role_template_id  = "cluster-owner"
+  user_principal_id = "openldap_user://uid=${each.value},ou=People,dc=ncsa,dc=illinois,dc=edu"
+  depends_on        = [openstack_compute_instance_v2.machine]
+  lifecycle {
+    ignore_changes = [
+      annotations,
+      labels,
+      user_id
+    ]
+  }
+}
+
+resource "rancher2_cluster_role_template_binding" "admin_groups" {
+  for_each          = var.admin_groups
+  name              = "${local.kube_id}-group-${replace(each.value, "_", "-")}"
+  cluster_id        = local.kube_id
+  role_template_id  = "cluster-owner"
+  user_principal_id = "openldap_group://cn=${each.value},ou=Groups,dc=ncsa,dc=illinois,dc=edu"
+  depends_on        = [openstack_compute_instance_v2.machine]
+  lifecycle {
+    ignore_changes = [
+      annotations,
+      labels,
+      user_id
+    ]
+  }
+}
+
+resource "rancher2_cluster_role_template_binding" "member_users" {
+  for_each          = var.member_users
+  name              = "${local.kube_id}-user-${replace(each.value, "_", "-")}"
+  cluster_id        = local.kube_id
+  role_template_id  = "cluster-member"
+  user_principal_id = "openldap_user://uid=${each.value},ou=People,dc=ncsa,dc=illinois,dc=edu"
+  depends_on        = [openstack_compute_instance_v2.machine]
+  lifecycle {
+    ignore_changes = [
+      annotations,
+      labels,
+      user_id
+    ]
+  }
+}
+
+resource "rancher2_cluster_role_template_binding" "member_groups" {
+  for_each          = var.member_groups
+  name              = "${local.kube_id}-group-${replace(each.value, "_", "-")}"
+  cluster_id        = local.kube_id
+  role_template_id  = "cluster-member"
+  user_principal_id = "openldap_group://cn=${each.value},ou=Groups,dc=ncsa,dc=illinois,dc=edu"
+  depends_on        = [openstack_compute_instance_v2.machine]
+  lifecycle {
+    ignore_changes = [
+      annotations,
+      labels,
+      user_id
+    ]
+  }
+}
diff --git a/terraform/modules/rke1/security_group.tf b/terraform/modules/cluster/security_group.tf
similarity index 100%
rename from terraform/modules/rke1/security_group.tf
rename to terraform/modules/cluster/security_group.tf
diff --git a/terraform/modules/rke1/templates/user_data.tmpl b/terraform/modules/cluster/templates/user_data.tmpl
similarity index 91%
rename from terraform/modules/rke1/templates/user_data.tmpl
rename to terraform/modules/cluster/templates/user_data.tmpl
index dee3595cee56afb016bfaae1b008958534113a0a..651c3db7a57fd5afd24460684d0556e208b06929 100644
--- a/terraform/modules/rke1/templates/user_data.tmpl
+++ b/terraform/modules/cluster/templates/user_data.tmpl
@@ -30,7 +30,7 @@ users:
   - name: qualys
     gecos: Qualys Service
     groups: users
-    system: true
+    system: false
     shell: /bin/bash
     ssh_authorized_keys:
       - ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAGAwkmzfc0NyhjOdi1qfI5SVQ0prU1luu24xUNeEyEvH9CX80hmXt+ZnQt8Dc7HExUXDcSZo25g71WnuvlYbZefBgHkOLY5JpDcTGuQcb7W6CXD9UG7Unu4YbmBErQhs3u2iuNLYCDxAhoVvfK4Op/sNvMKME72KM3hQ6GE+H1QD8xZZA==
@@ -49,6 +49,7 @@ write_files:
     taiga-nfs.ncsa.illinois.edu:/taiga/ncsa/radiant/${project_name}/${cluster_name} /taiga nfs defaults 0 0
   append: true
 %{ endif ~}
+%{ if install_docker ~}
 - path: /etc/docker/daemon.json
   permissions: "0644"
   owner: root:root
@@ -61,6 +62,7 @@ write_files:
       },
       "storage-driver": "overlay2"
     }
+%{ endif ~}
 %{ if ncsa_security }
 - path: /etc/rsyslog.d/00-ncsa.conf
   permissions: "0644"
@@ -103,7 +105,7 @@ write_files:
     pool ntp.ncsa.illinois.edu iburst maxsources 2
 %{ endif }
 %{ endif }
-- path: /usr/local/bin/rke1
+- path: /usr/local/bin/join_rancher
   permissions: "0700"
   owner: root:root
   content: |
@@ -111,12 +113,17 @@ write_files:
     echo "sleeping to wait for network"
     while ! curl --fail --silent --output /dev/null http://ncsa.illinois.edu ; do echo "Sleep 10s"; sleep 10; done
 %{ if ncsa_security }
+    # disable ipv6
     sysctl -w net.ipv6.conf.all.disable_ipv6=1
     sysctl -w net.ipv6.conf.default.disable_ipv6=1
     sysctl -w net.ipv6.route.flush=1
 %{ if username == "ubuntu" }
+    # disable rpcbind and rpc-statd
     systemctl disable --now rpcbind
     systemctl disable --now rpc-statd
+    # clean up default pool sources if we are using ncsa security
+    sed -i 's/^pool/# pool/' /etc/chrony/chrony.conf
+    systemctl restart chronyd
 %{ endif }
 %{ endif }
 %{ if username == "ubuntu" }
@@ -124,7 +131,7 @@ write_files:
 %{ endif }
 %{ if install_docker ~}
     echo "install docker"
-    curl https://releases.rancher.com/install-docker/24.0.sh | sh
+    curl https://releases.rancher.com/install-docker/26.1.sh | sh
     apt-get -qq update
     apt-get -y dist-upgrade
     systemctl enable --now docker
@@ -140,7 +147,7 @@ write_files:
 
 # run this command once the system is booted
 runcmd:
-- /usr/local/bin/rke1
+- /usr/local/bin/join_rancher
 
 power_state:
  delay: "+5"
diff --git a/terraform/modules/rke1/variables.tf b/terraform/modules/cluster/variables.tf
similarity index 87%
rename from terraform/modules/rke1/variables.tf
rename to terraform/modules/cluster/variables.tf
index a083aa24bf2fbfd9a8c9a6b90571fd90d3ab1530..3043524d7991c3869ea01145a9b1fc4ea45cbd64 100644
--- a/terraform/modules/rke1/variables.tf
+++ b/terraform/modules/cluster/variables.tf
@@ -40,6 +40,16 @@ variable "rancher_token" {
   description = "Access token for rancher, clusters are created as this user"
 }
 
+# RKE2
+# curl -s https://releases.rancher.com/kontainer-driver-metadata/release-v2.9/data.json | jq -r '.rke2.releases[].version'
+# K3S
+# curl -s https://releases.rancher.com/kontainer-driver-metadata/release-v2.9/data.json | jq -r '.k3s.releases[].version'
+variable "kubernetes_version" {
+  type        = string
+  description = "Version of rke2/k3s to install (leave blank to install rke1)"
+  default     = ""
+}
+
 # curl -s https://releases.rancher.com/kontainer-driver-metadata/release-v2.6/data.json | jq -r '.K8sVersionRKESystemImages | keys'
 variable "rke1_version" {
   type        = string
@@ -49,7 +59,7 @@ variable "rke1_version" {
 
 variable "network_plugin" {
   type        = string
-  description = "Network plugin to be used"
+  description = "Network plugin to be used (canal, cilium, calico, flannel, ...)"
   default     = "weave"
 }
 
@@ -155,6 +165,12 @@ variable "openstack_os_image" {
   }
 }
 
+variable "openstack_ssh_key" {
+  type        = string
+  description = "OpenStack SSH key name, leave blank to generate new key"
+  default     = ""
+}
+
 # ----------------------------------------------------------------------
 # NETWORKING
 # ----------------------------------------------------------------------
@@ -195,6 +211,6 @@ variable "taiga_enabled" {
 
 variable "install_docker" {
   type        = bool
-  description = "Install Docker when provisioning node"
+  description = "Install Docker when provisioning node (only for rke1)"
   default     = true
 }
diff --git a/terraform/modules/rke1/versions.tf b/terraform/modules/cluster/versions.tf
similarity index 100%
rename from terraform/modules/rke1/versions.tf
rename to terraform/modules/cluster/versions.tf
diff --git a/terraform/modules/rke1/key.tf b/terraform/modules/rke1/key.tf
deleted file mode 100644
index 7bee3731b52c4aad74c958ed86485b5f905b94c0..0000000000000000000000000000000000000000
--- a/terraform/modules/rke1/key.tf
+++ /dev/null
@@ -1,12 +0,0 @@
-# Each cluster will either have a shared key, or their own
-# unique key.
-resource "openstack_compute_keypair_v2" "key" {
-  #count      = 1 #var.openstack_ssh_key == "" ? 0 : 1
-  name = var.cluster_name
-}
-
-# set local variable to hold final key, either created or
-# loaded.
-locals {
-  key = var.cluster_name # var.openstack_ssh_key == "" ? var.cluster_name : var.openstack_ssh_key
-}
diff --git a/terraform/modules/rke1/rancher.tf b/terraform/modules/rke1/rancher.tf
deleted file mode 100644
index b85f03896608c321de12236694b20beea34c3b8a..0000000000000000000000000000000000000000
--- a/terraform/modules/rke1/rancher.tf
+++ /dev/null
@@ -1,103 +0,0 @@
-# ----------------------------------------------------------------------
-# cluster definition
-# ----------------------------------------------------------------------
-resource "rancher2_cluster" "kube" {
-  name        = var.cluster_name
-  description = var.cluster_description
-  driver      = "rancherKubernetesEngine"
-
-  cluster_auth_endpoint {
-    enabled = false
-  }
-
-  rke_config {
-    kubernetes_version = var.rke1_version
-    enable_cri_dockerd = true
-    network {
-      plugin = var.network_plugin
-    }
-    ingress {
-      provider = "none"
-    }
-    upgrade_strategy {
-      drain = true
-      drain_input {
-        delete_local_data  = true
-        ignore_daemon_sets = true
-        timeout            = 120
-      }
-      max_unavailable_controlplane = 1
-      max_unavailable_worker       = 1
-    }
-  }
-}
-
-# Create a new rancher2 Cluster Sync for foo-custom cluster
-resource "rancher2_cluster_sync" "kube" {
-  depends_on    = [openstack_compute_instance_v2.machine]
-  cluster_id    = rancher2_cluster.kube.id
-  wait_catalogs = false
-}
-
-# ----------------------------------------------------------------------
-# cluster access
-# ----------------------------------------------------------------------
-resource "rancher2_cluster_role_template_binding" "admin_users" {
-  for_each          = var.admin_users
-  name              = "${rancher2_cluster_sync.kube.id}-user-${replace(each.value, "_", "-")}"
-  cluster_id        = rancher2_cluster_sync.kube.id
-  role_template_id  = "cluster-owner"
-  user_principal_id = "openldap_user://uid=${each.value},ou=People,dc=ncsa,dc=illinois,dc=edu"
-  lifecycle {
-    ignore_changes = [
-      annotations,
-      labels,
-      user_id
-    ]
-  }
-}
-
-resource "rancher2_cluster_role_template_binding" "admin_groups" {
-  for_each          = var.admin_groups
-  name              = "${rancher2_cluster_sync.kube.id}-group-${replace(each.value, "_", "-")}"
-  cluster_id        = rancher2_cluster_sync.kube.id
-  role_template_id  = "cluster-owner"
-  user_principal_id = "openldap_group://cn=${each.value},ou=Groups,dc=ncsa,dc=illinois,dc=edu"
-  lifecycle {
-    ignore_changes = [
-      annotations,
-      labels,
-      user_id
-    ]
-  }
-}
-
-resource "rancher2_cluster_role_template_binding" "member_users" {
-  for_each          = var.member_users
-  name              = "${rancher2_cluster_sync.kube.id}-user-${replace(each.value, "_", "-")}"
-  cluster_id        = rancher2_cluster_sync.kube.id
-  role_template_id  = "cluster-member"
-  user_principal_id = "openldap_user://uid=${each.value},ou=People,dc=ncsa,dc=illinois,dc=edu"
-  lifecycle {
-    ignore_changes = [
-      annotations,
-      labels,
-      user_id
-    ]
-  }
-}
-
-resource "rancher2_cluster_role_template_binding" "member_groups" {
-  for_each          = var.member_groups
-  name              = "${rancher2_cluster_sync.kube.id}-group-${replace(each.value, "_", "-")}"
-  cluster_id        = rancher2_cluster_sync.kube.id
-  role_template_id  = "cluster-member"
-  user_principal_id = "openldap_group://cn=${each.value},ou=Groups,dc=ncsa,dc=illinois,dc=edu"
-  lifecycle {
-    ignore_changes = [
-      annotations,
-      labels,
-      user_id
-    ]
-  }
-}
diff --git a/terraform/modules/rke2/data.tf b/terraform/modules/rke2/data.tf
deleted file mode 100644
index abb0f77eb647e20bfceb2fd3dd52eb06a931cad0..0000000000000000000000000000000000000000
--- a/terraform/modules/rke2/data.tf
+++ /dev/null
@@ -1,15 +0,0 @@
-# external network
-data "openstack_networking_network_v2" "ext_net" {
-  name = var.openstack_external_net
-}
-
-# boot image
-data "openstack_images_image_v2" "boot" {
-  name        = var.os
-  most_recent = true
-}
-
-# openstack project name (bbXX)
-data "openstack_identity_auth_scope_v3" "scope" {
-  name = "my_scope"
-}
diff --git a/terraform/modules/rke2/key.tf b/terraform/modules/rke2/key.tf
deleted file mode 100644
index 7bee3731b52c4aad74c958ed86485b5f905b94c0..0000000000000000000000000000000000000000
--- a/terraform/modules/rke2/key.tf
+++ /dev/null
@@ -1,12 +0,0 @@
-# Each cluster will either have a shared key, or their own
-# unique key.
-resource "openstack_compute_keypair_v2" "key" {
-  #count      = 1 #var.openstack_ssh_key == "" ? 0 : 1
-  name = var.cluster_name
-}
-
-# set local variable to hold final key, either created or
-# loaded.
-locals {
-  key = var.cluster_name # var.openstack_ssh_key == "" ? var.cluster_name : var.openstack_ssh_key
-}
diff --git a/terraform/modules/rke2/network.tf b/terraform/modules/rke2/network.tf
deleted file mode 100644
index f8d697a3eff2f52f082ae7619d108b5a75f71e92..0000000000000000000000000000000000000000
--- a/terraform/modules/rke2/network.tf
+++ /dev/null
@@ -1,101 +0,0 @@
-# Each cluster has their own network. The network will have a
-# private ip spaces of 192.168.0.0/21. Each of the machines will
-# have a fixed ip address in this private IP space.
-#
-# For the worker nodes, tere will be a set of floating IP addresses
-# that can be given to a load balancer (using for example metallb).
-#
-# The control plane nodes will have a floating IP address associated,
-# until I can figure out how to give them a second IP address that
-# works correctly.
-
-# ----------------------------------------------------------------------
-# setup network, subnet and router
-# ----------------------------------------------------------------------
-
-resource "openstack_networking_network_v2" "cluster_net" {
-  name           = "${var.cluster_name}-net"
-  admin_state_up = "true"
-}
-
-resource "openstack_networking_subnet_v2" "cluster_subnet" {
-  name            = "${var.cluster_name}-subnet"
-  network_id      = openstack_networking_network_v2.cluster_net.id
-  cidr            = var.network_cidr
-  ip_version      = 4
-  dns_nameservers = var.dns_servers
-}
-
-resource "openstack_networking_router_v2" "kube_router" {
-  name                = "${var.cluster_name}-router"
-  external_network_id = data.openstack_networking_network_v2.ext_net.id
-  admin_state_up      = "true"
-}
-
-resource "openstack_networking_router_interface_v2" "kube_gateway" {
-  router_id = openstack_networking_router_v2.kube_router.id
-  subnet_id = openstack_networking_subnet_v2.cluster_subnet.id
-}
-
-# ----------------------------------------------------------------------
-# control plane
-# ----------------------------------------------------------------------
-
-resource "openstack_networking_port_v2" "controlplane_ip" {
-  count              = var.controlplane_count
-  name               = format("%s-controlplane-%d", var.cluster_name, count.index + 1)
-  network_id         = openstack_networking_network_v2.cluster_net.id
-  security_group_ids = [openstack_networking_secgroup_v2.cluster_security_group.id]
-  depends_on         = [openstack_networking_router_interface_v2.kube_gateway]
-}
-
-resource "openstack_networking_floatingip_v2" "controlplane_ip" {
-  count       = var.controlplane_count
-  description = format("%s-controlplane-%d", var.cluster_name, count.index + 1)
-  pool        = data.openstack_networking_network_v2.ext_net.name
-  port_id     = element(openstack_networking_port_v2.controlplane_ip.*.id, count.index)
-}
-
-resource "openstack_networking_port_v2" "controlplane_ip_public" {
-  count              = var.controlplane_count
-  name               = format("%s-controlplane-%d", var.cluster_name, count.index + 1)
-  network_id         = data.openstack_networking_network_v2.ext_net.id
-  security_group_ids = [openstack_networking_secgroup_v2.cluster_security_group.id]
-}
-
-# ----------------------------------------------------------------------
-# worker nodes
-# ----------------------------------------------------------------------
-
-# create a port that will be used with the floating ip, this will be associated
-# with all of the VMs.
-resource "openstack_networking_port_v2" "floating_ip" {
-  count      = var.floating_ip
-  depends_on = [openstack_networking_subnet_v2.cluster_subnet]
-  name       = format("%s-floating-ip-%02d", var.cluster_name, count.index + 1)
-  network_id = openstack_networking_network_v2.cluster_net.id
-}
-
-# create floating ip that is associated with a fixed ip
-resource "openstack_networking_floatingip_v2" "floating_ip" {
-  count       = var.floating_ip
-  description = format("%s-floating-ip-%02d", var.cluster_name, count.index + 1)
-  pool        = data.openstack_networking_network_v2.ext_net.name
-  port_id     = element(openstack_networking_port_v2.floating_ip.*.id, count.index)
-}
-
-# create worker ip, this can route the ports for the floating ip as
-# well.
-resource "openstack_networking_port_v2" "worker_ip" {
-  count              = var.worker_count
-  name               = format("%s-worker-%02d", var.cluster_name, count.index + 1)
-  network_id         = openstack_networking_network_v2.cluster_net.id
-  security_group_ids = [openstack_networking_secgroup_v2.cluster_security_group.id]
-  depends_on         = [openstack_networking_router_interface_v2.kube_gateway]
-  dynamic "allowed_address_pairs" {
-    for_each = openstack_networking_port_v2.floating_ip.*.all_fixed_ips.0
-    content {
-      ip_address = allowed_address_pairs.value
-    }
-  }
-}
diff --git a/terraform/modules/rke2/nodes.tf b/terraform/modules/rke2/nodes.tf
deleted file mode 100644
index 0d775ea0ff89df26856dd5051d0eba4cae5b92ae..0000000000000000000000000000000000000000
--- a/terraform/modules/rke2/nodes.tf
+++ /dev/null
@@ -1,99 +0,0 @@
-# ----------------------------------------------------------------------
-# control-plane nodes
-# ----------------------------------------------------------------------
-resource "openstack_compute_instance_v2" "controlplane" {
-  count = var.controlplane_count
-  depends_on = [
-    openstack_networking_secgroup_rule_v2.same_security_group_ingress_tcp,
-  ]
-  name        = format("%s-controlplane-%d", var.cluster_name, count.index + 1)
-  image_name  = var.os
-  flavor_name = var.controlplane_flavor
-  key_pair    = local.key
-  security_groups = [
-    openstack_networking_secgroup_v2.cluster_security_group.name
-  ]
-  config_drive = false
-
-  user_data = base64encode(templatefile("${path.module}/templates/user_data.tmpl", {
-    private_key  = openstack_compute_keypair_v2.key.private_key
-    project_name = data.openstack_identity_auth_scope_v3.scope.project_name
-    cluster_name = var.cluster_name
-    node_name    = format("%s-controlplane-%d", var.cluster_name, count.index + 1)
-    node_command = rancher2_cluster_v2.kube.cluster_registration_token[0].node_command
-    node_options = "--controlplane --etcd --address awspublic --internal-address awslocal"
-  }))
-
-  block_device {
-    uuid                  = data.openstack_images_image_v2.boot.id
-    source_type           = "image"
-    volume_size           = var.controlplane_disksize
-    destination_type      = "volume"
-    delete_on_termination = true
-  }
-
-  network {
-    port = element(openstack_networking_port_v2.controlplane_ip.*.id, count.index)
-  }
-
-  # network {
-  #   port = element(openstack_networking_port_v2.controlplane_ip_public.*.id, count.index)
-  # }
-
-  lifecycle {
-    ignore_changes = [
-      key_pair,
-      block_device,
-      user_data
-    ]
-  }
-}
-
-# ----------------------------------------------------------------------
-# worker nodes
-# ----------------------------------------------------------------------
-
-resource "openstack_compute_instance_v2" "worker" {
-  count = var.worker_count
-  depends_on = [
-    openstack_networking_secgroup_rule_v2.same_security_group_ingress_tcp,
-    openstack_networking_port_v2.controlplane_ip
-  ]
-  name            = format("%s-worker-%02d", var.cluster_name, count.index + 1)
-  flavor_name     = var.worker_flavor
-  key_pair        = local.key
-  config_drive    = false
-  security_groups = [
-    openstack_networking_secgroup_v2.cluster_security_group.name
-  ]
-
-  user_data = base64encode(templatefile("${path.module}/templates/user_data.tmpl", {
-    private_key  = openstack_compute_keypair_v2.key.private_key
-    project_name = data.openstack_identity_auth_scope_v3.scope.project_name
-    cluster_name = var.cluster_name
-    node_name    = format("%s-worker-%02d", var.cluster_name, count.index + 1)
-    node_command = rancher2_cluster_v2.kube.cluster_registration_token[0].node_command
-    node_options = "--worker --internal-address awslocal"
-  }))
-
-  block_device {
-    uuid                  = data.openstack_images_image_v2.boot.id
-    source_type           = "image"
-    volume_size           = var.worker_disksize
-    destination_type      = "volume"
-    boot_index            = 0
-    delete_on_termination = true
-  }
-
-  network {
-    port = element(openstack_networking_port_v2.worker_ip.*.id, count.index)
-  }
-
-  lifecycle {
-    ignore_changes = [
-      key_pair,
-      block_device,
-      user_data
-    ]
-  }
-}
diff --git a/terraform/modules/rke2/outputs.tf b/terraform/modules/rke2/outputs.tf
deleted file mode 100644
index 4033a2a279d6602346d19c3b61da115d179f6788..0000000000000000000000000000000000000000
--- a/terraform/modules/rke2/outputs.tf
+++ /dev/null
@@ -1,63 +0,0 @@
-output "project_name" {
-  description = "OpenStack project name"
-  value       = data.openstack_identity_auth_scope_v3.scope.project_name
-}
-
-output "node_command" {
-  description = "Command to join?"
-  value       = rancher2_cluster_v2.kube.cluster_registration_token[0].node_command
-}
-
-output "private_key_ssh" {
-  description = "Private SSH key"
-  sensitive   = true
-  value       = openstack_compute_keypair_v2.key.private_key
-}
-
-output "ssh_config" {
-  description = "SSH Configuration file for use with ssh/config"
-  value       = <<-EOT
-# Automatically created by terraform
-
-%{~for i, x in openstack_compute_instance_v2.controlplane.*}
-Host ${x.name}
-  HostName ${openstack_networking_floatingip_v2.controlplane_ip[i].address}
-  StrictHostKeyChecking no
-  UserKnownHostsFile=/dev/null
-  IdentityFile ${pathexpand("~/.ssh/${var.cluster_name}.pem")}
-  User centos
-
-%{~endfor}
-%{~for x in openstack_compute_instance_v2.worker.*}
-Host ${x.name}
-  HostName ${x.network[0].fixed_ip_v4}
-  StrictHostKeyChecking no
-  ProxyJump ${openstack_compute_instance_v2.controlplane[0].name}
-  UserKnownHostsFile=/dev/null
-  IdentityFile ${pathexpand("~/.ssh/${var.cluster_name}.pem")}
-  User centos
-
-%{~endfor}
-EOT
-}
-
-output "kubeconfig" {
-  description = "KUBECONFIG file"
-  sensitive   = true
-  value       = rancher2_cluster_v2.kube.kube_config
-}
-
-output "kube_id" {
-  description = "OpenStack project name"
-  value       = rancher2_cluster_v2.kube.cluster_v1_id
-}
-
-output "floating_ip" {
-  description = "Map for floating ips and associated private ips"
-  value = [
-    for i, ip in openstack_networking_floatingip_v2.floating_ip.*.address : {
-      private_ip = element(flatten(openstack_networking_port_v2.floating_ip.*.all_fixed_ips), i)
-      public_ip  = ip
-    }
-  ]
-}
diff --git a/terraform/modules/rke2/providers.tf b/terraform/modules/rke2/providers.tf
deleted file mode 100644
index 0fc99bb4bb92769f39b2b02d7c1a5c10b73899f2..0000000000000000000000000000000000000000
--- a/terraform/modules/rke2/providers.tf
+++ /dev/null
@@ -1,11 +0,0 @@
-provider "openstack" {
-  auth_url                      = var.openstack_url
-  region                        = "RegionOne"
-  application_credential_id     = var.openstack_credential_id
-  application_credential_secret = var.openstack_credential_secret
-}
-
-provider "rancher2" {
-  api_url   = var.rancher_url
-  token_key = var.rancher_token
-}
diff --git a/terraform/modules/rke2/rancher.tf b/terraform/modules/rke2/rancher.tf
deleted file mode 100644
index b78b60fb940b73c7b1aa54b86684f7f161126acd..0000000000000000000000000000000000000000
--- a/terraform/modules/rke2/rancher.tf
+++ /dev/null
@@ -1,156 +0,0 @@
-# ----------------------------------------------------------------------
-# cluster definition
-# ----------------------------------------------------------------------
-resource "rancher2_cluster_v2" "kube" {
-  name                                     = var.cluster_name
-  default_cluster_role_for_project_members = "user"
-  kubernetes_version                       = var.rke2_version
-
-  agent_env_vars {
-    name  = "CATTLE_AGENT_LOGLEVEL"
-    value = "info"
-  }
-
-  rke_config {
-    local_auth_endpoint {
-      enabled = var.cluster_direct_access
-    }
-    machine_global_config = <<EOF
-disable:
-- rke2-ingress-nginx
-EOF
-    upgrade_strategy {
-      control_plane_concurrency = 1
-      control_plane_drain_options {
-        ignore_daemon_sets    = true
-        delete_empty_dir_data = true
-        grace_period          = 120
-      }
-      worker_concurrency = 1
-      worker_drain_options {
-        ignore_daemon_sets    = true
-        delete_empty_dir_data = true
-        grace_period          = 120
-      }
-    }
-  }
-}
-
-# Create a new rancher2 Cluster Sync for cluster
-resource "rancher2_cluster_sync" "kube" {
-  depends_on      = [ openstack_compute_instance_v2.controlplane[0] ]
-  cluster_id      = rancher2_cluster_v2.kube.cluster_v1_id
-  wait_catalogs   = false
-}
-
-
-# ----------------------------------------------------------------------
-# applications
-# ----------------------------------------------------------------------
-resource "rancher2_app_v2" "monitoring" {
-  count      = var.monitoring_enabled ? 1 : 0
-  cluster_id = rancher2_cluster_sync.kube.id
-  name       = "rancher-monitoring"
-  namespace  = "cattle-monitoring-system"
-  repo_name  = "rancher-charts"
-  chart_name = "rancher-monitoring"
-  //  values        = <<EOF
-  //prometheus:
-  //  resources:
-  //    core:
-  //      limits:
-  //        cpu: "4000m"
-  //        memory: "6144Mi"
-  //EOF
-  lifecycle {
-    ignore_changes = [
-      values
-    ]
-  }
-}
-
-resource "rancher2_app_v2" "longhorn" {
-  count      = var.longhorn_enabled ? 1 : 0
-  cluster_id = rancher2_cluster_v2.kube.cluster_v1_id
-  name       = "longhorn"
-  namespace  = "longhorn-system"
-  repo_name  = "rancher-charts"
-  chart_name = "longhorn"
-  values     = <<EOF
-defaultSettings:
-  backupTarget: nfs://radiant-nfs.ncsa.illinois.edu:/radiant/projects/${data.openstack_identity_auth_scope_v3.scope.project_name}/${var.cluster_name}/backup
-  defaultReplicaCount: ${var.longhorn_replicas}
-persistence:
-  defaultClass: false
-  defaultClassReplicaCount: ${var.longhorn_replicas}
-EOF
-  lifecycle {
-    ignore_changes = [
-      values
-    ]
-  }
-}
-
-# ----------------------------------------------------------------------
-# cluster access
-# ----------------------------------------------------------------------
-
-resource "rancher2_cluster_role_template_binding" "admin_users" {
-  for_each          = var.admin_users
-  name              = "admin-${replace(each.value, "_", "-")}"
-  cluster_id        = rancher2_cluster_v2.kube.cluster_v1_id
-  role_template_id  = "cluster-owner"
-  user_principal_id = "openldap_user://uid=${each.value},ou=People,dc=ncsa,dc=illinois,dc=edu"
-  lifecycle {
-    ignore_changes = [
-      annotations,
-      labels,
-      user_id
-    ]
-  }
-}
-
-resource "rancher2_cluster_role_template_binding" "admin_groups" {
-  for_each          = var.admin_groups
-  name              = "admin-group-${replace(each.value, "_", "-")}"
-  cluster_id        = rancher2_cluster_v2.kube.cluster_v1_id
-  role_template_id  = "cluster-owner"
-  user_principal_id = "openldap_group://cn=${each.value},ou=Groups,dc=ncsa,dc=illinois,dc=edu"
-  lifecycle {
-    ignore_changes = [
-      annotations,
-      labels,
-      user_id
-    ]
-  }
-}
-
-resource "rancher2_cluster_role_template_binding" "member_users" {
-  for_each          = var.member_users
-  name              = "member-user-${replace(each.value, "_", "-")}"
-  cluster_id        = rancher2_cluster_v2.kube.cluster_v1_id
-  role_template_id  = "cluster-member"
-  user_principal_id = "openldap_user://uid=${each.value},ou=People,dc=ncsa,dc=illinois,dc=edu"
-  lifecycle {
-    ignore_changes = [
-      annotations,
-      labels,
-      user_id
-    ]
-  }
-}
-
-resource "rancher2_cluster_role_template_binding" "member_groups" {
-  for_each          = var.member_groups
-  name              = "member-group-${replace(each.value, "_", "-")}"
-  cluster_id        = rancher2_cluster_v2.kube.cluster_v1_id
-  role_template_id  = "cluster-member"
-  user_principal_id = "openldap_group://cn=${each.value},ou=Groups,dc=ncsa,dc=illinois,dc=edu"
-  lifecycle {
-    ignore_changes = [
-      annotations,
-      labels,
-      user_id
-    ]
-  }
-}
diff --git a/terraform/modules/rke2/security_group.tf b/terraform/modules/rke2/security_group.tf
deleted file mode 100644
index 5c401d0b6393c26c0ac74cd7a7aafb3a1f61b220..0000000000000000000000000000000000000000
--- a/terraform/modules/rke2/security_group.tf
+++ /dev/null
@@ -1,129 +0,0 @@
-resource "openstack_networking_secgroup_v2" "cluster_security_group" {
-  name        = var.cluster_name
-  description = "${var.cluster_name} kubernetes cluster security group"
-}
-
-# ----------------------------------------------------------------------
-# Egress
-# ----------------------------------------------------------------------
-
-#Egress  IPv4  Any Any 0.0.0.0/0 - - 
-#resource "openstack_networking_secgroup_rule_v2" "egress_ipv4" {
-#  direction         = "egress"
-#  ethertype         = "IPv4"
-#  security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
-#  depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
-#}
-
-#Egress  IPv6  Any Any ::/0  - - 
-#resource "openstack_networking_secgroup_rule_v2" "egress_ipv6" {
-#  direction         = "egress"
-#  ethertype         = "IPv6"
-#  security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
-#  depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
-#}
-
-# ----------------------------------------------------------------------
-# Ingress
-# ----------------------------------------------------------------------
-
-# Ingress IPv4  ICMP  Any 0.0.0.0/0 - - 
-resource "openstack_networking_secgroup_rule_v2" "ingress_icmp" {
-  direction         = "ingress"
-  ethertype         = "IPv4"
-  protocol          = "icmp"
-  security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
-  depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
-}
-
-# Ingress IPv4  TCP 22 (SSH)  0.0.0.0/0 - - 
-resource "openstack_networking_secgroup_rule_v2" "ingress_ssh" {
-  description       = "ssh"
-  direction         = "ingress"
-  ethertype         = "IPv4"
-  protocol          = "tcp"
-  port_range_min    = 22
-  port_range_max    = 22
-  security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
-  depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
-}
-
-# Ingress IPv4  TCP 80 (HTTP) 0.0.0.0/0 - - 
-resource "openstack_networking_secgroup_rule_v2" "ingress_http" {
-  description       = "http"
-  direction         = "ingress"
-  ethertype         = "IPv4"
-  protocol          = "tcp"
-  port_range_min    = 80
-  port_range_max    = 80
-  security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
-  depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
-}
-
-# Ingress IPv4  TCP 443 (HTTPS) 0.0.0.0/0 - - 
-resource "openstack_networking_secgroup_rule_v2" "ingress_https" {
-  description       = "https"
-  direction         = "ingress"
-  ethertype         = "IPv4"
-  protocol          = "tcp"
-  port_range_min    = 443
-  port_range_max    = 443
-  security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
-  depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
-}
-
-# Ingress IPv4  TCP 6443  141.142.0.0/16  - kube api  
-resource "openstack_networking_secgroup_rule_v2" "ingress_kubeapi" {
-  description       = "kubeapi"
-  direction         = "ingress"
-  ethertype         = "IPv4"
-  protocol          = "tcp"
-  port_range_min    = 6443
-  port_range_max    = 6443
-  remote_ip_prefix  = "141.142.0.0/16"
-  security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
-  depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
-}
-
-# Ingress IPv4  TCP 9345  141.142.0.0/16  - rke2 api  
-resource "openstack_networking_secgroup_rule_v2" "ingress_rke2api" {
-  description       = "rke2api"
-  direction         = "ingress"
-  ethertype         = "IPv4"
-  protocol          = "tcp"
-  port_range_min    = 9345
-  port_range_max    = 9345
-  remote_ip_prefix  = "141.142.0.0/16"
-  security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
-  depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
-}
-
-# Ingress IPv4  TCP 30000 - 32767 0.0.0.0/0 - nodeport  
-resource "openstack_networking_secgroup_rule_v2" "ingress_nodeport" {
-  description       = "nodeport"
-  direction         = "ingress"
-  ethertype         = "IPv4"
-  protocol          = "tcp"
-  port_range_min    = 30000
-  port_range_max    = 32767
-  security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
-  depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
-}
-
-resource "openstack_networking_secgroup_rule_v2" "same_security_group_ingress_tcp" {
-  direction         = "ingress"
-  ethertype         = "IPv4"
-  protocol          = "tcp"
-  remote_group_id   = openstack_networking_secgroup_v2.cluster_security_group.id
-  security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
-  depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
-}
-
-resource "openstack_networking_secgroup_rule_v2" "same_security_group_ingress_udp" {
-  direction         = "ingress"
-  ethertype         = "IPv4"
-  protocol          = "udp"
-  remote_group_id   = openstack_networking_secgroup_v2.cluster_security_group.id
-  security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
-  depends_on        = [openstack_networking_secgroup_v2.cluster_security_group]
-}
diff --git a/terraform/modules/rke2/templates/user_data.tmpl b/terraform/modules/rke2/templates/user_data.tmpl
deleted file mode 100644
index 7b19e42d52870d5a228268f9e866f516326d1bc4..0000000000000000000000000000000000000000
--- a/terraform/modules/rke2/templates/user_data.tmpl
+++ /dev/null
@@ -1,36 +0,0 @@
-#cloud-config
-
-# SSH config
-no_ssh_fingerprints: false
-ssh:
-  emit_keys_to_console: false
-
-# update and upgrade instance
-package_update: true
-package_upgrade: true
-
-# files to be created on the system
-write_files:
-- path: /etc/fstab
-  permissions: "0644"
-  owner: root:root
-  content: |
-    taiga-nfs.ncsa.illinois.edu:/taiga/ncsa/radiant/${project_name}/${cluster_name} /taiga nfs defaults 0 0
-  append: true
-- path: /etc/profile.d/kubectl.sh
-  permissions: "0644"
-  owner: root:root
-  content: |
-    export KUBECONFIG=/etc/rancher/rke2/rke2.yaml
-    export PATH=$${PATH}:/var/lib/rancher/rke2/bin
-- path: /etc/NetworkManager/conf.d/50-rke2.conf
-  permissions: "0644"
-  owner: root:root
-  content: |
-    [keyfile]
-    unmanaged-devices=interface-name:cali*;interface-name:flannel*
-
-# run this command once the system is booted
-runcmd:
-- ${node_command} ${node_options} --node-name ${node_name}
-- mount -av
diff --git a/terraform/modules/rke2/variables.tf b/terraform/modules/rke2/variables.tf
deleted file mode 100644
index 31f136d73c985f9dd7e09fe89b2b4c4aa46ab646..0000000000000000000000000000000000000000
--- a/terraform/modules/rke2/variables.tf
+++ /dev/null
@@ -1,195 +0,0 @@
-# ----------------------------------------------------------------------
-# CLUSTER INFO
-# ----------------------------------------------------------------------
-variable "cluster_name" {
-  type        = string
-  description = "Desired name of new cluster"
-}
-
-variable "cluster_description" {
-  type        = string
-  description = "Description of new cluster"
-  default     = ""
-}
-
-variable "cluster_direct_access" {
-  type        = bool
-  description = "Allow for direct access"
-  default     = true
-}
-
-# ----------------------------------------------------------------------
-# RANCHER
-# ----------------------------------------------------------------------
-
-variable "rancher_url" {
-  type        = string
-  description = "URL where rancher runs"
-  default     = "https://gonzo-rancher.ncsa.illinois.edu"
-}
-
-variable "rancher_token" {
-  type        = string
-  sensitive   = true
-  description = "Access token for rancher, clusters are created as this user"
-}
-
-# get latest version from rancher using:
-# curl https://releases.rancher.com/kontainer-driver-metadata/release-v2.6/data.json | jq '.rke2.releases | .[].version' | sort
-variable "rke2_version" {
-  type        = string
-  description = "Version of rke2 to install."
-  default     = ""
-}
-
-# ----------------------------------------------------------------------
-# APPLICATIONS
-# ----------------------------------------------------------------------
-
-variable "monitoring_enabled" {
-  type        = bool
-  description = "Enable monitoring in rancher"
-  default     = true
-}
-
-variable "longhorn_enabled" {
-  type        = bool
-  description = "Enable longhorn storage"
-  default     = true
-}
-
-variable "longhorn_replicas" {
-  type        = string
-  description = "Number of replicas"
-  default     = 3
-}
-
-# ----------------------------------------------------------------------
-# USERS
-# ----------------------------------------------------------------------
-
-variable "admin_users" {
-  type        = set(string)
-  description = "List of LDAP users with admin access to cluster."
-  default     = []
-}
-
-variable "admin_groups" {
-  type        = set(string)
-  description = "List of LDAP groups with admin access to cluster."
-  default     = []
-}
-
-variable "member_users" {
-  type        = set(string)
-  description = "List of LDAP users with access to cluster."
-  default     = []
-}
-
-variable "member_groups" {
-  type        = set(string)
-  description = "List of LDAP groups with access to cluster."
-  default     = []
-}
-
-# ----------------------------------------------------------------------
-# OPENSTACK
-# ----------------------------------------------------------------------
-
-variable "openstack_url" {
-  type        = string
-  description = "OpenStack URL"
-  default     = "https://radiant.ncsa.illinois.edu"
-}
-
-variable "openstack_credential_id" {
-  type        = string
-  sensitive   = true
-  description = "Openstack credentials"
-}
-
-variable "openstack_credential_secret" {
-  type        = string
-  sensitive   = true
-  description = "Openstack credentials"
-}
-
-variable "openstack_external_net" {
-  type        = string
-  description = "OpenStack external network"
-  default     = "ext-net"
-}
-
-variable "openstack_ssh_key" {
-  type        = string
-  description = "existing SSH key to use, leave blank for a new one"
-  default     = ""
-}
-
-# ----------------------------------------------------------------------
-# OPENSTACK KUBERNETES
-# ----------------------------------------------------------------------
-
-variable "os" {
-  type        = string
-  description = "Base image to use for the OS"
-  default     = "CentOS-7-GenericCloud-Latest"
-}
-
-variable "controlplane_count" {
-  type        = string
-  description = "Desired quantity of control-plane nodes"
-  default     = 1
-}
-
-variable "controlplane_flavor" {
-  type        = string
-  description = "Desired flavor of control-plane nodes"
-  default     = "m1.medium"
-}
-
-variable "controlplane_disksize" {
-  type        = string
-  description = "Desired disksize of control-plane nodes"
-  default     = 40
-}
-
-variable "worker_count" {
-  type        = string
-  description = "Desired quantity of worker nodes"
-  default     = 1
-}
-
-variable "worker_flavor" {
-  type        = string
-  description = "Desired flavor of worker nodes"
-  default     = "m1.large"
-}
-
-variable "worker_disksize" {
-  type        = string
-  description = "Desired disksize of worker nodes"
-  default     = 40
-}
-
-# ----------------------------------------------------------------------
-# NETWORKING
-# ----------------------------------------------------------------------
-
-variable "network_cidr" {
-  type        = string
-  description = "CIDR to be used for internal network"
-  default     = "192.168.0.0/21"
-}
-
-variable "dns_servers" {
-  type        = set(string)
-  description = "DNS Servers"
-  default     = ["141.142.2.2", "141.142.230.144"]
-}
-
-variable "floating_ip" {
-  type        = string
-  description = "Number of floating IP addresses available for loadbalancers"
-  default     = 2
-}
diff --git a/terraform/modules/rke2/versions.tf b/terraform/modules/rke2/versions.tf
deleted file mode 100644
index f6e381416ed3cdedd5411193f167355b80620d08..0000000000000000000000000000000000000000
--- a/terraform/modules/rke2/versions.tf
+++ /dev/null
@@ -1,16 +0,0 @@
-terraform {
-  required_providers {
-    openstack = {
-      source  = "terraform-provider-openstack/openstack"
-      version = ">= 1.43.0"
-    }
-    rancher2 = {
-      source  = "rancher/rancher2"
-      version = ">= 1.21.0"
-    }
-    random = {
-      source  = "hashicorp/random"
-      version = ">= 3.1.0"
-    }
-  }
-}