Commit 5d2b2b26 authored by Rob Kooper

Merge branch 'v2' into 'main'

backwards compatible

See merge request !1
parents 65a03913 0fd214c5
Tags v2.1.0
@@ -4,6 +4,14 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/).
## 2.1.0 - 2023-08-03
In the next major update, all backwards-compatible code will be removed. Please migrate to the `cluster_machines` setup and set `controlplane_count` and `worker_count` to 0.
### Changed
- This adds backwards compatibility to the stack; you still need to define the cluster machines
## 2.0.0 - 2023-06-28
This is a breaking change. You will need to update your Terraform code to use this new version. This is an example of the variable `cluster_machines`.
......
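The original example is collapsed in this view. A minimal sketch of the migrated setup, with field names inferred from the machines loop later in this diff (module source, cluster name, and counts are hypothetical):

module "kubernetes" {
  source       = "./terraform-kubernetes"   # hypothetical path
  cluster_name = "mykube"                   # hypothetical name

  # New-style node definitions.
  cluster_machines = [
    {
      name        = "controlplane"
      count       = 3
      os          = "centos"
      flavor      = "gp.medium"
      disk        = 40
      zone        = "nova"
      role        = "controlplane"
      floating_ip = true
      labels      = []
    },
    {
      name        = "worker"
      count       = 4
      os          = "ubuntu"
      role        = "worker"
      floating_ip = false
      labels      = []
    },
  ]

  # Per the 2.1.0 note: disable the deprecated node variables.
  controlplane_count = 0
  worker_count       = 0
}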
locals {
cluster_argocd_url = "${var.rancher_url}/k8s/clusters/${var.cluster_kube_id}"
cluster = templatefile("${path.module}/templates/cluster.yaml.tmpl", {
argocd_cluster = templatefile("${path.module}/templates/cluster.yaml.tmpl", {
cluster_name = var.cluster_name
cluster_url = local.cluster_argocd_url
rancher_token = var.rancher_token
})
project = templatefile("${path.module}/templates/project.yaml.tmpl", {
argocd_cluster_project = templatefile("${path.module}/templates/project.yaml.tmpl", {
cluster_name = var.cluster_name
cluster_url = local.cluster_argocd_url
admin_groups = var.admin_groups
@@ -16,7 +16,7 @@ locals {
member_users = var.member_users
})
app = templatefile("${path.module}/templates/argocd.yaml.tmpl", {
argocd_cluster_app = templatefile("${path.module}/templates/argocd.yaml.tmpl", {
cluster_name = var.cluster_name
cluster_url = local.cluster_argocd_url
cluster_kube_id = var.cluster_kube_id
@@ -55,15 +55,15 @@ locals {
# ----------------------------------------------------------------------
resource "kubectl_manifest" "argocd_cluster" {
count = var.argocd_kube_id != "" ? 1 : 0
yaml_body = local.cluster
yaml_body = local.argocd_cluster
}
resource "kubectl_manifest" "argocd_project" {
resource "kubectl_manifest" "argocd_cluster_project" {
count = var.argocd_kube_id != "" ? 1 : 0
yaml_body = local.project
yaml_body = local.argocd_cluster_project
}
resource "kubectl_manifest" "argocd_app" {
resource "kubectl_manifest" "argocd_cluster_app" {
count = var.argocd_kube_id != "" ? 1 : 0
yaml_body = local.app
yaml_body = local.argocd_cluster_app
}
output "cluster" {
output "argocd_cluster" {
description = "ArgoCD cluster definition"
sensitive = true
value = local.cluster
value = local.argocd_cluster
}
output "project" {
output "argocd_cluster_project" {
description = "ArgoCD project and permissions"
sensitive = true
value = local.project
value = local.argocd_cluster_project
}
output "app" {
output "argocd_cluster_app" {
description = "ArogCD app of apps"
sensitive = true
value = local.app
value = local.argocd_cluster_app
}
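The `templates/*.yaml.tmpl` files themselves are not part of this diff. For orientation, ArgoCD's declarative setup represents an external cluster as a Kubernetes Secret labeled `argocd.argoproj.io/secret-type: cluster`, so `cluster.yaml.tmpl` plausibly renders something like the following. This is a sketch built from the template variables passed above and ArgoCD's documented secret format, not the actual template:

apiVersion: v1
kind: Secret
metadata:
  name: ${cluster_name}
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: cluster
type: Opaque
stringData:
  name: ${cluster_name}
  server: ${cluster_url}
  config: |
    {
      "bearerToken": "${rancher_token}"
    }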
# external network
data "openstack_networking_network_v2" "ext_net" {
name = var.openstack_external_net
}
# boot image
# DEPRECATED
data "openstack_images_image_v2" "boot" {
name = var.os
most_recent = true
}
# openstack project name (bbXX)
data "openstack_identity_auth_scope_v3" "scope" {
name = "my_scope"
}
data "openstack_images_image_v2" "os_image" {
for_each = var.openstack_os_image
name = each.value
most_recent = true
}
# Each cluster will either have a shared key, or its own
# unique key.
resource "openstack_compute_keypair_v2" "key" {
#count = 1 #var.openstack_ssh_key == "" ? 0 : 1
name = var.cluster_name
}
# set local variable to hold final key, either created or
# loaded.
locals {
key = var.cluster_name # var.openstack_ssh_key == "" ? var.cluster_name : var.openstack_ssh_key
}
@@ -2,14 +2,10 @@
# private ip spaces of 192.168.0.0/21. Each of the machines will
# have a fixed ip address in this private IP space.
#
# For the worker machines, there will be a set of floating IP addresses
# For the worker nodes, there will be a set of floating IP addresses
# that can be given to a load balancer (using for example metallb).
#
data "openstack_networking_network_v2" "ext_net" {
name = var.openstack_external_net
}
# ----------------------------------------------------------------------
# setup network, subnet and router
# ----------------------------------------------------------------------
@@ -85,4 +81,43 @@ resource "openstack_networking_floatingip_v2" "machine_ip" {
port_id = openstack_networking_port_v2.machine_ip[each.key].id
}
# ----------------------------------------------------------------------
# control plane
# DEPRECATED
# ----------------------------------------------------------------------
resource "openstack_networking_port_v2" "controlplane_ip" {
count = var.controlplane_count
name = local.controlplane[count.index]
network_id = openstack_networking_network_v2.cluster_net.id
security_group_ids = [openstack_networking_secgroup_v2.cluster_security_group.id]
depends_on = [openstack_networking_router_interface_v2.kube_gateway]
}
resource "openstack_networking_floatingip_v2" "controlplane_ip" {
count = var.controlplane_count
description = format("%s-controlplane-%d", var.cluster_name, count.index + 1)
pool = data.openstack_networking_network_v2.ext_net.name
port_id = element(openstack_networking_port_v2.controlplane_ip.*.id, count.index)
}
# ----------------------------------------------------------------------
# worker nodes
# DEPRECATED
# ----------------------------------------------------------------------
# Create the worker IPs; these can also route traffic for the
# floating IPs.
resource "openstack_networking_port_v2" "worker_ip" {
count = var.worker_count
name = local.worker[count.index]
network_id = openstack_networking_network_v2.cluster_net.id
security_group_ids = [openstack_networking_secgroup_v2.cluster_security_group.id]
depends_on = [openstack_networking_router_interface_v2.kube_gateway]
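  # Allow each worker port to carry traffic for the fixed IPs of the cluster's
  # floating-IP ports (e.g. MetalLB-managed load-balancer addresses); without
  # these allowed_address_pairs entries, OpenStack port security drops it.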
dynamic "allowed_address_pairs" {
for_each = openstack_networking_port_v2.floating_ip.*.all_fixed_ips.0
content {
ip_address = allowed_address_pairs.value
}
}
}
locals {
images = {
"centos" = "CentOS-7-GenericCloud-Latest",
"ubuntu" = "Ubuntu Jammy (22.04) latest"
}
usernames = {
"centos" = "centos",
"ubuntu" = "ubuntu"
@@ -18,10 +13,10 @@ locals {
for x in var.cluster_machines : [
for i in range(x.count == null ? 1 : x.count) : {
hostname = format("%s-%s-%02d", var.cluster_name, x.name, (i + 1))
username = lookup(local.usernames, x.os, "centos")
image_name = lookup(local.images, x.os, "CentOS-7-GenericCloud-Latest")
username = lookup(local.usernames, x.os, "UNDEFINED")
image_name = lookup(var.openstack_os_image, x.os, "UNDEFINED")
flavor = try(x.flavor, "gp.medium")
image_id = data.openstack_images_image_v2.boot[try(x.os, "centos")].id
image_id = data.openstack_images_image_v2.os_image[try(x.os, "UNDEFINED")].id
disk_size = try(x.disk, 40)
zone = try(x.zone, "nova")
role = try(x.role, "worker")
@@ -31,23 +26,16 @@ locals {
]
])
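  # Illustration, with a hypothetical cluster_name "mykube": the entry
  #   { name = "worker", count = 2, os = "ubuntu" }
  # expands to two machine objects such as
  #   { hostname = "mykube-worker-01", username = "ubuntu",
  #     image_name = "Ubuntu Jammy (22.04) latest", flavor = "gp.medium",
  #     disk_size = 40, zone = "nova", role = "worker", ... }
  # and likewise mykube-worker-02.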
jumphost = [for vm in local.machines : vm.hostname if vm.floating_ip][0]
}
data "openstack_images_image_v2" "boot" {
for_each = local.images
name = each.value
most_recent = true
}
jumphost = concat([for vm in local.machines : vm.hostname if vm.floating_ip], local.controlplane)[0]
data "openstack_identity_auth_scope_v3" "scope" {
name = var.cluster_name
}
resource "openstack_compute_keypair_v2" "key" {
name = var.cluster_name
# DEPRECATED
controlplane = [for l in range(var.controlplane_count) : var.old_hostnames ? format("%s-controlplane-%d", var.cluster_name, l) : format("%s-controlplane-%d", var.cluster_name, l + 1)]
worker = [for l in range(var.worker_count) : var.old_hostnames ? format("%s-worker-%d", var.cluster_name, l) : format("%s-worker-%02d", var.cluster_name, l + 1)]
}
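# Illustration of the old_hostnames toggle with a hypothetical cluster_name
# "mykube" and two nodes of each type:
#   old_hostnames = true  -> mykube-controlplane-0, mykube-controlplane-1,
#                            mykube-worker-0,       mykube-worker-1
#   old_hostnames = false -> mykube-controlplane-1, mykube-controlplane-2,
#                            mykube-worker-01,      mykube-worker-02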
# ----------------------------------------------------------------------
# cluster nodes
# ----------------------------------------------------------------------
resource "openstack_compute_instance_v2" "machine" {
for_each = { for vm in local.machines : vm.hostname => vm }
name = each.value.hostname
@@ -88,3 +76,118 @@ resource "openstack_compute_instance_v2" "machine" {
node_labels = join(" ", [for l in each.value.labels : format("-l %s", replace(l, " ", "_"))])
}))
}
# ----------------------------------------------------------------------
# control-plane nodes
# DEPRECATED
# ----------------------------------------------------------------------
resource "openstack_compute_instance_v2" "controlplane" {
count = var.controlplane_count
name = local.controlplane[count.index]
image_name = var.os
availability_zone = var.openstack_zone
flavor_name = var.controlplane_flavor
key_pair = openstack_compute_keypair_v2.key.name
config_drive = false
depends_on = [
openstack_networking_secgroup_rule_v2.same_security_group_ingress_tcp,
]
security_groups = [
openstack_networking_secgroup_v2.cluster_security_group.name
]
#echo "update hosts"
#%{ for ip in openstack_networking_port_v2.worker_ip[count.index].all_fixed_ips }
#echo "$${ip} $${node_name} $(hostname) $(hostname -f)" >> /etc/hosts
#%{ endfor }
user_data = base64encode(templatefile("${path.module}/templates/user_data.tmpl", {
private_key = openstack_compute_keypair_v2.key.private_key
project_name = data.openstack_identity_auth_scope_v3.scope.project_name
cluster_name = var.cluster_name
username = "centos"
node_name = local.controlplane[count.index]
node_command = rancher2_cluster.kube.cluster_registration_token.0.node_command
node_options = "--address awspublic --internal-address awslocal --controlplane --etcd"
node_labels = ""
}))
block_device {
uuid = data.openstack_images_image_v2.boot.id
source_type = "image"
volume_size = var.controlplane_disksize
destination_type = "volume"
delete_on_termination = true
}
network {
port = element(openstack_networking_port_v2.controlplane_ip.*.id, count.index)
}
lifecycle {
ignore_changes = [
key_pair,
block_device,
user_data,
availability_zone
]
}
}
# ----------------------------------------------------------------------
# worker nodes
# DEPRECATED
# ----------------------------------------------------------------------
resource "openstack_compute_instance_v2" "worker" {
count = var.worker_count
name = local.worker[count.index]
image_name = var.os
availability_zone = var.openstack_zone
flavor_name = var.worker_flavor
key_pair = local.key
config_drive = false
depends_on = [
openstack_networking_secgroup_rule_v2.same_security_group_ingress_tcp
]
security_groups = [
openstack_networking_secgroup_v2.cluster_security_group.name
]
user_data = base64encode(templatefile("${path.module}/templates/user_data.tmpl", {
private_key = openstack_compute_keypair_v2.key.private_key
project_name = data.openstack_identity_auth_scope_v3.scope.project_name
cluster_name = var.cluster_name
node_name = local.worker[count.index]
username = "centos"
node_command = rancher2_cluster.kube.cluster_registration_token.0.node_command
node_options = "--worker"
node_labels = ""
}))
block_device {
uuid = data.openstack_images_image_v2.boot.id
source_type = "image"
volume_size = var.worker_disksize
destination_type = "volume"
boot_index = 0
delete_on_termination = true
}
network {
port = element(openstack_networking_port_v2.worker_ip.*.id, count.index)
}
lifecycle {
ignore_changes = [
key_pair,
block_device,
user_data,
availability_zone
]
}
}
@@ -24,6 +24,15 @@ output "ssh_config" {
value = <<-EOT
# Automatically created by terraform
%{~for i, x in openstack_compute_instance_v2.controlplane.*}
Host ${x.name}
HostName ${openstack_networking_floatingip_v2.controlplane_ip[i].address}
StrictHostKeyChecking no
UserKnownHostsFile=/dev/null
IdentityFile ${pathexpand("~/.ssh/${var.cluster_name}.pem")}
User centos
%{~endfor}
%{~for x in [for m in local.machines : m if m.floating_ip]}
Host ${x.hostname}
HostName ${openstack_networking_floatingip_v2.machine_ip[x.hostname].address}
@@ -33,6 +42,16 @@ Host ${x.hostname}
User ${x.username}
%{~endfor}
%{~for x in openstack_compute_instance_v2.worker.*}
Host ${x.name}
HostName ${x.network[0].fixed_ip_v4}
StrictHostKeyChecking no
ProxyJump ${local.jumphost}
UserKnownHostsFile=/dev/null
IdentityFile ${pathexpand("~/.ssh/${var.cluster_name}.pem")}
User centos
%{~endfor}
%{~for x in [for m in local.machines : m if !m.floating_ip]}
Host ${x.hostname}
ProxyJump ${local.jumphost}
......
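Rendered, this output is a plain OpenSSH client config. For a hypothetical cluster named mykube, with one deprecated control-plane node holding a floating IP and one internal machine, it would look roughly like:

# Automatically created by terraform
Host mykube-controlplane-1
  HostName 141.142.0.10                  # floating IP address (hypothetical)
  StrictHostKeyChecking no
  UserKnownHostsFile=/dev/null
  IdentityFile ~/.ssh/mykube.pem
  User centos

Host mykube-worker-01
  ProxyJump mykube-controlplane-1
  ...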
@@ -32,13 +32,20 @@ resource "rancher2_cluster" "kube" {
}
}
# Create a new rancher2 Cluster Sync for foo-custom cluster
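# rancher2_cluster_sync waits until the cluster is active, so the role
# bindings and apps below that reference rancher2_cluster_sync.kube are only
# created once the cluster is actually reachable.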
resource "rancher2_cluster_sync" "kube" {
depends_on = [openstack_compute_instance_v2.controlplane[0]]
cluster_id = rancher2_cluster.kube.id
wait_catalogs = false
}
# ----------------------------------------------------------------------
# cluster access
# ----------------------------------------------------------------------
resource "rancher2_cluster_role_template_binding" "admin_users" {
for_each = var.admin_users
name = "${rancher2_cluster.kube.id}-user-${replace(each.value, "_", "-")}"
cluster_id = rancher2_cluster.kube.id
name = "${rancher2_cluster_sync.kube.id}-user-${replace(each.value, "_", "-")}"
cluster_id = rancher2_cluster_sync.kube.id
role_template_id = "cluster-owner"
user_principal_id = "openldap_user://uid=${each.value},ou=People,dc=ncsa,dc=illinois,dc=edu"
lifecycle {
@@ -52,8 +59,8 @@ resource "rancher2_cluster_role_template_binding" "admin_users" {
resource "rancher2_cluster_role_template_binding" "admin_groups" {
for_each = var.admin_groups
name = "${rancher2_cluster.kube.id}-group-${replace(each.value, "_", "-")}"
cluster_id = rancher2_cluster.kube.id
name = "${rancher2_cluster_sync.kube.id}-group-${replace(each.value, "_", "-")}"
cluster_id = rancher2_cluster_sync.kube.id
role_template_id = "cluster-owner"
user_principal_id = "openldap_group://cn=${each.value},ou=Groups,dc=ncsa,dc=illinois,dc=edu"
lifecycle {
@@ -67,8 +74,8 @@ resource "rancher2_cluster_role_template_binding" "admin_groups" {
resource "rancher2_cluster_role_template_binding" "member_users" {
for_each = var.member_users
name = "${rancher2_cluster.kube.id}-user-${replace(each.value, "_", "-")}"
cluster_id = rancher2_cluster.kube.id
name = "${rancher2_cluster_sync.kube.id}-user-${replace(each.value, "_", "-")}"
cluster_id = rancher2_cluster_sync.kube.id
role_template_id = "cluster-member"
user_principal_id = "openldap_user://uid=${each.value},ou=People,dc=ncsa,dc=illinois,dc=edu"
lifecycle {
@@ -82,8 +89,8 @@ resource "rancher2_cluster_role_template_binding" "member_users" {
resource "rancher2_cluster_role_template_binding" "member_groups" {
for_each = var.member_groups
name = "${rancher2_cluster.kube.id}-group-${replace(each.value, "_", "-")}"
cluster_id = rancher2_cluster.kube.id
name = "${rancher2_cluster_sync.kube.id}-group-${replace(each.value, "_", "-")}"
cluster_id = rancher2_cluster_sync.kube.id
role_template_id = "cluster-member"
user_principal_id = "openldap_group://cn=${each.value},ou=Groups,dc=ncsa,dc=illinois,dc=edu"
lifecycle {
@@ -94,3 +101,57 @@ resource "rancher2_cluster_role_template_binding" "member_groups" {
]
}
}
# ----------------------------------------------------------------------
# longhorn storage
# DEPRECATED
# ----------------------------------------------------------------------
resource "rancher2_app_v2" "longhorn-system" {
count = var.longhorn_enabled ? 1 : 0
cluster_id = rancher2_cluster_sync.kube.cluster_id
name = "longhorn"
namespace = "longhorn-system"
repo_name = "rancher-charts"
chart_name = "longhorn"
project_id = rancher2_cluster_sync.kube.system_project_id
values = <<EOF
defaultSettings:
backupTarget: nfs://radiant-nfs.ncsa.illinois.edu:/radiant/projects/${data.openstack_identity_auth_scope_v3.scope.project_name}/${var.cluster_name}/backup
defaultReplicaCount: ${var.longhorn_replicas}
persistence:
defaultClass: false
defaultClassReplicaCount: ${var.longhorn_replicas}
EOF
lifecycle {
ignore_changes = [
values
]
}
}
# ----------------------------------------------------------------------
# monitoring
# DEPRECATED
# ----------------------------------------------------------------------
resource "rancher2_app_v2" "monitor" {
count = var.monitoring_enabled ? 1 : 0
cluster_id = rancher2_cluster_sync.kube.cluster_id
name = "rancher-monitoring"
namespace = "cattle-monitoring-system"
repo_name = "rancher-charts"
chart_name = "rancher-monitoring"
project_id = rancher2_cluster_sync.kube.system_project_id
// values = <<EOF
//prometheus:
// resources:
// core:
// limits:
// cpu: "4000m"
// memory: "6144Mi"
//EOF
lifecycle {
ignore_changes = [
values
]
}
}
@@ -3,6 +3,26 @@ resource "openstack_networking_secgroup_v2" "cluster_security_group" {
description = "${var.cluster_name} kubernetes cluster security group"
}
# ----------------------------------------------------------------------
# Egress
# ----------------------------------------------------------------------
#Egress IPv4 Any Any 0.0.0.0/0 - -
#resource "openstack_networking_secgroup_rule_v2" "egress_ipv4" {
# direction = "egress"
# ethertype = "IPv4"
# security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
# depends_on = [openstack_networking_secgroup_v2.cluster_security_group]
#}
#Egress IPv6 Any Any ::/0 - -
#resource "openstack_networking_secgroup_rule_v2" "egress_ipv6" {
# direction = "egress"
# ethertype = "IPv6"
# security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
# depends_on = [openstack_networking_secgroup_v2.cluster_security_group]
#}
# ----------------------------------------------------------------------
# Ingress
# ----------------------------------------------------------------------
@@ -52,30 +72,32 @@ resource "openstack_networking_secgroup_rule_v2" "ingress_https" {
depends_on = [openstack_networking_secgroup_v2.cluster_security_group]
}
# Ingress IPv4 TCP 6443 141.142.0.0/16 - kube api
# Ingress IPv4 TCP 6443 rancher - kube api
resource "openstack_networking_secgroup_rule_v2" "ingress_kubeapi" {
description = "kubeapi"
for_each = var.openstack_security_kubernetes
description = "kubeapi ${each.key}"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 6443
port_range_max = 6443
remote_ip_prefix = var.openstack_security_kubernetes
remote_ip_prefix = each.value
security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
depends_on = [openstack_networking_secgroup_v2.cluster_security_group]
}
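# With the default openstack_security_kubernetes map (see variables.tf below),
# this for_each creates one rule per Rancher node, e.g.
#   ingress_kubeapi["rancher-1"] with description "kubeapi rancher-1" and
#   remote_ip_prefix "141.142.218.167/16", and likewise rancher-2 / rancher-3.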
# Ingress IPv4 TCP 30000 - 32767 0.0.0.0/0 - nodeport
# resource "openstack_networking_secgroup_rule_v2" "ingress_nodeport" {
# description = "nodeport"
# direction = "ingress"
# ethertype = "IPv4"
# protocol = "tcp"
# port_range_min = 30000
# port_range_max = 32767
# security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
# depends_on = [openstack_networking_secgroup_v2.cluster_security_group]
# }
# Ingress IPv4 TCP 30000 - 32767 0.0.0.0/0 - nodeport
# DEPRECATED
resource "openstack_networking_secgroup_rule_v2" "ingress_nodeport" {
description = "nodeport"
direction = "ingress"
ethertype = "IPv4"
protocol = "tcp"
port_range_min = 30000
port_range_max = 32767
security_group_id = openstack_networking_secgroup_v2.cluster_security_group.id
depends_on = [openstack_networking_secgroup_v2.cluster_security_group]
}
resource "openstack_networking_secgroup_rule_v2" "same_security_group_ingress_tcp" {
direction = "ingress"
@@ -95,7 +117,6 @@ resource "openstack_networking_secgroup_rule_v2" "same_security_group_ingress_ud
depends_on = [openstack_networking_secgroup_v2.cluster_security_group]
}
# Ingress IPv4 TCP 1 - 65535 192.168.100.0/24 - -
# Ingress IPv4 TCP 1 - 65535 141.142.216.0/21 - -
# Ingress IPv4 TCP 2376 141.142.0.0/16 - -
......
@@ -44,16 +44,16 @@ write_files:
else
echo "Don't know how to install iscsi/nfs"
fi
echo "mounting taiga"
mkdir /taiga
#mount -av
echo "install docker"
curl https://releases.rancher.com/install-docker/20.10.sh | sh
curl https://releases.rancher.com/install-docker/24.0.sh | sh
systemctl enable docker
systemctl start docker
usermod -aG docker ${username}
echo "connect to rancher"
${node_command} ${node_options} ${node_labels}
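# node_command is the registration command taken from
# rancher2_cluster.kube.cluster_registration_token; node_options supplies the
# role flags (e.g. --controlplane --etcd or --worker) and node_labels any labels.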
echo "mounting taiga"
mkdir /taiga
#mount -av
echo "all done"
# run this command once the system is booted
......
@@ -24,6 +24,28 @@ variable "cluster_machines" {
default = []
}
# ----------------------------------------------------------------------
# APPLICATIONS
# ----------------------------------------------------------------------
variable "monitoring_enabled" {
type = bool
description = "Enable monitoring in rancher"
default = true
}
variable "longhorn_enabled" {
type = bool
description = "Enable longhorn storage"
default = true
}
variable "longhorn_replicas" {
type = string
description = "Number of replicas"
default = 3
}
# ----------------------------------------------------------------------
# RANCHER
# ----------------------------------------------------------------------
@@ -44,6 +66,7 @@ variable "rancher_token" {
variable "rke1_version" {
type = string
description = "Version of rke1 to install."
default = "v1.21.14-rancher1-1"
}
# ----------------------------------------------------------------------
@@ -81,7 +104,7 @@ variable "member_groups" {
variable "openstack_url" {
type = string
description = "OpenStack URL"
default = "https://radiant.ncsa.illinois.edu:5000"
default = "https://radiant.ncsa.illinois.edu"
}
variable "openstack_credential_id" {
@@ -102,10 +125,97 @@ variable "openstack_external_net" {
default = "ext-net"
}
# DEPRECATED, new key will always be created
variable "openstack_ssh_key" {
type = string
description = "existing SSH key to use, leave blank for a new one"
default = ""
}
# DEPRECATED
variable "openstack_zone" {
type = string
description = "default zone to use for openstack nodes"
default = "nova"
}
variable "openstack_security_kubernetes" {
type = map(any)
description = "IP address to allow connections to kube api port, default is rancher nodes"
default = {
"rancher-1" : "141.142.218.167/16"
"rancher-2" : "141.142.217.171/16"
"rancher-3" : "141.142.217.184/16"
}
}
variable "openstack_os_image" {
type = map(any)
description = "Map from short OS name to image"
default = {
"centos" = "CentOS-7-GenericCloud-Latest"
"ubuntu" = "Ubuntu Jammy (22.04) latest"
}
}
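# Deployments can extend this map to support more operating systems without
# changing the module, e.g. (hypothetical image name):
#   openstack_os_image = {
#     "centos" = "CentOS-7-GenericCloud-Latest"
#     "ubuntu" = "Ubuntu Jammy (22.04) latest"
#     "rocky"  = "Rocky-9-GenericCloud-Latest"
#   }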
# ----------------------------------------------------------------------
# OPENSTACK NODES
# ----------------------------------------------------------------------
# DEPRECATED
variable "old_hostnames" {
type = bool
description = "should old hostname be used (base 0)"
default = false
}
# DEPRECATED
variable "os" {
type = string
description = "Base image to use for the OS"
default = "CentOS-7-GenericCloud-Latest"
}
# DEPRECATED
variable "controlplane_count" {
type = string
description = "Desired quantity of control-plane nodes"
default = 1
}
# DEPRECATED
variable "controlplane_flavor" {
type = string
description = "Desired flavor of control-plane nodes"
default = "m1.medium"
}
# DEPRECATED
variable "controlplane_disksize" {
type = string
description = "Desired disksize of control-plane nodes"
default = 40
}
# DEPRECATED
variable "worker_count" {
type = string
description = "Desired quantity of worker nodes"
default = 1
}
# DEPRECATED
variable "worker_flavor" {
type = string
description = "Desired flavor of worker nodes"
default = "m1.large"
}
# DEPRECATED
variable "worker_disksize" {
type = string
description = "IP address to allow connections to kube api port"
default = "141.142.0.0/16"
description = "Desired disksize of worker nodes"
default = 40
}
# ----------------------------------------------------------------------
......