From 4698a9742a94dc0389a75b289d706d96bc2ec1f7 Mon Sep 17 00:00:00 2001
From: addetz <43963729+addetz@users.noreply.github.com>
Date: Tue, 7 Jan 2025 20:44:45 +0000
Subject: [PATCH 01/11] docs: add vmo cluster deployment DOC-1358
docs: add creation of virtual machine
docs: add test cases DOC-1358
docs: add readme DOC-1358
---
.gitleaksignore | 1 +
terraform/vmo-cluster/README.md | 74 +
terraform/vmo-cluster/cluster_profiles.tf | 68 +
terraform/vmo-cluster/clusters.tf | 52 +
terraform/vmo-cluster/data.tf | 57 +
terraform/vmo-cluster/inputs.tf | 148 +
.../vmo-cluster/manifests/cni-values.yaml | 3547 +++++++++++++++++
.../vmo-cluster/manifests/csi-values.yaml | 1317 ++++++
.../vmo-cluster/manifests/k8s-values.yaml | 118 +
.../vmo-cluster/manifests/ubuntu-values.yaml | 47 +
.../manifests/vmo-extras-manifest.yaml | 136 +
.../manifests/vmo-extras-values.yaml | 2 +
.../vmo-cluster/manifests/vmo-values.yaml | 600 +++
terraform/vmo-cluster/provider.tf | 29 +
terraform/vmo-cluster/terraform.tfvars | 26 +
.../maas-cluster-missing-values.tftest.hcl | 37 +
.../maas-cluster-replace-values.tftest.hcl | 37 +
.../tests/maas-cluster-vm.tftest.hcl | 42 +
.../tests/maas-cluster-zero-nodes.tftest.hcl | 31 +
.../vmo-cluster/tests/maas-cluster.tftest.hcl | 37 +
.../tests/project-palette.tftest.hcl | 16 +
.../vmo-cluster/virtual-machines/cloud-init | 22 +
terraform/vmo-cluster/virtual_machines.tf | 106 +
23 files changed, 6550 insertions(+)
create mode 100644 .gitleaksignore
create mode 100644 terraform/vmo-cluster/README.md
create mode 100644 terraform/vmo-cluster/cluster_profiles.tf
create mode 100644 terraform/vmo-cluster/clusters.tf
create mode 100644 terraform/vmo-cluster/data.tf
create mode 100644 terraform/vmo-cluster/inputs.tf
create mode 100644 terraform/vmo-cluster/manifests/cni-values.yaml
create mode 100644 terraform/vmo-cluster/manifests/csi-values.yaml
create mode 100644 terraform/vmo-cluster/manifests/k8s-values.yaml
create mode 100644 terraform/vmo-cluster/manifests/ubuntu-values.yaml
create mode 100644 terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
create mode 100644 terraform/vmo-cluster/manifests/vmo-extras-values.yaml
create mode 100644 terraform/vmo-cluster/manifests/vmo-values.yaml
create mode 100644 terraform/vmo-cluster/provider.tf
create mode 100644 terraform/vmo-cluster/terraform.tfvars
create mode 100644 terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl
create mode 100644 terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl
create mode 100644 terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl
create mode 100644 terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl
create mode 100644 terraform/vmo-cluster/tests/maas-cluster.tftest.hcl
create mode 100644 terraform/vmo-cluster/tests/project-palette.tftest.hcl
create mode 100644 terraform/vmo-cluster/virtual-machines/cloud-init
create mode 100644 terraform/vmo-cluster/virtual_machines.tf
diff --git a/.gitleaksignore b/.gitleaksignore
new file mode 100644
index 0000000..d043ec7
--- /dev/null
+++ b/.gitleaksignore
@@ -0,0 +1 @@
+cbb0d660a09cc0c47851ef59ac7f0c5fca177371:terraform/vmo-cluster/manifests/k8s-values.yaml:generic-api-key:114
diff --git a/terraform/vmo-cluster/README.md b/terraform/vmo-cluster/README.md
new file mode 100644
index 0000000..f62ee05
--- /dev/null
+++ b/terraform/vmo-cluster/README.md
@@ -0,0 +1,74 @@
+# Deploy and Manage VMs using Palette VMO
+
+This folder contains the demo code for the **Deploy and Manage VMs using Palette VMO** tutorial.
+
+The Terraform code has two main toggle variables that you can use to deploy resources to [Canonical MAAS](https://maas.io/docs).
+
+| Variable | Provider | Description | Default |
+| ---------------- | -------- | ------------------------------------------------- | ------- |
+| `deploy-maas` | MAAS | Enable to deploy a cluster to MAAS. | `false` |
+| `deploy-maas-vm` | MAAS | Enable to deploy a VM to a deployed MAAS cluster. | `false` |
+
+
+To get started, open the **terraform.tfvars** file. Toggle the provider variables as specified in the table and provide values for your cloud provider variables, replacing all instances of the string `REPLACE ME`.
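+
+For example, a minimal **terraform.tfvars** for a MAAS deployment might look like the following sketch. The values below are illustrative placeholders, not defaults shipped with this repository; replace them with the PCG name, domain, resource pools, availability zones, and tags that exist in your own MAAS and Palette environment.
+
+```hcl
+# Illustrative values only -- substitute your own environment details.
+palette-project = "Default"            # Palette project to deploy into
+
+deploy-maas    = true                  # deploy the MAAS cluster
+deploy-maas-vm = false                 # flip to true once the cluster is healthy
+
+pcg-name    = "example-maas-pcg"       # placeholder PCG name
+maas-domain = "maas.example.internal"  # placeholder MAAS domain
+
+maas-worker-nodes                = 1
+maas-control-plane-nodes         = 1
+maas-worker-resource-pool        = "default"
+maas-control-plane-resource-pool = "default"
+maas-worker-azs                  = ["az1"]
+maas-control-plane-azs           = ["az1"]
+maas-worker-node-tags            = ["vmo-tutorial"]
+maas-control-plane-node-tags     = ["vmo-tutorial"]
+```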
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1.9 |
+| [local](#requirement\_local) | 2.4.1 |
+| [spectrocloud](#requirement\_spectrocloud) | >= 0.22.2 |
+| [tls](#requirement\_tls) | 4.0.4 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [local](#provider\_local) | 2.4.1 |
+| [spectrocloud](#provider\_spectrocloud) | 0.22.2 |
+| [tls](#provider\_tls) | 4.0.4 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [spectrocloud_cluster_profile.maas-vmo-profile](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_profile) | resource |
+| [spectrocloud_cluster_maas.maas-cluster](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_maas) | resource |
+| [spectrocloud_virtual_machine.virtual-machine](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/virtual_machine) | resource |
+| [spectrocloud_cloudaccount_maas.account](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/cloudaccount_maas) | data source |
+| [spectrocloud_pack.maas_vmo](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
+| [spectrocloud_pack.maas_cni](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
+| [spectrocloud_pack.maas_csi](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
+| [spectrocloud_pack.maas_k8s](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
+| [spectrocloud_pack.maas_ubuntu](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
+| [spectrocloud_cluster.maas_vmo_cluster](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/cluster) | data source |
+| [spectrocloud_registry.public_registry](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/registry) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [deploy-maas](#input\_deploy-maas) | A flag for enabling a cluster deployment on MAAS. | `bool` | n/a | yes |
+| [deploy-maas-vm](#input\_deploy-maas-vm) | A flag for enabling VM creation on a MAAS cluster. | `bool` | n/a | yes |
+| [pcg-name](#input\_pcg-name) | The name of the PCG that will be used to deploy the cluster. | `string` | n/a | yes |
+| [maas-domain](#input\_maas-domain) | The MAAS domain that will be used to deploy the cluster. | `string` | n/a | yes |
+| [maas-worker-nodes](#input\_maas-worker-nodes) | The number of worker nodes that will be used to deploy the cluster. | `number` | `1` | no |
+| [maas-control-plane-nodes](#input\_maas-control-plane-nodes) | The number of control plane nodes that will be used to deploy the cluster. | `number` | `1` | no |
+| [maas-worker-resource-pool](#input\_maas-worker-resource-pool) | The resource pool to deploy the worker nodes to. | `string` | n/a | yes |
+| [maas-control-plane-resource-pool](#input\_maas-control-plane-resource-pool) | The resource pool to deploy the control plane nodes to. | `string` | n/a | yes |
+| [maas-worker-azs](#input\_maas-worker-azs) | The set of availability zones to deploy the worker nodes to. | `set(string)` | n/a | yes |
+| [maas-control-plane-azs](#input\_maas-control-plane-azs) | The set of availability zones to deploy the control plane nodes to. | `set(string)` | n/a | yes |
+| [maas-worker-node-tags](#input\_maas-worker-node-tags) | The set of tag values that you want to apply to all nodes in the worker node pool. | `set(string)` | n/a | yes |
+| [maas-control-plane-node-tags](#input\_maas-control-plane-node-tags) | The set of tag values that you want to apply to all nodes in the control plane node pool. | `set(string)` | n/a | yes |
+| [tags](#input\_tags) | The default tags to apply to Palette resources. | `list(string)` | <pre>[<br>  "spectro-cloud-education",<br>  "spectrocloud:tutorials",<br>  "terraform_managed:true",<br>  "tutorial:vmo-cluster-deployment"<br>]</pre> | no |
+
+## Outputs
+No outputs.
+
+
diff --git a/terraform/vmo-cluster/cluster_profiles.tf b/terraform/vmo-cluster/cluster_profiles.tf
new file mode 100644
index 0000000..04bfbf7
--- /dev/null
+++ b/terraform/vmo-cluster/cluster_profiles.tf
@@ -0,0 +1,68 @@
+
+##########################
+# MAAS VMO Cluster Profile
+##########################
+resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
+ count = var.deploy-maas ? 1 : 0
+
+ name = "tf-maas-vmo-profile"
+ description = "A basic cluster profile for MAAS VMO"
+ tags = concat(var.tags, ["env:maas"])
+ cloud = "maas"
+ type = "cluster"
+ version = "1.0.0"
+
+ pack {
+ name = data.spectrocloud_pack.maas_ubuntu.name
+ tag = data.spectrocloud_pack.maas_ubuntu.version
+ uid = data.spectrocloud_pack.maas_ubuntu.id
+ values = file("manifests/ubuntu-values.yaml")
+ type = "spectro"
+ }
+
+ pack {
+ name = data.spectrocloud_pack.maas_k8s.name
+ tag = data.spectrocloud_pack.maas_k8s.version
+ uid = data.spectrocloud_pack.maas_k8s.id
+ values = file("manifests/k8s-values.yaml")
+ type = "spectro"
+ }
+
+ pack {
+ name = data.spectrocloud_pack.maas_cni.name
+ tag = data.spectrocloud_pack.maas_cni.version
+ uid = data.spectrocloud_pack.maas_cni.id
+ values = file("manifests/cni-values.yaml")
+ type = "spectro"
+ }
+
+ pack {
+ name = data.spectrocloud_pack.maas_csi.name
+ tag = data.spectrocloud_pack.maas_csi.version
+ uid = data.spectrocloud_pack.maas_csi.id
+ values = templatefile("manifests/csi-values.yaml", {
+ worker_nodes = var.maas-worker-nodes,
+ })
+ type = "spectro"
+ }
+
+ pack {
+ name = data.spectrocloud_pack.maas_vmo.name
+ tag = data.spectrocloud_pack.maas_vmo.version
+ uid = data.spectrocloud_pack.maas_vmo.id
+ values = file("manifests/vmo-values.yaml")
+ type = "spectro"
+ }
+
+ pack {
+ name = "vmo-extras"
+ type = "manifest"
+ tag = "1.0.0"
+ values = file("manifests/vmo-extras-values.yaml")
+ manifest {
+ name = "vmo-extras"
+ content = file("manifests/vmo-extras-manifest.yaml")
+ }
+ }
+
+}
diff --git a/terraform/vmo-cluster/clusters.tf b/terraform/vmo-cluster/clusters.tf
new file mode 100644
index 0000000..aecec17
--- /dev/null
+++ b/terraform/vmo-cluster/clusters.tf
@@ -0,0 +1,52 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+
+################
+# MAAS Cluster
+################
+
+resource "spectrocloud_cluster_maas" "maas-cluster" {
+ count = var.deploy-maas ? 1 : 0
+
+ name = "vmo-cluster-maas"
+ tags = concat(var.tags, ["env:maas"])
+ cloud_account_id = data.spectrocloud_cloudaccount_maas.account[0].id
+ pause_agent_upgrades = "unlock"
+
+ cloud_config {
+ domain = var.maas-domain
+ }
+
+ cluster_profile {
+ id = resource.spectrocloud_cluster_profile.maas-vmo-profile[0].id
+ }
+
+ machine_pool {
+ name = "maas-control-plane"
+    count         = var.maas-control-plane-nodes
+ control_plane = true
+ azs = var.maas-control-plane-azs
+ node_tags = var.maas-control-plane-node-tags
+ instance_type {
+ min_cpu = 8
+ min_memory_mb = 16000
+ }
+ placement {
+ resource_pool = var.maas-control-plane-resource-pool
+ }
+ }
+
+ machine_pool {
+ name = "maas-worker-basic"
+    count     = var.maas-worker-nodes
+ azs = var.maas-worker-azs
+ node_tags = var.maas-worker-node-tags
+ instance_type {
+ min_cpu = 8
+ min_memory_mb = 32000
+ }
+ placement {
+ resource_pool = var.maas-worker-resource-pool
+ }
+ }
+}
diff --git a/terraform/vmo-cluster/data.tf b/terraform/vmo-cluster/data.tf
new file mode 100644
index 0000000..e820e86
--- /dev/null
+++ b/terraform/vmo-cluster/data.tf
@@ -0,0 +1,57 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+
+########################################
+# Data resources for the cluster profile
+########################################
+data "spectrocloud_registry" "public_registry" {
+ name = "Public Repo"
+}
+
+######
+# MAAS
+######
+
+data "spectrocloud_cloudaccount_maas" "account" {
+ count = var.deploy-maas ? 1 : 0
+ name = var.pcg-name
+}
+
+data "spectrocloud_pack" "maas_ubuntu" {
+ name = "ubuntu-maas"
+ version = "22.04"
+ registry_uid = data.spectrocloud_registry.public_registry.id
+}
+
+data "spectrocloud_pack" "maas_k8s" {
+ name = "kubernetes"
+ version = "1.30.6"
+ registry_uid = data.spectrocloud_registry.public_registry.id
+}
+
+data "spectrocloud_pack" "maas_cni" {
+ name = "cni-cilium-oss"
+ version = "1.15.3"
+ registry_uid = data.spectrocloud_registry.public_registry.id
+}
+
+data "spectrocloud_pack" "maas_csi" {
+ name = "csi-rook-ceph-helm"
+ version = "1.14.9"
+ registry_uid = data.spectrocloud_registry.public_registry.id
+}
+
+data "spectrocloud_pack" "maas_vmo" {
+ name = "virtual-machine-orchestrator"
+ version = "4.4.10"
+ registry_uid = data.spectrocloud_registry.public_registry.id
+}
+
+data "spectrocloud_cluster" "maas_vmo_cluster" {
+ count = var.deploy-maas-vm ? 1 : 0
+ depends_on = [spectrocloud_cluster_maas.maas-cluster]
+ name = "vmo-cluster-maas"
+ context = "project"
+}
+
+
diff --git a/terraform/vmo-cluster/inputs.tf b/terraform/vmo-cluster/inputs.tf
new file mode 100644
index 0000000..463089b
--- /dev/null
+++ b/terraform/vmo-cluster/inputs.tf
@@ -0,0 +1,148 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+
+#########
+# Palette
+#########
+
+variable "palette-project" {
+ type = string
+ description = "The name of your project in Palette."
+
+ validation {
+ condition = var.palette-project != ""
+ error_message = "Provide the correct Palette project."
+ }
+
+}
+
+######################
+# Common Configuration
+######################
+
+variable "tags" {
+ type = list(string)
+ description = "The default tags to apply to Palette resources."
+ default = [
+ "spectro-cloud-education",
+ "spectrocloud:tutorials",
+ "terraform_managed:true",
+ "tutorial:vmo-cluster-deployment"
+ ]
+}
+
+######
+# MAAS
+######
+
+variable "deploy-maas" {
+ type = bool
+ description = "A flag for enabling a deployment on MAAS."
+}
+
+variable "deploy-maas-vm" {
+ type = bool
+  description = "A flag for enabling VM creation on the MAAS cluster."
+}
+
+variable "pcg-name" {
+ type = string
+ description = "The name of the PCG that will be used to deploy the cluster."
+
+ validation {
+ condition = var.deploy-maas ? var.pcg-name != "REPLACE ME" && var.pcg-name != "" : true
+ error_message = "Provide the correct MAAS PCG name."
+ }
+}
+
+variable "maas-domain" {
+ type = string
+ description = "MAAS domain"
+
+ validation {
+ condition = var.deploy-maas ? var.maas-domain != "REPLACE ME" && var.maas-domain != "" : true
+ error_message = "Provide the correct MAAS domain."
+ }
+}
+
+variable "maas-worker-nodes" {
+ type = number
+  description = "Number of MAAS worker nodes."
+ default = 1
+
+ validation {
+ condition = var.deploy-maas ? var.maas-worker-nodes > 0 : true
+ error_message = "Provide a valid number of worker nodes."
+ }
+}
+
+variable "maas-worker-resource-pool" {
+ type = string
+ description = "Resource pool for the MAAS worker nodes."
+
+ validation {
+ condition = var.deploy-maas ? var.maas-worker-resource-pool != "REPLACE ME" && var.maas-worker-resource-pool != "" : true
+ error_message = "Provide a valid resource pool for worker nodes."
+ }
+}
+
+variable "maas-worker-azs" {
+ type = set(string)
+ description = "Set of AZs for the MAAS worker nodes."
+
+ validation {
+ condition = var.deploy-maas ? !contains(var.maas-worker-azs, "REPLACE ME") && length(var.maas-worker-azs) != 0 : true
+ error_message = "Provide a valid set of AZs for worker nodes."
+ }
+}
+
+variable "maas-worker-node-tags" {
+ type = set(string)
+ description = "Set of node tags for the MAAS worker nodes."
+
+ validation {
+ condition = var.deploy-maas ? !contains(var.maas-worker-node-tags, "REPLACE ME") && length(var.maas-worker-node-tags) != 0 : true
+ error_message = "Provide a valid set of node tags for worker nodes."
+ }
+}
+
+variable "maas-control-plane-nodes" {
+ type = number
+  description = "Number of MAAS control plane nodes."
+ default = 1
+
+ validation {
+ condition = var.deploy-maas ? var.maas-control-plane-nodes > 0 : true
+ error_message = "Provide a valid number of control plane nodes."
+ }
+}
+
+variable "maas-control-plane-resource-pool" {
+ type = string
+ description = "Resource pool for the MAAS control plane nodes."
+
+ validation {
+ condition = var.deploy-maas ? var.maas-control-plane-resource-pool != "REPLACE ME" && var.maas-control-plane-resource-pool != "" : true
+    error_message = "Provide a valid resource pool for control plane nodes."
+ }
+}
+
+variable "maas-control-plane-azs" {
+ type = set(string)
+ description = "Set of AZs for the MAAS control plane nodes."
+
+ validation {
+ condition = var.deploy-maas ? !contains(var.maas-control-plane-azs, "REPLACE ME") && length(var.maas-control-plane-azs) != 0 : true
+ error_message = "Provide a valid set of AZs for control plane nodes."
+ }
+}
+
+variable "maas-control-plane-node-tags" {
+ type = set(string)
+ description = "Set of node tags for the MAAS control plane nodes."
+
+ validation {
+ condition = var.deploy-maas ? !contains(var.maas-control-plane-node-tags, "REPLACE ME") && length(var.maas-control-plane-node-tags) != 0 : true
+ error_message = "Provide a valid set of node tags for control plane nodes."
+ }
+}
diff --git a/terraform/vmo-cluster/manifests/cni-values.yaml b/terraform/vmo-cluster/manifests/cni-values.yaml
new file mode 100644
index 0000000..897c048
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/cni-values.yaml
@@ -0,0 +1,3547 @@
+pack:
+ content:
+ images:
+ - image: quay.io/cilium/certgen:v0.1.9
+ - image: quay.io/cilium/cilium:v1.15.3
+ - image: quay.io/cilium/cilium-envoy:v1.27.3-99c1c8f42c8de70fc8f6dd594f4a425cd38b6688
+ - image: quay.io/cilium/cilium-etcd-operator:v2.0.7
+ - image: quay.io/cilium/clustermesh-apiserver:v1.15.3
+ - image: quay.io/cilium/hubble-relay:v1.15.3
+ - image: quay.io/cilium/hubble-ui:v0.13.0
+ - image: quay.io/cilium/hubble-ui-backend:v0.13.0
+ - image: quay.io/cilium/operator:v1.15.3
+ - image: quay.io/cilium/operator-generic:v1.15.3
+ - image: quay.io/cilium/operator-aws:v1.15.3
+ - image: quay.io/cilium/operator-azure:v1.15.3
+ - image: quay.io/cilium/startup-script:62093c5c233ea914bfa26a10ba41f8780d9b737f
+ - image: ghcr.io/spiffe/spire-agent:1.8.5
+ - image: ghcr.io/spiffe/spire-server:1.8.5
+ - image: docker.io/library/busybox:1.36.1
+
+ charts:
+ - repo: https://helm.cilium.io/
+ name: cilium
+ version: 1.15.3
+ #The namespace (on the target cluster) to install this chart
+ #When not found, a new namespace will be created
+ namespace: kube-system
+
+charts:
+ cilium:
+ # upgradeCompatibility helps users upgrading to ensure that the configMap for
+ # Cilium will not change critical values to ensure continued operation
+ # This flag is not required for new installations.
+ # For example: 1.7, 1.8, 1.9
+ # upgradeCompatibility: '1.8'
+
+ debug:
+ # -- Enable debug logging
+ enabled: false
+ # -- Configure verbosity levels for debug logging
+ # This option is used to enable debug messages for operations related to such
+ # sub-system such as (e.g. kvstore, envoy, datapath or policy), and flow is
+ # for enabling debug messages emitted per request, message and connection.
+ # Multiple values can be set via a space-separated string (e.g. "datapath envoy").
+ #
+ # Applicable values:
+ # - flow
+ # - kvstore
+ # - envoy
+ # - datapath
+ # - policy
+ verbose: ~
+
+ rbac:
+ # -- Enable creation of Resource-Based Access Control configuration.
+ create: true
+
+ # -- Configure image pull secrets for pulling container images
+ imagePullSecrets:
+ # - name: "image-pull-secret"
+
+ # -- (string) Kubernetes config path
+ # @default -- `"~/.kube/config"`
+ kubeConfigPath: ""
+ # -- (string) Kubernetes service host - use "auto" for automatic lookup from the cluster-info ConfigMap (kubeadm-based clusters only)
+ k8sServiceHost: ""
+ # -- (string) Kubernetes service port
+ k8sServicePort: ""
+
+ # -- Configure the client side rate limit for the agent and operator
+ #
+ # If the amount of requests to the Kubernetes API server exceeds the configured
+ # rate limit, the agent and operator will start to throttle requests by delaying
+ # them until there is budget or the request times out.
+ k8sClientRateLimit:
+ # -- (int) The sustained request rate in requests per second.
+ # @default -- 5 for k8s up to 1.26. 10 for k8s version 1.27+
+ qps:
+ # -- (int) The burst request rate in requests per second.
+ # The rate limiter will allow short bursts with a higher rate.
+ # @default -- 10 for k8s up to 1.26. 20 for k8s version 1.27+
+ burst:
+
+ cluster:
+ # -- Name of the cluster. Only required for Cluster Mesh and mutual authentication with SPIRE.
+ name: default
+ # -- (int) Unique ID of the cluster. Must be unique across all connected
+ # clusters and in the range of 1 to 255. Only required for Cluster Mesh,
+ # may be 0 if Cluster Mesh is not used.
+ id: 0
+
+ # -- Define serviceAccount names for components.
+ # @default -- Component's fully qualified name.
+ serviceAccounts:
+ cilium:
+ create: true
+ name: cilium
+ automount: true
+ annotations: {}
+ nodeinit:
+ create: true
+ # -- Enabled is temporary until https://github.com/cilium/cilium-cli/issues/1396 is implemented.
+ # Cilium CLI doesn't create the SAs for node-init, thus the workaround. Helm is not affected by
+ # this issue. Name and automount can be configured, if enabled is set to true.
+ # Otherwise, they are ignored. Enabled can be removed once the issue is fixed.
+ # Cilium-nodeinit DS must also be fixed.
+ enabled: false
+ name: cilium-nodeinit
+ automount: true
+ annotations: {}
+ envoy:
+ create: true
+ name: cilium-envoy
+ automount: true
+ annotations: {}
+ etcd:
+ create: true
+ name: cilium-etcd-operator
+ automount: true
+ annotations: {}
+ operator:
+ create: true
+ name: cilium-operator
+ automount: true
+ annotations: {}
+ preflight:
+ create: true
+ name: cilium-pre-flight
+ automount: true
+ annotations: {}
+ relay:
+ create: true
+ name: hubble-relay
+ automount: false
+ annotations: {}
+ ui:
+ create: true
+ name: hubble-ui
+ automount: true
+ annotations: {}
+ clustermeshApiserver:
+ create: true
+ name: clustermesh-apiserver
+ automount: true
+ annotations: {}
+ # -- Clustermeshcertgen is used if clustermesh.apiserver.tls.auto.method=cronJob
+ clustermeshcertgen:
+ create: true
+ name: clustermesh-apiserver-generate-certs
+ automount: true
+ annotations: {}
+ # -- Hubblecertgen is used if hubble.tls.auto.method=cronJob
+ hubblecertgen:
+ create: true
+ name: hubble-generate-certs
+ automount: true
+ annotations: {}
+
+ # -- Configure termination grace period for cilium-agent DaemonSet.
+ terminationGracePeriodSeconds: 1
+
+ # -- Install the cilium agent resources.
+ agent: true
+
+ # -- Agent container name.
+ name: cilium
+
+ # -- Roll out cilium agent pods automatically when configmap is updated.
+ rollOutCiliumPods: false
+
+ # -- Agent container image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/cilium"
+ tag: "v1.15.3"
+ pullPolicy: "IfNotPresent"
+ # cilium-digest
+ digest: ""
+ useDigest: false
+
+ # -- Affinity for cilium-agent.
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
+
+ # -- Node selector for cilium-agent.
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for agent scheduling to nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- The priority class to use for cilium-agent.
+ priorityClassName: ""
+
+ # -- DNS policy for Cilium agent pods.
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
+ dnsPolicy: ""
+
+ # -- Additional containers added to the cilium DaemonSet.
+ extraContainers: []
+
+ # -- Additional agent container arguments.
+ extraArgs: []
+
+ # -- Additional agent container environment variables.
+ extraEnv: []
+
+ # -- Additional agent hostPath mounts.
+ extraHostPathMounts: []
+ # - name: host-mnt-data
+ # mountPath: /host/mnt/data
+ # hostPath: /mnt/data
+ # hostPathType: Directory
+ # readOnly: true
+ # mountPropagation: HostToContainer
+
+ # -- Additional agent volumes.
+ extraVolumes: []
+
+ # -- Additional agent volumeMounts.
+ extraVolumeMounts: []
+
+ # -- extraConfig allows you to specify additional configuration parameters to be
+ # included in the cilium-config configmap.
+ extraConfig: {}
+ # my-config-a: "1234"
+ # my-config-b: |-
+ # test 1
+ # test 2
+ # test 3
+
+ # -- Annotations to be added to all top-level cilium-agent objects (resources under templates/cilium-agent)
+ annotations: {}
+
+ # -- Security Context for cilium-agent pods.
+ podSecurityContext: {}
+
+ # -- Annotations to be added to agent pods
+ podAnnotations: {}
+
+ # -- Labels to be added to agent pods
+ podLabels: {}
+
+ # -- Agent resource limits & requests
+ # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ resources: {}
+ # limits:
+ # cpu: 4000m
+ # memory: 4Gi
+ # requests:
+ # cpu: 100m
+ # memory: 512Mi
+
+ # -- resources & limits for the agent init containers
+ initResources: {}
+
+ securityContext:
+ # -- User to run the pod with
+ # runAsUser: 0
+ # -- Run the pod with elevated privileges
+ privileged: false
+ # -- SELinux options for the `cilium-agent` and init containers
+ seLinuxOptions:
+ level: 's0'
+ # Running with spc_t since we have removed the privileged mode.
+ # Users can change it to a different type as long as they have the
+ # type available on the system.
+ type: 'spc_t'
+ capabilities:
+ # -- Capabilities for the `cilium-agent` container
+ ciliumAgent:
+ # Use to set socket permission
+ - CHOWN
+ # Used to terminate envoy child process
+ - KILL
+ # Used since cilium modifies routing tables, etc...
+ - NET_ADMIN
+ # Used since cilium creates raw sockets, etc...
+ - NET_RAW
+ # Used since cilium monitor uses mmap
+ - IPC_LOCK
+ # Used in iptables. Consider removing once we are iptables-free
+ - SYS_MODULE
+ # We need it for now but might not need it for >= 5.11 specially
+ # for the 'SYS_RESOURCE'.
+        # In >= 5.8 there's already BPF and PERFMON capabilities
+ - SYS_ADMIN
+ # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC
+ - SYS_RESOURCE
+ # Both PERFMON and BPF requires kernel 5.8, container runtime
+ # cri-o >= v1.22.0 or containerd >= v1.5.0.
+ # If available, SYS_ADMIN can be removed.
+ #- PERFMON
+ #- BPF
+ # Allow discretionary access control (e.g. required for package installation)
+ - DAC_OVERRIDE
+ # Allow to set Access Control Lists (ACLs) on arbitrary files (e.g. required for package installation)
+ - FOWNER
+ # Allow to execute program that changes GID (e.g. required for package installation)
+ - SETGID
+ # Allow to execute program that changes UID (e.g. required for package installation)
+ - SETUID
+ # -- Capabilities for the `mount-cgroup` init container
+ mountCgroup:
+ # Only used for 'mount' cgroup
+ - SYS_ADMIN
+ # Used for nsenter
+ - SYS_CHROOT
+ - SYS_PTRACE
+ # -- capabilities for the `apply-sysctl-overwrites` init container
+ applySysctlOverwrites:
+ # Required in order to access host's /etc/sysctl.d dir
+ - SYS_ADMIN
+ # Used for nsenter
+ - SYS_CHROOT
+ - SYS_PTRACE
+ # -- Capabilities for the `clean-cilium-state` init container
+ cleanCiliumState:
+ # Most of the capabilities here are the same ones used in the
+ # cilium-agent's container because this container can be used to
+ # uninstall all Cilium resources, and therefore it is likely that
+ # will need the same capabilities.
+ # Used since cilium modifies routing tables, etc...
+ - NET_ADMIN
+ # Used in iptables. Consider removing once we are iptables-free
+ - SYS_MODULE
+ # We need it for now but might not need it for >= 5.11 specially
+ # for the 'SYS_RESOURCE'.
+        # In >= 5.8 there's already BPF and PERFMON capabilities
+ - SYS_ADMIN
+ # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC
+ - SYS_RESOURCE
+ # Both PERFMON and BPF requires kernel 5.8, container runtime
+ # cri-o >= v1.22.0 or containerd >= v1.5.0.
+ # If available, SYS_ADMIN can be removed.
+ #- PERFMON
+ #- BPF
+
+ # -- Cilium agent update strategy
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 2
+
+ # Configuration Values for cilium-agent
+
+ aksbyocni:
+ # -- Enable AKS BYOCNI integration.
+ # Note that this is incompatible with AKS clusters not created in BYOCNI mode:
+ # use Azure integration (`azure.enabled`) instead.
+ enabled: false
+
+ # -- Enable installation of PodCIDR routes between worker
+ # nodes if worker nodes share a common L2 network segment.
+ autoDirectNodeRoutes: false
+
+ # -- Annotate k8s node upon initialization with Cilium's metadata.
+ annotateK8sNode: false
+
+ azure:
+ # -- Enable Azure integration.
+ # Note that this is incompatible with AKS clusters created in BYOCNI mode: use
+ # AKS BYOCNI integration (`aksbyocni.enabled`) instead.
+ enabled: false
+ # usePrimaryAddress: false
+ # resourceGroup: group1
+ # subscriptionID: 00000000-0000-0000-0000-000000000000
+ # tenantID: 00000000-0000-0000-0000-000000000000
+ # clientID: 00000000-0000-0000-0000-000000000000
+ # clientSecret: 00000000-0000-0000-0000-000000000000
+ # userAssignedIdentityID: 00000000-0000-0000-0000-000000000000
+
+ alibabacloud:
+ # -- Enable AlibabaCloud ENI integration
+ enabled: false
+
+ # -- Enable bandwidth manager to optimize TCP and UDP workloads and allow
+ # for rate-limiting traffic from individual Pods with EDT (Earliest Departure
+ # Time) through the "kubernetes.io/egress-bandwidth" Pod annotation.
+ bandwidthManager:
+ # -- Enable bandwidth manager infrastructure (also prerequirement for BBR)
+ enabled: false
+ # -- Activate BBR TCP congestion control for Pods
+ bbr: false
+
+ # -- Configure standalone NAT46/NAT64 gateway
+ nat46x64Gateway:
+ # -- Enable RFC8215-prefixed translation
+ enabled: false
+
+ # -- EnableHighScaleIPcache enables the special ipcache mode for high scale
+ # clusters. The ipcache content will be reduced to the strict minimum and
+ # traffic will be encapsulated to carry security identities.
+ highScaleIPcache:
+ # -- Enable the high scale mode for the ipcache.
+ enabled: false
+
+ # -- Configure L2 announcements
+ l2announcements:
+ # -- Enable L2 announcements
+ enabled: false
+ # -- If a lease is not renewed for X duration, the current leader is considered dead, a new leader is picked
+ # leaseDuration: 15s
+ # -- The interval at which the leader will renew the lease
+ # leaseRenewDeadline: 5s
+ # -- The timeout between retries if renewal fails
+ # leaseRetryPeriod: 2s
+
+ # -- Configure L2 pod announcements
+ l2podAnnouncements:
+ # -- Enable L2 pod announcements
+ enabled: false
+ # -- Interface used for sending Gratuitous ARP pod announcements
+ interface: "eth0"
+
+ # -- Configure BGP
+ bgp:
+ # -- Enable BGP support inside Cilium; embeds a new ConfigMap for BGP inside
+ # cilium-agent and cilium-operator
+ enabled: false
+ announce:
+ # -- Enable allocation and announcement of service LoadBalancer IPs
+ loadbalancerIP: false
+ # -- Enable announcement of node pod CIDR
+ podCIDR: false
+
+ # -- This feature set enables virtual BGP routers to be created via
+ # CiliumBGPPeeringPolicy CRDs.
+ bgpControlPlane:
+ # -- Enables the BGP control plane.
+ enabled: false
+ # -- SecretsNamespace is the namespace which BGP support will retrieve secrets from.
+ secretsNamespace:
+ # -- Create secrets namespace for BGP secrets.
+ create: false
+ # -- The name of the secret namespace to which Cilium agents are given read access
+ name: kube-system
+
+ pmtuDiscovery:
+ # -- Enable path MTU discovery to send ICMP fragmentation-needed replies to
+ # the client.
+ enabled: false
+
+ bpf:
+ autoMount:
+ # -- Enable automatic mount of BPF filesystem
+ # When `autoMount` is enabled, the BPF filesystem is mounted at
+ # `bpf.root` path on the underlying host and inside the cilium agent pod.
+ # If users disable `autoMount`, it's expected that users have mounted
+ # bpffs filesystem at the specified `bpf.root` volume, and then the
+ # volume will be mounted inside the cilium agent pod at the same path.
+ enabled: true
+ # -- Configure the mount point for the BPF filesystem
+ root: /sys/fs/bpf
+
+ # -- Enables pre-allocation of eBPF map values. This increases
+ # memory usage but can reduce latency.
+ preallocateMaps: false
+
+ # -- (int) Configure the maximum number of entries in auth map.
+ # @default -- `524288`
+ authMapMax: ~
+
+ # -- (int) Configure the maximum number of entries in the TCP connection tracking
+ # table.
+ # @default -- `524288`
+ ctTcpMax: ~
+
+ # -- (int) Configure the maximum number of entries for the non-TCP connection
+ # tracking table.
+ # @default -- `262144`
+ ctAnyMax: ~
+
+ # -- Configure the maximum number of service entries in the
+ # load balancer maps.
+ lbMapMax: 65536
+
+ # -- (int) Configure the maximum number of entries for the NAT table.
+ # @default -- `524288`
+ natMax: ~
+
+ # -- (int) Configure the maximum number of entries for the neighbor table.
+ # @default -- `524288`
+ neighMax: ~
+
+ # -- Configure the maximum number of entries in endpoint policy map (per endpoint).
+ policyMapMax: 16384
+
+ # -- (float64) Configure auto-sizing for all BPF maps based on available memory.
+ # ref: https://docs.cilium.io/en/stable/network/ebpf/maps/
+ # @default -- `0.0025`
+ mapDynamicSizeRatio: ~
+
+ # -- Configure the level of aggregation for monitor notifications.
+ # Valid options are none, low, medium, maximum.
+ monitorAggregation: medium
+
+ # -- Configure the typical time between monitor notifications for
+ # active connections.
+ monitorInterval: "5s"
+
+ # -- Configure which TCP flags trigger notifications when seen for the
+ # first time in a connection.
+ monitorFlags: "all"
+
+ # -- Allow cluster external access to ClusterIP services.
+ lbExternalClusterIP: false
+
+ # -- (bool) Enable native IP masquerade support in eBPF
+ # @default -- `false`
+ masquerade: ~
+
+ # -- (bool) Configure whether direct routing mode should route traffic via
+ # host stack (true) or directly and more efficiently out of BPF (false) if
+ # the kernel supports it. The latter has the implication that it will also
+ # bypass netfilter in the host namespace.
+ # @default -- `false`
+ hostLegacyRouting: ~
+
+ # -- (bool) Configure the eBPF-based TPROXY to reduce reliance on iptables rules
+ # for implementing Layer 7 policy.
+ # @default -- `false`
+ tproxy: ~
+
+ # -- (list) Configure explicitly allowed VLAN id's for bpf logic bypass.
+ # [0] will allow all VLAN id's without any filtering.
+ # @default -- `[]`
+ vlanBypass: ~
+
+ # -- Enable BPF clock source probing for more efficient tick retrieval.
+ bpfClockProbe: false
+
+ # -- Clean all eBPF datapath state from the initContainer of the cilium-agent
+ # DaemonSet.
+ #
+ # WARNING: Use with care!
+ cleanBpfState: false
+
+ # -- Clean all local Cilium state from the initContainer of the cilium-agent
+ # DaemonSet. Implies cleanBpfState: true.
+ #
+ # WARNING: Use with care!
+ cleanState: false
+
+ # -- Wait for KUBE-PROXY-CANARY iptables rule to appear in "wait-for-kube-proxy"
+ # init container before launching cilium-agent.
+ # More context can be found in the commit message of below PR
+ # https://github.com/cilium/cilium/pull/20123
+ waitForKubeProxy: false
+
+ cni:
+ # -- Install the CNI configuration and binary files into the filesystem.
+ install: true
+
+ # -- Remove the CNI configuration and binary files on agent shutdown. Enable this
+ # if you're removing Cilium from the cluster. Disable this to prevent the CNI
+ # configuration file from being removed during agent upgrade, which can cause
+ # nodes to go unmanageable.
+ uninstall: false
+
+ # -- Configure chaining on top of other CNI plugins. Possible values:
+ # - none
+ # - aws-cni
+ # - flannel
+ # - generic-veth
+ # - portmap
+ chainingMode: ~
+
+ # -- A CNI network name in to which the Cilium plugin should be added as a chained plugin.
+ # This will cause the agent to watch for a CNI network with this network name. When it is
+ # found, this will be used as the basis for Cilium's CNI configuration file. If this is
+ # set, it assumes a chaining mode of generic-veth. As a special case, a chaining mode
+ # of aws-cni implies a chainingTarget of aws-cni.
+ chainingTarget: ~
+
+ # -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the
+ # node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
+ # This ensures no Pods can be scheduled using other CNI plugins during Cilium
+ # agent downtime.
+ exclusive: false
+
+ # -- Configure the log file for CNI logging with retention policy of 7 days.
+ # Disable CNI file logging by setting this field to empty explicitly.
+ logFile: /var/run/cilium/cilium-cni.log
+
+ # -- Skip writing of the CNI configuration. This can be used if
+ # writing of the CNI configuration is performed by external automation.
+ customConf: false
+
+ # -- Configure the path to the CNI configuration directory on the host.
+ confPath: /etc/cni/net.d
+
+ # -- Configure the path to the CNI binary directory on the host.
+ binPath: /opt/cni/bin
+
+ # -- Specify the path to a CNI config to read from on agent start.
+ # This can be useful if you want to manage your CNI
+ # configuration outside of a Kubernetes environment. This parameter is
+ # mutually exclusive with the 'cni.configMap' parameter. The agent will
+ # write this to 05-cilium.conflist on startup.
+ # readCniConf: /host/etc/cni/net.d/05-sample.conflist.input
+
+ # -- When defined, configMap will mount the provided value as ConfigMap and
+ # interpret the cniConf variable as CNI configuration file and write it
+ # when the agent starts up
+ # configMap: cni-configuration
+
+ # -- Configure the key in the CNI ConfigMap to read the contents of
+ # the CNI configuration from.
+ configMapKey: cni-config
+
+ # -- Configure the path to where to mount the ConfigMap inside the agent pod.
+ confFileMountPath: /tmp/cni-configuration
+
+ # -- Configure the path to where the CNI configuration directory is mounted
+ # inside the agent pod.
+ hostConfDirMountPath: /host/etc/cni/net.d
+
+ # -- Specifies the resources for the cni initContainer
+ resources:
+ requests:
+ cpu: 100m
+ memory: 10Mi
+
+ # -- (string) Configure how frequently garbage collection should occur for the datapath
+ # connection tracking table.
+ # @default -- `"0s"`
+ conntrackGCInterval: ""
+
+ # -- (string) Configure the maximum frequency for the garbage collection of the
+ # connection tracking table. Only affects the automatic computation for the frequency
+ # and has no effect when 'conntrackGCInterval' is set. This can be set to more frequently
+ # clean up unused identities created from ToFQDN policies.
+ conntrackGCMaxInterval: ""
+
+ # -- Configure container runtime specific integration.
+ # Deprecated in favor of bpf.autoMount.enabled. To be removed in 1.15.
+ containerRuntime:
+ # -- Enables specific integrations for container runtimes.
+ # Supported values:
+ # - crio
+ # - none
+ integration: none
+
+ # -- (string) Configure timeout in which Cilium will exit if CRDs are not available
+ # @default -- `"5m"`
+ crdWaitTimeout: ""
+
+ # -- Tail call hooks for custom eBPF programs.
+ customCalls:
+ # -- Enable tail call hooks for custom eBPF programs.
+ enabled: false
+
+ daemon:
+ # -- Configure where Cilium runtime state should be stored.
+ runPath: "/var/run/cilium"
+
+ # -- Configure a custom list of possible configuration override sources
+ # The default is "config-map:cilium-config,cilium-node-config". For supported
+ # values, see the help text for the build-config subcommand.
+ # Note that this value should be a comma-separated string.
+ configSources: ~
+
+ # -- allowedConfigOverrides is a list of config-map keys that can be overridden.
+ # That is to say, if this value is set, config sources (excepting the first one) can
+ # only override keys in this list.
+ #
+ # This takes precedence over blockedConfigOverrides.
+ #
+ # By default, all keys may be overridden. To disable overrides, set this to "none" or
+ # change the configSources variable.
+ allowedConfigOverrides: ~
+
+ # -- blockedConfigOverrides is a list of config-map keys that may not be overridden.
+ # In other words, if any of these keys appear in a configuration source excepting the
+ # first one, they will be ignored
+ #
+ # This is ignored if allowedConfigOverrides is set.
+ #
+ # By default, all keys may be overridden.
+ blockedConfigOverrides: ~
+
+ # -- Specify which network interfaces can run the eBPF datapath. This means
+ # that a packet sent from a pod to a destination outside the cluster will be
+ # masqueraded (to an output device IPv4 address), if the output device runs the
+ # program. When not specified, probing will automatically detect devices that have
+ # a non-local route. This should be used only when autodetection is not suitable.
+ # devices: ""
+
+ # -- Enables experimental support for the detection of new and removed datapath
+ # devices. When devices change the eBPF datapath is reloaded and services updated.
+ # If "devices" is set then only those devices, or devices matching a wildcard will
+ # be considered.
+ enableRuntimeDeviceDetection: false
+
+ # -- Chains to ignore when installing feeder rules.
+ # disableIptablesFeederRules: ""
+
+ # -- Limit iptables-based egress masquerading to interface selector.
+ # egressMasqueradeInterfaces: ""
+
+ # -- Enable setting identity mark for local traffic.
+ # enableIdentityMark: true
+
+ # -- Enable Kubernetes EndpointSlice feature in Cilium if the cluster supports it.
+ # enableK8sEndpointSlice: true
+
+ # -- Enable CiliumEndpointSlice feature.
+ enableCiliumEndpointSlice: false
+
+ envoyConfig:
+ # -- Enable CiliumEnvoyConfig CRD
+ # CiliumEnvoyConfig CRD can also be implicitly enabled by other options.
+ enabled: false
+
+ # -- SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from.
+ secretsNamespace:
+ # -- Create secrets namespace for CiliumEnvoyConfig CRDs.
+ create: true
+
+ # -- The name of the secret namespace to which Cilium agents are given read access.
+ name: cilium-secrets
+
+ ingressController:
+ # -- Enable cilium ingress controller
+ # This will automatically set enable-envoy-config as well.
+ enabled: false
+
+ # -- Set cilium ingress controller to be the default ingress controller
+ # This will let cilium ingress controller route entries without ingress class set
+ default: false
+
+ # -- Default ingress load balancer mode
+ # Supported values: shared, dedicated
+ # For granular control, use the following annotations on the ingress resource
+ # ingress.cilium.io/loadbalancer-mode: shared|dedicated,
+ loadbalancerMode: dedicated
+
+ # -- Enforce https for host having matching TLS host in Ingress.
+ # Incoming traffic to http listener will return 308 http error code with respective location in header.
+ enforceHttps: true
+
+ # -- Enable proxy protocol for all Ingress listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled.
+ enableProxyProtocol: false
+
+ # -- IngressLBAnnotations are the annotation and label prefixes, which are used to filter annotations and/or labels to propagate from Ingress to the Load Balancer service
+ ingressLBAnnotationPrefixes: ['service.beta.kubernetes.io', 'service.kubernetes.io', 'cloud.google.com']
+
+ # -- Default secret namespace for ingresses without .spec.tls[].secretName set.
+ defaultSecretNamespace:
+
+ # -- Default secret name for ingresses without .spec.tls[].secretName set.
+ defaultSecretName:
+
+ # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from.
+ secretsNamespace:
+ # -- Create secrets namespace for Ingress.
+ create: true
+
+ # -- Name of Ingress secret namespace.
+ name: cilium-secrets
+
+ # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name.
+ # If disabled, TLS secrets must be maintained externally.
+ sync: true
+
+ # -- Load-balancer service in shared mode.
+ # This is a single load-balancer service for all Ingress resources.
+ service:
+ # -- Service name
+ name: cilium-ingress
+ # -- Labels to be added for the shared LB service
+ labels: {}
+ # -- Annotations to be added for the shared LB service
+ annotations: {}
+ # -- Service type for the shared LB service
+ type: LoadBalancer
+ # -- Configure a specific nodePort for insecure HTTP traffic on the shared LB service
+ insecureNodePort: ~
+ # -- Configure a specific nodePort for secure HTTPS traffic on the shared LB service
+ secureNodePort : ~
+ # -- Configure a specific loadBalancerClass on the shared LB service (requires Kubernetes 1.24+)
+ loadBalancerClass: ~
+ # -- Configure a specific loadBalancerIP on the shared LB service
+ loadBalancerIP : ~
+ # -- Configure if node port allocation is required for LB service
+ # ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation
+ allocateLoadBalancerNodePorts: ~
+
+ gatewayAPI:
+ # -- Enable support for Gateway API in cilium
+ # This will automatically set enable-envoy-config as well.
+ enabled: false
+
+ # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from.
+ secretsNamespace:
+ # -- Create secrets namespace for Gateway API.
+ create: true
+
+ # -- Name of Gateway API secret namespace.
+ name: cilium-secrets
+
+ # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name.
+ # If disabled, TLS secrets must be maintained externally.
+ sync: true
+
+ # -- Enables the fallback compatibility solution for when the xt_socket kernel
+ # module is missing and it is needed for the datapath L7 redirection to work
+ # properly. See documentation for details on when this can be disabled:
+ # https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel.
+ enableXTSocketFallback: true
+
+ encryption:
+ # -- Enable transparent network encryption.
+ enabled: false
+
+ # -- Encryption method. Can be either ipsec or wireguard.
+ type: ipsec
+
+ # -- Enable encryption for pure node to node traffic.
+ # This option is only effective when encryption.type is set to "wireguard".
+ nodeEncryption: false
+
+ # -- Configure the WireGuard Pod2Pod strict mode.
+ strictMode:
+ # -- Enable WireGuard Pod2Pod strict mode.
+ enabled: false
+
+ # -- CIDR for the WireGuard Pod2Pod strict mode.
+ cidr: ""
+
+ # -- Allow dynamic lookup of remote node identities.
+ # This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap.
+ allowRemoteNodeIdentities: false
+
+ ipsec:
+ # -- Name of the key file inside the Kubernetes secret configured via secretName.
+ keyFile: ""
+
+ # -- Path to mount the secret inside the Cilium pod.
+ mountPath: ""
+
+ # -- Name of the Kubernetes secret containing the encryption keys.
+ secretName: ""
+
+ # -- The interface to use for encrypted traffic.
+ interface: ""
+
+ # -- Enable the key watcher. If disabled, a restart of the agent will be
+ # necessary on key rotations.
+ keyWatcher: true
+
+ # -- Maximum duration of the IPsec key rotation. The previous key will be
+ # removed after that delay.
+ keyRotationDuration: "5m"
+
+ wireguard:
+ # -- Enables the fallback to the user-space implementation.
+ userspaceFallback: false
+ # -- Controls Wireguard PersistentKeepalive option. Set 0s to disable.
+ persistentKeepalive: 0s
+
+ # -- Deprecated in favor of encryption.ipsec.keyFile. To be removed in 1.15.
+ # Name of the key file inside the Kubernetes secret configured via secretName.
+ # This option is only effective when encryption.type is set to ipsec.
+ keyFile: keys
+
+ # -- Deprecated in favor of encryption.ipsec.mountPath. To be removed in 1.15.
+ # Path to mount the secret inside the Cilium pod.
+ # This option is only effective when encryption.type is set to ipsec.
+ mountPath: /etc/ipsec
+
+ # -- Deprecated in favor of encryption.ipsec.secretName. To be removed in 1.15.
+ # Name of the Kubernetes secret containing the encryption keys.
+ # This option is only effective when encryption.type is set to ipsec.
+ secretName: cilium-ipsec-keys
+
+ # -- Deprecated in favor of encryption.ipsec.interface. To be removed in 1.15.
+ # The interface to use for encrypted traffic.
+ # This option is only effective when encryption.type is set to ipsec.
+ interface: ""
+
+ endpointHealthChecking:
+ # -- Enable connectivity health checking between virtual endpoints.
+ enabled: true
+
+ # -- Enable endpoint status.
+ # Status can be: policy, health, controllers, log and / or state. For 2 or more options use a space.
+ endpointStatus:
+ enabled: false
+ status: ""
+
+ endpointRoutes:
+ # -- Enable use of per endpoint routes instead of routing via
+ # the cilium_host interface.
+ enabled: false
+
+ k8sNetworkPolicy:
+ # -- Enable support for K8s NetworkPolicy
+ enabled: true
+
+ eni:
+ # -- Enable Elastic Network Interface (ENI) integration.
+ enabled: false
+ # -- Update ENI Adapter limits from the EC2 API
+ updateEC2AdapterLimitViaAPI: true
+ # -- Release IPs not used from the ENI
+ awsReleaseExcessIPs: false
+ # -- Enable ENI prefix delegation
+ awsEnablePrefixDelegation: false
+ # -- EC2 API endpoint to use
+ ec2APIEndpoint: ""
+ # -- Tags to apply to the newly created ENIs
+ eniTags: {}
+ # -- Interval for garbage collection of unattached ENIs. Set to "0s" to disable.
+ # @default -- `"5m"`
+ gcInterval: ""
+ # -- Additional tags attached to ENIs created by Cilium.
+ # Dangling ENIs with this tag will be garbage collected
+ # @default -- `{"io.cilium/cilium-managed":"true,"io.cilium/cluster-name":""}`
+ gcTags: {}
+ # -- If using IAM role for Service Accounts will not try to
+ # inject identity values from cilium-aws kubernetes secret.
+ # Adds annotation to service account if managed by Helm.
+ # See https://github.com/aws/amazon-eks-pod-identity-webhook
+ iamRole: ""
+ # -- Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs
+ # Important note: This requires that each instance has an ENI with a matching subnet attached
+ # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium,
+ # use the CNI configuration file settings (cni.customConf) instead.
+ subnetIDsFilter: []
+ # -- Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs
+ # Important note: This requires that each instance has an ENI with a matching subnet attached
+ # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium,
+ # use the CNI configuration file settings (cni.customConf) instead.
+ subnetTagsFilter: []
+ # -- Filter via AWS EC2 Instance tags (k=v) which will dictate which AWS EC2 Instances
+ # are going to be used to create new ENIs
+ instanceTagsFilter: []
+
+ externalIPs:
+ # -- Enable ExternalIPs service support.
+ enabled: false
+
+ # fragmentTracking enables IPv4 fragment tracking support in the datapath.
+ # fragmentTracking: true
+
+ gke:
+ # -- Enable Google Kubernetes Engine integration
+ enabled: false
+
+ # -- Enable connectivity health checking.
+ healthChecking: true
+
+ # -- TCP port for the agent health API. This is not the port for cilium-health.
+ healthPort: 9879
+
+ # -- Configure the host firewall.
+ hostFirewall:
+ # -- Enables the enforcement of host policies in the eBPF datapath.
+ enabled: false
+
+ hostPort:
+ # -- Enable hostPort service support.
+ enabled: false
+
+ # -- Configure socket LB
+ socketLB:
+ # -- Enable socket LB
+ enabled: false
+
+ # -- Disable socket lb for non-root ns. This is used to enable Istio routing rules.
+ # hostNamespaceOnly: false
+
+ # -- Configure certificate generation for Hubble integration.
+ # If hubble.tls.auto.method=cronJob, these values are used
+ # for the Kubernetes CronJob which will be scheduled regularly to
+ # (re)generate any certificates not provided manually.
+ certgen:
+ image:
+ override: ~
+ repository: "quay.io/cilium/certgen"
+ tag: "v0.1.9"
+ digest: "sha256:89a0847753686444daabde9474b48340993bd19c7bea66a46e45b2974b82041f"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+ # -- Seconds after which the completed job pod will be deleted
+ ttlSecondsAfterFinished: 1800
+ # -- Labels to be added to hubble-certgen pods
+ podLabels: {}
+ # -- Annotations to be added to the hubble-certgen initial Job and CronJob
+ annotations:
+ job: {}
+ cronJob: {}
+ # -- Node tolerations for pod assignment on nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+
+ # -- Additional certgen volumes.
+ extraVolumes: []
+
+ # -- Additional certgen volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Affinity for certgen
+ affinity: {}
+
+ hubble:
+ # -- Enable Hubble (true by default).
+ enabled: true
+
+ # -- Annotations to be added to all top-level hubble objects (resources under templates/hubble)
+ annotations: {}
+
+ # -- Buffer size of the channel Hubble uses to receive monitor events. If this
+ # value is not set, the queue size is set to the default monitor queue size.
+ # eventQueueSize: ""
+
+ # -- Number of recent flows for Hubble to cache. Defaults to 4095.
+ # Possible values are:
+ # 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023,
+ # 2047, 4095, 8191, 16383, 32767, 65535
+ # eventBufferCapacity: "4095"
+
+ # -- Hubble metrics configuration.
+ # See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics
+ # for more comprehensive documentation about Hubble metrics.
+ metrics:
+ # -- Configures the list of metrics to collect. If empty or null, metrics
+ # are disabled.
+ # Example:
+ #
+ # enabled:
+ # - dns:query;ignoreAAAA
+ # - drop
+ # - tcp
+ # - flow
+ # - icmp
+ # - http
+ #
+ # You can specify the list of metrics from the helm CLI:
+ #
+ # --set hubble.metrics.enabled="{dns:query;ignoreAAAA,drop,tcp,flow,icmp,http}"
+ #
+ enabled: ~
+ # -- Enables exporting hubble metrics in OpenMetrics format.
+ enableOpenMetrics: false
+ # -- Configure the port the hubble metric server listens on.
+ port: 9965
+ # -- Annotations to be added to hubble-metrics service.
+ serviceAnnotations: {}
+ serviceMonitor:
+ # -- Create ServiceMonitor resources for Prometheus Operator.
+ # This requires the prometheus CRDs to be available.
+ # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ enabled: false
+ # -- Labels to add to ServiceMonitor hubble
+ labels: {}
+ # -- Annotations to add to ServiceMonitor hubble
+ annotations: {}
+ # -- jobLabel to add for ServiceMonitor hubble
+ jobLabel: ""
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Relabeling configs for the ServiceMonitor hubble
+ relabelings:
+ - sourceLabels:
+ - __meta_kubernetes_pod_node_name
+ targetLabel: node
+ replacement: ${1}
+ # -- Metrics relabeling configs for the ServiceMonitor hubble
+ metricRelabelings: ~
+ # -- Grafana dashboards for hubble
+ # grafana can import dashboards based on the label and value
+ # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
+ dashboards:
+ enabled: false
+ label: grafana_dashboard
+ namespace: ~
+ labelValue: "1"
+ annotations: {}
+
+ # -- Unix domain socket path to listen to when Hubble is enabled.
+ socketPath: /var/run/cilium/hubble.sock
+
+ # -- Enables redacting sensitive information present in Layer 7 flows.
+ redact:
+ enabled: false
+ http:
+ # -- Enables redacting URL query (GET) parameters.
+ # Example:
+ #
+ # redact:
+ # enabled: true
+ # http:
+ # urlQuery: true
+ #
+ # You can specify the options from the helm CLI:
+ #
+ # --set hubble.redact.enabled="true"
+ # --set hubble.redact.http.urlQuery="true"
+ urlQuery: false
+ # -- Enables redacting user info, e.g., password when basic auth is used.
+ # Example:
+ #
+ # redact:
+ # enabled: true
+ # http:
+ # userInfo: true
+ #
+ # You can specify the options from the helm CLI:
+ #
+ # --set hubble.redact.enabled="true"
+ # --set hubble.redact.http.userInfo="true"
+ userInfo: true
+ headers:
+ # -- List of HTTP headers to allow: headers not matching will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present.
+ # Example:
+ # redact:
+ # enabled: true
+ # http:
+ # headers:
+ # allow:
+ # - traceparent
+ # - tracestate
+ # - Cache-Control
+ #
+ # You can specify the options from the helm CLI:
+ # --set hubble.redact.enabled="true"
+ # --set hubble.redact.http.headers.allow="traceparent,tracestate,Cache-Control"
+ allow: []
+ # -- List of HTTP headers to deny: matching headers will be redacted. Note: `allow` and `deny` lists cannot both be used at the same time, only one can be present.
+ # Example:
+ # redact:
+ # enabled: true
+ # http:
+ # headers:
+ # deny:
+ # - Authorization
+ # - Proxy-Authorization
+ #
+ # You can specify the options from the helm CLI:
+ # --set hubble.redact.enabled="true"
+ # --set hubble.redact.http.headers.deny="Authorization,Proxy-Authorization"
+ deny: []
+ kafka:
+ # -- Enables redacting Kafka's API key.
+ # Example:
+ #
+ # redact:
+ # enabled: true
+ # kafka:
+ # apiKey: true
+ #
+ # You can specify the options from the helm CLI:
+ #
+ # --set hubble.redact.enabled="true"
+ # --set hubble.redact.kafka.apiKey="true"
+ apiKey: false
+
+ # -- An additional address for Hubble to listen to.
+ # Set this field to ":4244" if you are enabling Hubble Relay, as it assumes that
+ # Hubble is listening on port 4244.
+ listenAddress: ":4244"
+ # -- Whether Hubble should prefer to announce IPv6 or IPv4 addresses if both are available.
+ preferIpv6: false
+ # -- (bool) Skip Hubble events with unknown cgroup ids
+ # @default -- `true`
+ skipUnknownCGroupIDs: ~
+
+ peerService:
+ # -- Service Port for the Peer service.
+ # If not set, it is dynamically assigned to port 443 if TLS is enabled and to
+ # port 80 if not.
+ # servicePort: 80
+ # -- Target Port for the Peer service, must match the hubble.listenAddress'
+ # port.
+ targetPort: 4244
+ # -- The cluster domain to use to query the Hubble Peer service. It should
+ # be the local cluster.
+ clusterDomain: cluster.local
+ # -- TLS configuration for Hubble
+ tls:
+ # -- Enable mutual TLS for listenAddress. Setting this value to false is
+ # highly discouraged as the Hubble API provides access to potentially
+ # sensitive network flow metadata and is exposed on the host network.
+ enabled: true
+ # -- Configure automatic TLS certificates generation.
+ auto:
+ # -- Auto-generate certificates.
+ # When set to true, automatically generate a CA and certificates to
+ # enable mTLS between Hubble server and Hubble Relay instances. If set to
+ # false, the certs for Hubble server need to be provided by setting
+ # appropriate values below.
+ enabled: true
+ # -- Set the method to auto-generate certificates. Supported values:
+ # - helm: This method uses Helm to generate all certificates.
+ # - cronJob: This method uses a Kubernetes CronJob to generate any
+ # certificates not provided by the user at installation
+ # time.
+ # - certmanager: This method uses cert-manager to generate & rotate certificates.
+ method: helm
+ # -- Generated certificates validity duration in days.
+ certValidityDuration: 1095
+ # -- Schedule for certificates regeneration (regardless of their expiration date).
+ # Only used if method is "cronJob". If nil, then no recurring job will be created.
+ # Instead, only the one-shot job is deployed to generate the certificates at
+ # installation time.
+ #
+ # Defaults to midnight of the first day of every fourth month. For syntax, see
+ # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax
+ schedule: "0 0 1 */4 *"
+
+ # [Example]
+ # certManagerIssuerRef:
+ # group: cert-manager.io
+ # kind: ClusterIssuer
+ # name: ca-issuer
+ # -- certmanager issuer used when hubble.tls.auto.method=certmanager.
+ certManagerIssuerRef: {}
+
+ # -- base64 encoded PEM values for the Hubble server certificate and private key
+ server:
+ cert: ""
+ key: ""
+ # -- Extra DNS names added to certificate when it's auto generated
+ extraDnsNames: []
+ # -- Extra IP addresses added to certificate when it's auto generated
+ extraIpAddresses: []
+
+ relay:
+ # -- Enable Hubble Relay (requires hubble.enabled=true)
+ enabled: false
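+ # [Example] Hedged sketch, not a chart default: the Hubble UI (see the `ui` section
+ # further below) also requires the relay, so `hubble.relay.enabled` and
+ # `hubble.ui.enabled` are typically turned on together, e.g. via
+ # --set hubble.relay.enabled=true --set hubble.ui.enabled=true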
+
+ # -- Roll out Hubble Relay pods automatically when configmap is updated.
+ rollOutPods: false
+
+ # -- Hubble-relay container image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/hubble-relay"
+ tag: "v1.15.3"
+ # hubble-relay-digest
+ digest: ""
+ useDigest: false
+ pullPolicy: "IfNotPresent"
+
+ # -- Specifies the resources for the hubble-relay pods
+ resources: {}
+
+ # -- Number of replicas run for the hubble-relay deployment.
+ replicas: 1
+
+ # -- Affinity for hubble-relay
+ affinity:
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
+
+ # -- Pod topology spread constraints for hubble-relay
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Node labels for pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for pod assignment on nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+
+ # -- Additional hubble-relay environment variables.
+ extraEnv: []
+
+ # -- Annotations to be added to all top-level hubble-relay objects (resources under templates/hubble-relay)
+ annotations: {}
+
+ # -- Annotations to be added to hubble-relay pods
+ podAnnotations: {}
+
+ # -- Labels to be added to hubble-relay pods
+ podLabels: {}
+
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+
+ # -- The priority class to use for hubble-relay
+ priorityClassName: ""
+
+ # -- Configure termination grace period for hubble relay Deployment.
+ terminationGracePeriodSeconds: 1
+
+ # -- hubble-relay update strategy
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+
+ # -- Additional hubble-relay volumes.
+ extraVolumes: []
+
+ # -- Additional hubble-relay volumeMounts.
+ extraVolumeMounts: []
+
+ # -- hubble-relay pod security context
+ podSecurityContext:
+ fsGroup: 65532
+
+ # -- hubble-relay container security context
+ securityContext:
+ # readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 65532
+ runAsGroup: 65532
+ capabilities:
+ drop:
+ - ALL
+
+ # -- hubble-relay service configuration.
+ service:
+ # --- The type of service used for Hubble Relay access, either ClusterIP or NodePort.
+ type: ClusterIP
+ # --- The port to use when the service type is set to NodePort.
+ nodePort: 31234
+
+ # -- Host to listen to. Specify an empty string to bind to all the interfaces.
+ listenHost: ""
+
+ # -- Port to listen to.
+ listenPort: "4245"
+
+ # -- TLS configuration for Hubble Relay
+ tls:
+ # -- base64 encoded PEM values for the hubble-relay client certificate and private key
+ # This keypair is presented to Hubble server instances for mTLS
+ # authentication and is required when hubble.tls.enabled is true.
+ # These values need to be set manually if hubble.tls.auto.enabled is false.
+ client:
+ cert: ""
+ key: ""
+ # -- base64 encoded PEM values for the hubble-relay server certificate and private key
+ server:
+ # When set to true, enables TLS for the Hubble Relay server
+ # (i.e. for clients connecting to the Hubble Relay API).
+ enabled: false
+ # When set to true, enforces mutual TLS between the Hubble Relay server and its clients.
+ # False allows non-mutual TLS connections.
+ # This option has no effect when TLS is disabled.
+ mtls: false
+ # These values need to be set manually if hubble.tls.auto.enabled is false.
+ cert: ""
+ key: ""
+ # -- Extra DNS names added to certificate when it's auto generated
+ extraDnsNames: []
+ # -- Extra IP addresses added to certificate when it's auto generated
+ extraIpAddresses: []
+ # DNS name used by the backend to connect to the relay
+ # This is a simple workaround as the relay certificates are currently hardcoded to
+ # *.hubble-relay.cilium.io
+ # See https://github.com/cilium/cilium/pull/28709#discussion_r1371792546
+ # For GKE Dataplane V2 this should be set to relay.kube-system.svc.cluster.local
+ relayName: "ui.hubble-relay.cilium.io"
+
+ # -- Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s").
+ dialTimeout: ~
+
+ # -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s").
+ retryTimeout: ~
+
+ # -- Max number of flows that can be buffered for sorting before being sent to the
+ # client (per request) (e.g. 100).
+ sortBufferLenMax: ~
+
+ # -- When the per-request flows sort buffer is not full, a flow is drained every
+ # time this timeout is reached (only affects requests in follow-mode) (e.g. "1s").
+ sortBufferDrainTimeout: ~
+
+ # -- Port to use for the k8s service backed by hubble-relay pods.
+ # If not set, it is dynamically assigned to port 443 if TLS is enabled and to
+ # port 80 if not.
+ # servicePort: 80
+
+ # -- Enable prometheus metrics for hubble-relay on the configured port at
+ # /metrics
+ prometheus:
+ enabled: false
+ port: 9966
+ serviceMonitor:
+ # -- Enable service monitors.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ enabled: false
+ # -- Labels to add to ServiceMonitor hubble-relay
+ labels: {}
+ # -- Annotations to add to ServiceMonitor hubble-relay
+ annotations: {}
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Specify the Kubernetes namespace where Prometheus expects to find
+ # service monitors configured.
+ # namespace: ""
+ # -- Relabeling configs for the ServiceMonitor hubble-relay
+ relabelings: ~
+ # -- Metrics relabeling configs for the ServiceMonitor hubble-relay
+ metricRelabelings: ~
+
+ gops:
+ # -- Enable gops for hubble-relay
+ enabled: true
+ # -- Configure gops listen port for hubble-relay
+ port: 9893
+
+ pprof:
+ # -- Enable pprof for hubble-relay
+ enabled: false
+ # -- Configure pprof listen address for hubble-relay
+ address: localhost
+ # -- Configure pprof listen port for hubble-relay
+ port: 6062
+
+ ui:
+ # -- Whether to enable the Hubble UI.
+ enabled: false
+
+ standalone:
+ # -- When true, it allows installing only the Hubble UI, without checking dependencies.
+ # It is useful if a cluster already has Cilium and Hubble Relay installed and you just
+ # want the Hubble UI to be deployed.
+ # When installed via Helm, install the UI via `helm upgrade`; when installed via the Cilium CLI, use `cilium hubble enable --ui`.
+ enabled: false
+
+ tls:
+ # -- When deploying Hubble UI in standalone mode with TLS enabled for Hubble Relay, a volume
+ # must be provided for mounting the client certificates.
+ certsVolume: {}
+ # projected:
+ # defaultMode: 0400
+ # sources:
+ # - secret:
+ # name: hubble-ui-client-certs
+ # items:
+ # - key: tls.crt
+ # path: client.crt
+ # - key: tls.key
+ # path: client.key
+ # - key: ca.crt
+ # path: hubble-relay-ca.crt
+
+ # -- Roll out Hubble-ui pods automatically when configmap is updated.
+ rollOutPods: false
+
+ tls:
+ # -- base64 encoded PEM values used to connect to hubble-relay
+ # This keypair is presented to Hubble Relay instances for mTLS
+ # authentication and is required when hubble.relay.tls.server.enabled is true.
+ # These values need to be set manually if hubble.tls.auto.enabled is false.
+ client:
+ cert: ""
+ key: ""
+
+ backend:
+ # -- Hubble-ui backend image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/hubble-ui-backend"
+ tag: "v0.13.0"
+ digest: "sha256:1e7657d997c5a48253bb8dc91ecee75b63018d16ff5e5797e5af367336bc8803"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+
+ # -- Hubble-ui backend security context.
+ securityContext: {}
+
+ # -- Additional hubble-ui backend environment variables.
+ extraEnv: []
+
+ # -- Additional hubble-ui backend volumes.
+ extraVolumes: []
+
+ # -- Additional hubble-ui backend volumeMounts.
+ extraVolumeMounts: []
+
+ livenessProbe:
+ # -- Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+)
+ enabled: false
+
+ readinessProbe:
+ # -- Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+)
+ enabled: false
+
+ # -- Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment.
+ resources: {}
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+
+ frontend:
+ # -- Hubble-ui frontend image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/hubble-ui"
+ tag: "v0.13.0"
+ digest: "sha256:7d663dc16538dd6e29061abd1047013a645e6e69c115e008bee9ea9fef9a6666"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+
+ # -- Hubble-ui frontend security context.
+ securityContext: {}
+
+ # -- Additional hubble-ui frontend environment variables.
+ extraEnv: []
+
+ # -- Additional hubble-ui frontend volumes.
+ extraVolumes: []
+
+ # -- Additional hubble-ui frontend volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment.
+ resources: {}
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+ server:
+ # -- Controls server listener for ipv6
+ ipv6:
+ enabled: true
+
+ # -- The number of replicas of Hubble UI to deploy.
+ replicas: 1
+
+ # -- Annotations to be added to all top-level hubble-ui objects (resources under templates/hubble-ui)
+ annotations: {}
+
+ # -- Annotations to be added to hubble-ui pods
+ podAnnotations: {}
+
+ # -- Labels to be added to hubble-ui pods
+ podLabels: {}
+
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+
+ # -- Affinity for hubble-ui
+ affinity: {}
+
+ # -- Pod topology spread constraints for hubble-ui
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Node labels for pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for pod assignment on nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+
+ # -- The priority class to use for hubble-ui
+ priorityClassName: ""
+
+ # -- hubble-ui update strategy.
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+
+ # -- Security context to be added to Hubble UI pods
+ securityContext:
+ runAsUser: 1001
+ runAsGroup: 1001
+ fsGroup: 1001
+
+ # -- hubble-ui service configuration.
+ service:
+ # -- Annotations to be added for the Hubble UI service
+ annotations: {}
+ # --- The type of service used for Hubble UI access, either ClusterIP or NodePort.
+ type: ClusterIP
+ # --- The port to use when the service type is set to NodePort.
+ nodePort: 31235
+
+ # -- Defines base url prefix for all hubble-ui http requests.
+ # It needs to be changed if the ingress for hubble-ui is configured under a sub-path.
+ # A trailing `/` is required for a custom path, e.g. `/service-map/`.
+ baseUrl: "/"
+
+ # -- hubble-ui ingress configuration.
+ ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+ className: ""
+ hosts:
+ - chart-example.local
+ labels: {}
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+ # -- Hubble flows export.
+ export:
+ # --- Defines max file size of output file before it gets rotated.
+ fileMaxSizeMb: 10
+ # --- Defines max number of backup/rotated files.
+ fileMaxBackups: 5
+ # --- Static exporter configuration.
+ # Static exporter is bound to agent lifecycle.
+ static:
+ enabled: false
+ filePath: /var/run/cilium/hubble/events.log
+ fieldMask: []
+ # - time
+ # - source
+ # - destination
+ # - verdict
+ allowList: []
+ # - '{"verdict":["DROPPED","ERROR"]}'
+ denyList: []
+ # - '{"source_pod":["kube-system/"]}'
+ # - '{"destination_pod":["kube-system/"]}'
+ # --- Dynamic exporters configuration.
+ # Dynamic exporters may be reconfigured without the need for agent restarts.
+ dynamic:
+ enabled: false
+ config:
+ # ---- Name of the configmap with configuration that may be altered to reconfigure exporters within running agents.
+ configMapName: cilium-flowlog-config
+ # ---- True if helm installer should create config map.
+ # Switch to false if you want to maintain the file content yourself.
+ createConfigMap: true
+ # ---- Exporters configuration in YAML format.
+ content:
+ - name: all
+ fieldMask: []
+ includeFilters: []
+ excludeFilters: []
+ filePath: "/var/run/cilium/hubble/events.log"
+ #- name: "test002"
+ # filePath: "/var/log/network/flow-log/pa/test002.log"
+ # fieldMask: ["source.namespace", "source.pod_name", "destination.namespace", "destination.pod_name", "verdict"]
+ # includeFilters:
+ # - source_pod: ["default/"]
+ # event_type:
+ # - type: 1
+ # - destination_pod: ["frontend/nginx-975996d4c-7hhgt"]
+ # excludeFilters: []
+ # end: "2023-10-09T23:59:59-07:00"
+
+ # -- Method to use for identity allocation (`crd` or `kvstore`).
+ identityAllocationMode: "crd"
+
+ # -- (string) Time to wait before using new identity on endpoint identity change.
+ # @default -- `"5s"`
+ identityChangeGracePeriod: ""
+
+ # -- Install Iptables rules to skip netfilter connection tracking on all pod
+ # traffic. This option is only effective when Cilium is running in direct
+ # routing and full KPR mode. Moreover, this option cannot be enabled when Cilium
+ # is running in a managed Kubernetes environment or in a chained CNI setup.
+ installNoConntrackIptablesRules: false
+
+ ipam:
+ # -- Configure IP Address Management mode.
+ # ref: https://docs.cilium.io/en/stable/network/concepts/ipam/
+ # For this pack, the default mode has been switched from "cluster-pool" to
+ # "kubernetes" so that Cilium respects the PodCIDR that is configured
+ # in the K8s pack.
+ mode: "kubernetes"
+ # The alternative below is the default for the Cilium helm chart
+ # mode: "cluster-pool"
+ # # -- Maximum rate at which the CiliumNode custom resource is updated.
+ # ciliumNodeUpdateRate: "15s"
+ # operator:
+ # # -- IPv4 CIDR list range to delegate to individual nodes for IPAM.
+ # clusterPoolIPv4PodCIDRList: ["10.0.0.0/8"]
+ # # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM.
+ # clusterPoolIPv4MaskSize: 24
+ # # -- IPv6 CIDR list range to delegate to individual nodes for IPAM.
+ # clusterPoolIPv6PodCIDRList: ["fd00::/104"]
+ # # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM.
+ # clusterPoolIPv6MaskSize: 120
+ # # -- IP pools to auto-create in multi-pool IPAM mode.
+ # autoCreateCiliumPodIPPools: {}
+ # # default:
+ # # ipv4:
+ # # cidrs:
+ # # - 10.10.0.0/8
+ # # maskSize: 24
+ # # other:
+ # # ipv6:
+ # # cidrs:
+ # # - fd00:100::/80
+ # # maskSize: 96
+ # # -- The maximum burst size when rate limiting access to external APIs.
+ # # Also known as the token bucket capacity.
+ # # @default -- `20`
+ # externalAPILimitBurstSize: ~
+ # # -- The maximum queries per second when rate limiting access to
+ # # external APIs. Also known as the bucket refill rate, which is used to
+ # # refill the bucket up to the burst size capacity.
+ # # @default -- `4.0`
+ # externalAPILimitQPS: ~
+
+ # -- The api-rate-limit option can be used to overwrite individual settings of the default configuration for rate limiting calls to the Cilium Agent API
+ apiRateLimit: ~
+
+ # -- Configure the eBPF-based ip-masq-agent
+ ipMasqAgent:
+ enabled: false
+ # the config of nonMasqueradeCIDRs
+ # config:
+ # nonMasqueradeCIDRs: []
+ # masqLinkLocal: false
+ # masqLinkLocalIPv6: false
+
+ # iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium.
+ # iptablesLockTimeout: "5s"
+
+ ipv4:
+ # -- Enable IPv4 support.
+ enabled: true
+
+ ipv6:
+ # -- Enable IPv6 support.
+ enabled: false
+
+ # -- Configure Kubernetes specific configuration
+ k8s: {}
+ # -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR
+ # range via the Kubernetes node resource
+ # requireIPv4PodCIDR: false
+
+ # -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR
+ # range via the Kubernetes node resource
+ # requireIPv6PodCIDR: false
+
+ # -- Keep the deprecated selector labels when deploying Cilium DaemonSet.
+ keepDeprecatedLabels: false
+
+ # -- Keep the deprecated probes when deploying Cilium DaemonSet
+ keepDeprecatedProbes: false
+
+ startupProbe:
+ # -- failure threshold of startup probe.
+ # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s)
+ failureThreshold: 105
+ # -- interval between checks of the startup probe
+ periodSeconds: 2
+ livenessProbe:
+ # -- failure threshold of liveness probe
+ failureThreshold: 10
+ # -- interval between checks of the liveness probe
+ periodSeconds: 30
+ readinessProbe:
+ # -- failure threshold of readiness probe
+ failureThreshold: 3
+ # -- interval between checks of the readiness probe
+ periodSeconds: 30
+
+ # -- Configure the kube-proxy replacement in Cilium BPF datapath
+ # Valid options are "true", "false", "disabled" (deprecated), "partial" (deprecated), "strict" (deprecated).
+ # ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/
+ #kubeProxyReplacement: "false"
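+ # [Example] Hedged sketch only: to run Cilium with full kube-proxy replacement,
+ # set the option below to "true" (assumes kube-proxy is not deployed on the
+ # cluster or is not required by other components).
+ # kubeProxyReplacement: "true"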
+
+ # -- healthz server bind address for the kube-proxy replacement.
+ # To enable, set the value to '0.0.0.0:10256' for all IPv4
+ # addresses and to '[::]:10256' for all IPv6 addresses.
+ # By default it is disabled.
+ kubeProxyReplacementHealthzBindAddr: ""
+
+ l2NeighDiscovery:
+ # -- Enable L2 neighbor discovery in the agent
+ enabled: true
+ # -- Override the agent's default neighbor resolution refresh period.
+ refreshPeriod: "30s"
+
+ # -- Enable Layer 7 network policy.
+ l7Proxy: true
+
+ # -- Enable Local Redirect Policy.
+ localRedirectPolicy: false
+
+ # To include or exclude matched resources from cilium identity evaluation
+ # labels: ""
+
+ # logOptions allows you to define logging options. eg:
+ # logOptions:
+ # format: json
+
+ # -- Enables periodic logging of system load
+ logSystemLoad: false
+
+ # -- Configure maglev consistent hashing
+ maglev: {}
+ # -- tableSize is the size (parameter M) for the backend table of one
+ # service entry
+ # tableSize:
+
+ # -- hashSeed is the cluster-wide base64 encoded seed for the hashing
+ # hashSeed:
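+ # [Example] Hedged sketch (values are illustrative, not defaults): maglev settings
+ # only take effect when loadBalancer.algorithm is set to "maglev", e.g.
+ # maglev:
+ #   tableSize: 65521
+ #   hashSeed: <base64-encoded-12-byte-seed>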
+
+ # -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
+ enableIPv4Masquerade: true
+
+ # -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
+ enableIPv6Masquerade: true
+
+ # -- Enables masquerading to the source of the route for traffic leaving the node from endpoints.
+ enableMasqueradeRouteSource: false
+
+ # -- Enables IPv4 BIG TCP support which increases maximum IPv4 GSO/GRO limits for nodes and pods
+ enableIPv4BIGTCP: false
+
+ # -- Enables IPv6 BIG TCP support which increases maximum IPv6 GSO/GRO limits for nodes and pods
+ enableIPv6BIGTCP: false
+
+ egressGateway:
+ # -- Enables egress gateway to redirect and SNAT the traffic that leaves the
+ # cluster.
+ enabled: false
+ # -- Deprecated; no replacement is necessary.
+ installRoutes: false
+ # -- Time between triggers of egress gateway state reconciliations
+ reconciliationTriggerInterval: 1s
+ # -- Maximum number of entries in egress gateway policy map
+ # maxPolicyEntries: 16384
+
+ vtep:
+ # -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow
+ # Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel.
+ enabled: false
+
+ # -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1"
+ endpoint: ""
+ # -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24"
+ cidr: ""
+ # -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0"
+ mask: ""
+ # -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y"
+ mac: ""
+
+ # -- (string) Allows to explicitly specify the IPv4 CIDR for native routing.
+ # When specified, Cilium assumes networking for this CIDR is preconfigured and
+ # hands traffic destined for that range to the Linux network stack without
+ # applying any SNAT.
+ # Generally speaking, specifying a native routing CIDR implies that Cilium can
+ # depend on the underlying networking stack to route packets to their
+ # destination. To offer a concrete example, if Cilium is configured to use
+ # direct routing and the Kubernetes CIDR is included in the native routing CIDR,
+ # the user must configure the routes to reach pods, either manually or by
+ # setting the auto-direct-node-routes flag.
+ ipv4NativeRoutingCIDR: ""
+
+ # -- (string) Allows to explicitly specify the IPv6 CIDR for native routing.
+ # When specified, Cilium assumes networking for this CIDR is preconfigured and
+ # hands traffic destined for that range to the Linux network stack without
+ # applying any SNAT.
+ # Generally speaking, specifying a native routing CIDR implies that Cilium can
+ # depend on the underlying networking stack to route packets to their
+ # destination. To offer a concrete example, if Cilium is configured to use
+ # direct routing and the Kubernetes CIDR is included in the native routing CIDR,
+ # the user must configure the routes to reach pods, either manually or by
+ # setting the auto-direct-node-routes flag.
+ ipv6NativeRoutingCIDR: ""
+
+ # -- cilium-monitor sidecar.
+ monitor:
+ # -- Enable the cilium-monitor sidecar.
+ enabled: false
+
+ # -- Configure service load balancing
+ loadBalancer:
+ # -- standalone enables the standalone L4LB which does not connect to
+ # kube-apiserver.
+ # standalone: false
+
+ # -- algorithm is the name of the load balancing algorithm for backend
+ # selection e.g. random or maglev
+ # algorithm: random
+
+ # -- mode is the operation mode of load balancing for remote backends
+ # e.g. snat, dsr, hybrid
+ # mode: snat
+
+ # -- acceleration is the option to accelerate service handling via XDP
+ # Applicable values can be: disabled (do not use XDP), native (XDP BPF
+ # program is run directly out of the networking driver's early receive
+ # path), or best-effort (use native mode XDP acceleration on devices
+ # that support it).
+ acceleration: disabled
+
+ # -- dsrDispatch configures whether IP option or IPIP encapsulation is
+ # used to pass a service IP and port to remote backend
+ # dsrDispatch: opt
+
+ # -- serviceTopology enables K8s Topology Aware Hints -based service
+ # endpoints filtering
+ # serviceTopology: false
+
+ # -- L7 LoadBalancer
+ l7:
+ # -- Enable L7 service load balancing via envoy proxy.
+ # Requests to a k8s service that has the relevant annotation, e.g. service.cilium.io/lb-l7,
+ # will be forwarded to the local backend proxy to be load balanced to the service endpoints.
+ # Please refer to docs for supported annotations for more configuration.
+ #
+ # Applicable values:
+ # - envoy: Enable L7 load balancing via envoy proxy. This will automatically set enable-envoy-config as well.
+ # - disabled: Disable L7 load balancing by way of service annotation.
+ backend: disabled
+ # -- List of ports from service to be automatically redirected to above backend.
+ # Any service exposing one of these ports will be automatically redirected.
+ # Fine-grained control can be achieved by using the service annotation.
+ ports: []
+ # -- Default LB algorithm
+ # The default LB algorithm to be used for services, which can be overridden by the
+ # service annotation (e.g. service.cilium.io/lb-l7-algorithm)
+ # Applicable values: round_robin, least_request, random
+ algorithm: round_robin
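+ # [Example] Illustrative sketch, not part of the chart defaults: once `backend`
+ # above is set to "envoy", an individual Service can opt in to L7 load balancing
+ # via the annotation mentioned above, e.g.
+ #   metadata:
+ #     annotations:
+ #       service.cilium.io/lb-l7: enabled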
+
+ # -- Configure N-S k8s service loadbalancing
+ nodePort:
+ # -- Enable the Cilium NodePort service implementation.
+ enabled: false
+
+ # -- Port range to use for NodePort services.
+ # range: "30000,32767"
+
+ # -- Set to true to prevent applications binding to service ports.
+ bindProtection: true
+
+ # -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral
+ # ports is detected.
+ autoProtectPortRange: true
+
+ # -- Enable healthcheck nodePort server for NodePort services
+ enableHealthCheck: true
+
+ # -- Enable access of the healthcheck nodePort on the LoadBalancerIP. Needs
+ # EnableHealthCheck to be enabled
+ enableHealthCheckLoadBalancerIP: false
+
+ # policyAuditMode: false
+
+ # -- The agent can be put into one of the three policy enforcement modes:
+ # default, always and never.
+ # ref: https://docs.cilium.io/en/stable/security/policy/intro/#policy-enforcement-modes
+ policyEnforcementMode: "default"
+
+ # -- policyCIDRMatchMode is a list of entities that may be selected by CIDR selector.
+ # The possible value is "nodes".
+ policyCIDRMatchMode:
+
+ pprof:
+ # -- Enable pprof for cilium-agent
+ enabled: false
+ # -- Configure pprof listen address for cilium-agent
+ address: localhost
+ # -- Configure pprof listen port for cilium-agent
+ port: 6060
+
+ # -- Configure prometheus metrics on the configured port at /metrics
+ prometheus:
+ enabled: false
+ port: 9962
+ serviceMonitor:
+ # -- Enable service monitors.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ enabled: false
+ # -- Labels to add to ServiceMonitor cilium-agent
+ labels: {}
+ # -- Annotations to add to ServiceMonitor cilium-agent
+ annotations: {}
+ # -- jobLabel to add for ServiceMonitor cilium-agent
+ jobLabel: ""
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Specify the Kubernetes namespace where Prometheus expects to find
+ # service monitors configured.
+ # namespace: ""
+ # -- Relabeling configs for the ServiceMonitor cilium-agent
+ relabelings:
+ - sourceLabels:
+ - __meta_kubernetes_pod_node_name
+ targetLabel: node
+ replacement: ${1}
+ # -- Metrics relabeling configs for the ServiceMonitor cilium-agent
+ metricRelabelings: ~
+ # -- Set to `true` and helm will not check for monitoring.coreos.com/v1 CRDs before deploying
+ trustCRDsExist: false
+
+ # -- Metrics that should be enabled or disabled from the default metric list.
+ # The list is expected to be separated by a space. (+metric_foo to enable
+ # metric_foo, -metric_bar to disable metric_bar).
+ # ref: https://docs.cilium.io/en/stable/observability/metrics/
+ metrics: ~
+
+ # --- Enable controller group metrics for monitoring specific Cilium
+ # subsystems. The list is a list of controller group names. The special
+ # values of "all" and "none" are supported. The set of controller
+ # group names is not guaranteed to be stable between Cilium versions.
+ controllerGroupMetrics:
+ - write-cni-file
+ - sync-host-ips
+ - sync-lb-maps-with-k8s-services
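+ # [Example] Hedged sketch for exposing cilium-agent metrics to the Prometheus
+ # Operator (requires the ServiceMonitor CRDs referenced above):
+ # prometheus:
+ #   enabled: true
+ #   serviceMonitor:
+ #     enabled: true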
+
+ # -- Grafana dashboards for cilium-agent
+ # grafana can import dashboards based on the label and value
+ # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
+ dashboards:
+ enabled: false
+ label: grafana_dashboard
+ namespace: ~
+ labelValue: "1"
+ annotations: {}
+
+ # -- Configure Istio proxy options.
+ proxy:
+
+ prometheus:
+ # -- Deprecated in favor of envoy.prometheus.enabled
+ enabled: true
+ # -- Deprecated in favor of envoy.prometheus.port
+ port: ~
+ # -- Regular expression matching compatible Istio sidecar istio-proxy
+ # container image names
+ sidecarImageRegex: "cilium/istio_proxy"
+
+ # Configure Cilium Envoy options.
+ envoy:
+ # -- Enable Envoy Proxy in standalone DaemonSet.
+ enabled: false
+
+ log:
+ # -- The format string to use for laying out the log message metadata of Envoy.
+ format: "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"
+ # -- Path to a separate Envoy log file, if any. Defaults to /dev/stdout.
+ path: ""
+
+ # -- Time in seconds after which a TCP connection attempt times out
+ connectTimeoutSeconds: 2
+ # -- ProxyMaxRequestsPerConnection specifies the max_requests_per_connection setting for Envoy
+ maxRequestsPerConnection: 0
+ # -- Set Envoy HTTP option max_connection_duration seconds. Default 0 (disable)
+ maxConnectionDurationSeconds: 0
+ # -- Set Envoy upstream HTTP idle connection timeout seconds.
+ # Does not apply to connections with pending requests. Default 60s
+ idleTimeoutDurationSeconds: 60
+
+ # -- Envoy container image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/cilium-envoy"
+ tag: "v1.27.3-99c1c8f42c8de70fc8f6dd594f4a425cd38b6688"
+ pullPolicy: "IfNotPresent"
+ digest: "sha256:877ead12d08d4c04a9f67f86d3c6e542aeb7bf97e1e401aee74de456f496ac30"
+ useDigest: true
+
+ # -- Additional containers added to the cilium Envoy DaemonSet.
+ extraContainers: []
+
+ # -- Additional envoy container arguments.
+ extraArgs: []
+
+ # -- Additional envoy container environment variables.
+ extraEnv: []
+
+ # -- Additional envoy hostPath mounts.
+ extraHostPathMounts: []
+ # - name: host-mnt-data
+ # mountPath: /host/mnt/data
+ # hostPath: /mnt/data
+ # hostPathType: Directory
+ # readOnly: true
+ # mountPropagation: HostToContainer
+
+ # -- Additional envoy volumes.
+ extraVolumes: []
+
+ # -- Additional envoy volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Configure termination grace period for cilium-envoy DaemonSet.
+ terminationGracePeriodSeconds: 1
+
+ # -- TCP port for the health API.
+ healthPort: 9878
+
+ # -- cilium-envoy update strategy
+ # ref: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#updating-a-daemonset
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 2
+ # -- Roll out cilium envoy pods automatically when configmap is updated.
+ rollOutPods: false
+
+ # -- Annotations to be added to all top-level cilium-envoy objects (resources under templates/cilium-envoy)
+ annotations: {}
+
+ # -- Security Context for cilium-envoy pods.
+ podSecurityContext: {}
+
+ # -- Annotations to be added to envoy pods
+ podAnnotations: {}
+
+ # -- Labels to be added to envoy pods
+ podLabels: {}
+
+ # -- Envoy resource limits & requests
+ # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ resources: {}
+ # limits:
+ # cpu: 4000m
+ # memory: 4Gi
+ # requests:
+ # cpu: 100m
+ # memory: 512Mi
+
+ startupProbe:
+ # -- failure threshold of startup probe.
+ # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s)
+ failureThreshold: 105
+ # -- interval between checks of the startup probe
+ periodSeconds: 2
+ livenessProbe:
+ # -- failure threshold of liveness probe
+ failureThreshold: 10
+ # -- interval between checks of the liveness probe
+ periodSeconds: 30
+ readinessProbe:
+ # -- failure threshold of readiness probe
+ failureThreshold: 3
+ # -- interval between checks of the readiness probe
+ periodSeconds: 30
+
+ securityContext:
+ # -- User to run the pod with
+ # runAsUser: 0
+ # -- Run the pod with elevated privileges
+ privileged: false
+ # -- SELinux options for the `cilium-envoy` container
+ seLinuxOptions:
+ level: 's0'
+ # Running with spc_t since we have removed the privileged mode.
+ # Users can change it to a different type as long as they have the
+ # type available on the system.
+ type: 'spc_t'
+ capabilities:
+ # -- Capabilities for the `cilium-envoy` container
+ envoy:
+ # Used because the Cilium proxy sets IPPROTO_IP/IP_TRANSPARENT
+ - NET_ADMIN
+ # We need it for now but might not need it for >= 5.11 specially
+ # for the 'SYS_RESOURCE'.
+ # In >= 5.8 there are already BPF and PERFMON capabilities
+ - SYS_ADMIN
+ # Both PERFMON and BPF require kernel 5.8, container runtime
+ # cri-o >= v1.22.0 or containerd >= v1.5.0.
+ # If available, SYS_ADMIN can be removed.
+ #- PERFMON
+ #- BPF
+
+ # -- Affinity for cilium-envoy.
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium-envoy
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: cilium.io/no-schedule
+ operator: NotIn
+ values:
+ - "true"
+ # -- Node selector for cilium-envoy.
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for envoy scheduling to nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- The priority class to use for cilium-envoy.
+ priorityClassName: ~
+
+ # -- DNS policy for Cilium envoy pods.
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
+ dnsPolicy: ~
+
+ # -- Configure Cilium Envoy Prometheus options.
+ # Note that some of these apply to either cilium-agent or cilium-envoy.
+ prometheus:
+ # -- Enable prometheus metrics for cilium-envoy
+ enabled: true
+ serviceMonitor:
+ # -- Enable service monitors.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ # Note that this setting applies to both cilium-envoy _and_ cilium-agent
+ # with Envoy enabled.
+ enabled: false
+ # -- Labels to add to ServiceMonitor cilium-envoy
+ labels: {}
+ # -- Annotations to add to ServiceMonitor cilium-envoy
+ annotations: {}
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Specify the Kubernetes namespace where Prometheus expects to find
+ # service monitors configured.
+ # namespace: ""
+ # -- Relabeling configs for the ServiceMonitor cilium-envoy
+ # or for cilium-agent with Envoy configured.
+ relabelings:
+ - sourceLabels:
+ - __meta_kubernetes_pod_node_name
+ targetLabel: node
+ replacement: ${1}
+ # -- Metrics relabeling configs for the ServiceMonitor cilium-envoy
+ # or for cilium-agent with Envoy configured.
+ metricRelabelings: ~
+ # -- Serve prometheus metrics for cilium-envoy on the configured port
+ port: "9964"
+
+ # -- Enable use of the remote node identity.
+ # ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
+ # Deprecated without replacement in 1.15. To be removed in 1.16.
+ remoteNodeIdentity: true
+
+ # -- Enable resource quotas for priority classes used in the cluster.
+ resourceQuotas:
+ enabled: false
+ cilium:
+ hard:
+ # 5k nodes * 2 DaemonSets (Cilium and cilium node init)
+ pods: "10k"
+ operator:
+ hard:
+ # 15 "clusterwide" Cilium Operator pods for HA
+ pods: "15"
+
+ # Need to document default
+ ##################
+ #sessionAffinity: false
+
+ # -- Do not run the Cilium agent when running in clean mode. Useful to completely
+ # uninstall Cilium, as it stops Cilium from starting and creating artifacts
+ # on the node.
+ sleepAfterInit: false
+
+ # -- Enable check of service source ranges (currently, only for LoadBalancer).
+ svcSourceRangeCheck: true
+
+ # -- Synchronize Kubernetes nodes to kvstore and perform CNP GC.
+ synchronizeK8sNodes: true
+
+ # -- Configure TLS configuration in the agent.
+ tls:
+ # -- This configures how the Cilium agent loads the secrets used by TLS-aware CiliumNetworkPolicies
+ # (namely the secrets referenced by terminatingTLS and originatingTLS).
+ # Possible values:
+ # - local
+ # - k8s
+ secretsBackend: local
+
+ # -- Base64 encoded PEM values for the CA certificate and private key.
+ # This can be used as common CA to generate certificates used by hubble and clustermesh components.
+ # It is neither required nor used when cert-manager is used to generate the certificates.
+ ca:
+ # -- Optional CA cert. If it is provided, it will be used by cilium to
+ # generate all other certificates. Otherwise, an ephemeral CA is generated.
+ cert: ""
+
+ # -- Optional CA private key. If it is provided, it will be used by cilium to
+ # generate all other certificates. Otherwise, an ephemeral CA is generated.
+ key: ""
+
+ # -- Generated certificates validity duration in days. This will be used for auto generated CA.
+ certValidityDuration: 1095
+
+ # -- Configure the CA trust bundle used for the validation of the certificates
+ # leveraged by hubble and clustermesh. When enabled, it overrides the content of the
+ # 'ca.crt' field of the respective certificates, allowing for CA rotation with no down-time.
+ caBundle:
+ # -- Enable the use of the CA trust bundle.
+ enabled: false
+
+ # -- Name of the ConfigMap containing the CA trust bundle.
+ name: cilium-root-ca.crt
+
+ # -- Entry of the ConfigMap containing the CA trust bundle.
+ key: ca.crt
+
+ # -- Use a Secret instead of a ConfigMap.
+ useSecret: false
+
+ # If uncommented, creates the ConfigMap and fills it with the specified content.
+ # Otherwise, the ConfigMap is assumed to be already present in .Release.Namespace.
+ #
+ # content: |
+ # -----BEGIN CERTIFICATE-----
+ # ...
+ # -----END CERTIFICATE-----
+ # -----BEGIN CERTIFICATE-----
+ # ...
+ # -----END CERTIFICATE-----
+
+ # -- Tunneling protocol to use in tunneling mode and for ad-hoc tunnels.
+ # Possible values:
+ # - ""
+ # - vxlan
+ # - geneve
+ # @default -- `"vxlan"`
+ tunnelProtocol: ""
+
+ # -- Enable native-routing mode or tunneling mode.
+ # Possible values:
+ # - ""
+ # - native
+ # - tunnel
+ # @default -- `"tunnel"`
+ routingMode: ""
+
+ # -- Configure VXLAN and Geneve tunnel port.
+ # @default -- Port 8472 for VXLAN, Port 6081 for Geneve
+ tunnelPort: 0
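+ # [Example] Hedged sketch of switching from the default VXLAN tunnel to native
+ # routing (the CIDR below is purely illustrative and must match your environment;
+ # autoDirectNodeRoutes is an assumed flag, referenced above as auto-direct-node-routes):
+ # routingMode: "native"
+ # ipv4NativeRoutingCIDR: "10.0.0.0/8"
+ # autoDirectNodeRoutes: true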
+
+ # -- Configure what the response should be to traffic for a service without backends.
+ # "reject" only works on kernels >= 5.10, on lower kernels we fallback to "drop".
+ # Possible values:
+ # - reject (default)
+ # - drop
+ serviceNoBackendResponse: reject
+
+ # -- Configure the underlying network MTU to overwrite auto-detected MTU.
+ MTU: 0
+
+ # -- Disable the usage of CiliumEndpoint CRD.
+ disableEndpointCRD: false
+
+ wellKnownIdentities:
+ # -- Enable the use of well-known identities.
+ enabled: false
+
+ etcd:
+ # -- Enable etcd mode for the agent.
+ enabled: false
+
+ # -- cilium-etcd-operator image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/cilium-etcd-operator"
+ tag: "v2.0.7"
+ digest: "sha256:04b8327f7f992693c2cb483b999041ed8f92efc8e14f2a5f3ab95574a65ea2dc"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+
+ # -- The priority class to use for cilium-etcd-operator
+ priorityClassName: ""
+
+ # -- Additional cilium-etcd-operator container arguments.
+ extraArgs: []
+
+ # -- Additional cilium-etcd-operator volumes.
+ extraVolumes: []
+
+ # -- Additional cilium-etcd-operator volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Node tolerations for cilium-etcd-operator scheduling to nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- Pod topology spread constraints for cilium-etcd-operator
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Node labels for cilium-etcd-operator pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Annotations to be added to all top-level etcd-operator objects (resources under templates/etcd-operator)
+ annotations: {}
+
+ # -- Security context to be added to cilium-etcd-operator pods
+ podSecurityContext: {}
+
+ # -- Annotations to be added to cilium-etcd-operator pods
+ podAnnotations: {}
+
+ # -- Labels to be added to cilium-etcd-operator pods
+ podLabels: {}
+
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+
+ # -- cilium-etcd-operator resource limits & requests
+ # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ resources: {}
+ # limits:
+ # cpu: 4000m
+ # memory: 4Gi
+ # requests:
+ # cpu: 100m
+ # memory: 512Mi
+
+ # -- Security context to be added to cilium-etcd-operator pods
+ securityContext: {}
+ # runAsUser: 0
+
+ # -- cilium-etcd-operator update strategy
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 1
+
+ # -- If etcd is behind a k8s service set this option to true so that Cilium
+ # does the service translation automatically without requiring a DNS to be
+ # running.
+ k8sService: false
+
+ # -- Cluster domain for cilium-etcd-operator.
+ clusterDomain: cluster.local
+
+ # -- List of etcd endpoints (not needed when using managed=true).
+ endpoints:
+ - https://CHANGE-ME:2379
+
+ # -- Enable use of TLS/SSL for connectivity to etcd. (auto-enabled if
+ # managed=true)
+ ssl: false
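+ # [Example] Hedged sketch for pointing the agent at an external etcd kvstore
+ # (the endpoint is illustrative; certificates/secrets are not covered here):
+ # etcd:
+ #   enabled: true
+ #   ssl: true
+ #   endpoints:
+ #     - https://etcd.example.internal:2379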
+
+ operator:
+ # -- Enable the cilium-operator component (required).
+ enabled: true
+
+ # -- Roll out cilium-operator pods automatically when configmap is updated.
+ rollOutPods: false
+
+ # -- cilium-operator image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/operator"
+ tag: "v1.15.3"
+ # operator-generic-digest
+ genericDigest: ""
+ # operator-azure-digest
+ azureDigest: ""
+ # operator-aws-digest
+ awsDigest: ""
+ # operator-alibabacloud-digest
+ alibabacloudDigest: ""
+ useDigest: false
+ pullPolicy: "IfNotPresent"
+ suffix: ""
+
+ # -- Number of replicas to run for the cilium-operator deployment
+ replicas: 2
+
+ # -- The priority class to use for cilium-operator
+ priorityClassName: ""
+
+ # -- DNS policy for Cilium operator pods.
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
+ dnsPolicy: ""
+
+ # -- cilium-operator update strategy
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: 25%
+ maxUnavailable: 50%
+
+ # -- Affinity for cilium-operator
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ io.cilium/app: operator
+
+ # -- Pod topology spread constraints for cilium-operator
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Node labels for cilium-operator pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for cilium-operator scheduling to nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- Additional cilium-operator container arguments.
+ extraArgs: []
+
+ # -- Additional cilium-operator environment variables.
+ extraEnv: []
+
+ # -- Additional cilium-operator hostPath mounts.
+ extraHostPathMounts: []
+ # - name: host-mnt-data
+ # mountPath: /host/mnt/data
+ # hostPath: /mnt/data
+ # hostPathType: Directory
+ # readOnly: true
+ # mountPropagation: HostToContainer
+
+ # -- Additional cilium-operator volumes.
+ extraVolumes: []
+
+ # -- Additional cilium-operator volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Annotations to be added to all top-level cilium-operator objects (resources under templates/cilium-operator)
+ annotations: {}
+
+ # -- Security context to be added to cilium-operator pods
+ podSecurityContext: {}
+
+ # -- Annotations to be added to cilium-operator pods
+ podAnnotations: {}
+
+ # -- Labels to be added to cilium-operator pods
+ podLabels: {}
+
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+
+ # -- cilium-operator resource limits & requests
+ # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ resources: {}
+ # limits:
+ # cpu: 1000m
+ # memory: 1Gi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ # -- Security context to be added to cilium-operator pods
+ securityContext: {}
+ # runAsUser: 0
+
+ # -- Interval for endpoint garbage collection.
+ endpointGCInterval: "5m0s"
+
+ # -- Interval for cilium node garbage collection.
+ nodeGCInterval: "5m0s"
+
+ # -- Skip CNP node status clean up at operator startup.
+ skipCNPStatusStartupClean: false
+
+ # -- Interval for identity garbage collection.
+ identityGCInterval: "15m0s"
+
+ # -- Timeout for identity heartbeats.
+ identityHeartbeatTimeout: "30m0s"
+
+ pprof:
+ # -- Enable pprof for cilium-operator
+ enabled: false
+ # -- Configure pprof listen address for cilium-operator
+ address: localhost
+ # -- Configure pprof listen port for cilium-operator
+ port: 6061
+
+ # -- Enable prometheus metrics for cilium-operator on the configured port at
+ # /metrics
+ prometheus:
+ enabled: true
+ port: 9963
+ serviceMonitor:
+ # -- Enable service monitors.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ enabled: false
+ # -- Labels to add to ServiceMonitor cilium-operator
+ labels: {}
+ # -- Annotations to add to ServiceMonitor cilium-operator
+ annotations: {}
+ # -- jobLabel to add for ServiceMonitor cilium-operator
+ jobLabel: ""
+ # -- Interval for scrape metrics.
+ interval: "10s"
+ # -- Relabeling configs for the ServiceMonitor cilium-operator
+ relabelings: ~
+ # -- Metrics relabeling configs for the ServiceMonitor cilium-operator
+ metricRelabelings: ~
+
+ # -- Grafana dashboards for cilium-operator
+ # grafana can import dashboards based on the label and value
+ # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
+ dashboards:
+ enabled: false
+ label: grafana_dashboard
+ namespace: ~
+ labelValue: "1"
+ annotations: {}
+
+ # -- Skip CRDs creation for cilium-operator
+ skipCRDCreation: false
+
+ # -- Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium
+ # pod running.
+ removeNodeTaints: true
+
+ # -- Taint nodes where Cilium is scheduled but not running. This prevents pods
+ # from being scheduled to nodes where Cilium is not the default CNI provider.
+ # @default -- same as removeNodeTaints
+ setNodeTaints: ~
+
+ # -- Set Node condition NetworkUnavailable to 'false' with the reason
+ # 'CiliumIsUp' for nodes that have a healthy Cilium pod.
+ setNodeNetworkStatus: true
+
+ unmanagedPodWatcher:
+ # -- Restart any pods that are not managed by Cilium.
+ restart: true
+ # -- Interval, in seconds, to check if there are any pods that are not
+ # managed by Cilium.
+ intervalSeconds: 15
+
+ nodeinit:
+ # -- Enable the node initialization DaemonSet
+ enabled: false
+
+ # -- node-init image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/startup-script"
+ tag: "62093c5c233ea914bfa26a10ba41f8780d9b737f"
+ pullPolicy: "IfNotPresent"
+
+ # -- The priority class to use for the nodeinit pod.
+ priorityClassName: ""
+
+ # -- node-init update strategy
+ updateStrategy:
+ type: RollingUpdate
+
+ # -- Additional nodeinit environment variables.
+ extraEnv: []
+
+ # -- Additional nodeinit volumes.
+ extraVolumes: []
+
+ # -- Additional nodeinit volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Affinity for cilium-nodeinit
+ affinity: {}
+
+ # -- Node labels for nodeinit pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for nodeinit scheduling to nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- Annotations to be added to all top-level nodeinit objects (resources under templates/cilium-nodeinit)
+ annotations: {}
+
+ # -- Annotations to be added to node-init pods.
+ podAnnotations: {}
+
+ # -- Labels to be added to node-init pods.
+ podLabels: {}
+
+ # -- nodeinit resource limits & requests
+ # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ resources:
+ requests:
+ cpu: 100m
+ memory: 100Mi
+
+ # -- Security context to be added to nodeinit pods.
+ securityContext:
+ privileged: false
+ seLinuxOptions:
+ level: 's0'
+ # Running with spc_t since we have removed the privileged mode.
+ # Users can change it to a different type as long as they have the
+ # type available on the system.
+ type: 'spc_t'
+ capabilities:
+ add:
+ # Used in iptables. Consider removing once we are iptables-free
+ - SYS_MODULE
+ # Used for nsenter
+ - NET_ADMIN
+ - SYS_ADMIN
+ - SYS_CHROOT
+ - SYS_PTRACE
+
+ # -- bootstrapFile is the location of the file where the bootstrap timestamp is
+ # written by the node-init DaemonSet
+ bootstrapFile: "/tmp/cilium-bootstrap.d/cilium-bootstrap-time"
+
+ # -- startup offers a way to customize the nodeinit startup script (pre and post positions)
+ startup:
+ preScript: ""
+ postScript: ""
+ # -- prestop offers a way to customize the nodeinit prestop script (pre and post positions)
+ prestop:
+ preScript: ""
+ postScript: ""
+
+ preflight:
+ # -- Enable Cilium pre-flight resources (required for upgrade)
+ enabled: false
+
+ # -- Cilium pre-flight image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/cilium"
+ tag: "v1.15.3"
+ # cilium-digest
+ digest: ""
+ useDigest: false
+ pullPolicy: "IfNotPresent"
+
+ # -- The priority class to use for the preflight pod.
+ priorityClassName: ""
+
+ # -- preflight update strategy
+ updateStrategy:
+ type: RollingUpdate
+
+ # -- Additional preflight environment variables.
+ extraEnv: []
+
+ # -- Additional preflight volumes.
+ extraVolumes: []
+
+ # -- Additional preflight volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Affinity for cilium-preflight
+ affinity:
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
+
+ # -- Node labels for preflight pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for preflight scheduling to nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - key: node.kubernetes.io/not-ready
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ effect: NoSchedule
+ value: "true"
+ - key: CriticalAddonsOnly
+ operator: "Exists"
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+ # -- Annotations to be added to all top-level preflight objects (resources under templates/cilium-preflight)
+ annotations: {}
+
+ # -- Security context to be added to preflight pods.
+ podSecurityContext: {}
+
+ # -- Annotations to be added to preflight pods
+ podAnnotations: {}
+
+ # -- Labels to be added to the preflight pod.
+ podLabels: {}
+
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
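+ # Illustrative example (not part of the chart defaults): to keep at least half of the
+ # preflight pods scheduled, set minAvailable and disable maxUnavailable, as noted above:
+ # minAvailable: "50%"
+ # maxUnavailable: null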
+
+ # -- preflight resource limits & requests
+ # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ resources: {}
+ # limits:
+ # cpu: 4000m
+ # memory: 4Gi
+ # requests:
+ # cpu: 100m
+ # memory: 512Mi
+
+ # -- Security context to be added to preflight pods
+ securityContext: {}
+ # runAsUser: 0
+
+ # -- Path to write the `--tofqdns-pre-cache` file to.
+ tofqdnsPreCache: ""
+
+ # -- Configure termination grace period for preflight Deployment and DaemonSet.
+ terminationGracePeriodSeconds: 1
+
+ # -- By default we should always validate the installed CNPs before upgrading
+ # Cilium. This will make sure the user will have the policies deployed in the
+ # cluster with the right schema.
+ validateCNPs: true
+
+ # -- Explicitly enable or disable priority class.
+ # .Capabilities.KubeVersion is unsettable in `helm template` calls,
+ # it depends on the k8s library version that Helm was compiled against.
+ # This option allows explicitly disabling the priority class, which
+ # is useful for rendering charts for gke clusters in advance.
+ enableCriticalPriorityClass: true
+
+ # disableEnvoyVersionCheck removes the check for Envoy, which can be useful
+ # on AArch64 as the images do not currently ship a version of Envoy.
+ #disableEnvoyVersionCheck: false
+
+ clustermesh:
+ # -- Deploy clustermesh-apiserver for clustermesh
+ useAPIServer: false
+ # -- The maximum number of clusters to support in a ClusterMesh. This value
+ # cannot be changed on running clusters, and all clusters in a ClusterMesh
+ # must be configured with the same value. Values > 255 will decrease the
+ # maximum allocatable cluster-local identities.
+ # Supported values are 255 and 511.
+ maxConnectedClusters: 255
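+ # Illustrative example (assumes a larger mesh is actually needed): the only other
+ # supported value is
+ # maxConnectedClusters: 511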
+
+ # -- Annotations to be added to all top-level clustermesh objects (resources under templates/clustermesh-apiserver and templates/clustermesh-config)
+ annotations: {}
+
+ # -- Clustermesh explicit configuration.
+ config:
+ # -- Enable the Clustermesh explicit configuration.
+ enabled: false
+ # -- Default dns domain for the Clustermesh API servers
+ # This is used in the case cluster addresses are not provided
+ # and IPs are used.
+ domain: mesh.cilium.io
+ # -- List of clusters to be peered in the mesh.
+ clusters: []
+ # clusters:
+ # # -- Name of the cluster
+ # - name: cluster1
+ # # -- Address of the cluster, use this if you created DNS records for
+ # # the cluster Clustermesh API server.
+ # address: cluster1.mesh.cilium.io
+ # # -- Port of the cluster Clustermesh API server.
+ # port: 2379
+ # # -- IPs of the cluster Clustermesh API server, use multiple ones when
+ # # you have multiple IPs to access the Clustermesh API server.
+ # ips:
+ # - 172.18.255.201
+ # # -- base64 encoded PEM values for the cluster client certificate, private key and certificate authority.
+ # # These fields can (and should) be omitted in case the CA is shared across clusters. In that case, the
+ # # "remote" private key and certificate available in the local cluster are automatically used instead.
+ # tls:
+ # cert: ""
+ # key: ""
+ # caCert: ""
+
+ apiserver:
+ # -- Clustermesh API server image.
+ image:
+ override: ~
+ repository: "quay.io/cilium/clustermesh-apiserver"
+ tag: "v1.15.3"
+ # clustermesh-apiserver-digest
+ digest: ""
+ useDigest: false
+ pullPolicy: "IfNotPresent"
+
+ etcd:
+ # The etcd binary is included in the clustermesh API server image, so the same image from above is reused.
+ # Independent override isn't supported, because clustermesh-apiserver is tested against the etcd version it is
+ # built with.
+
+ # -- Specifies the resources for etcd container in the apiserver
+ resources: {}
+ # requests:
+ # cpu: 200m
+ # memory: 256Mi
+ # limits:
+ # cpu: 1000m
+ # memory: 256Mi
+
+ # -- Security context to be added to clustermesh-apiserver etcd containers
+ securityContext: {}
+
+ # -- lifecycle setting for the etcd container
+ lifecycle: {}
+
+ init:
+ # -- Specifies the resources for etcd init container in the apiserver
+ resources: {}
+ # requests:
+ # cpu: 100m
+ # memory: 100Mi
+ # limits:
+ # cpu: 100m
+ # memory: 100Mi
+
+ # -- Additional arguments to `clustermesh-apiserver etcdinit`.
+ extraArgs: []
+
+ # -- Additional environment variables to `clustermesh-apiserver etcdinit`.
+ extraEnv: []
+
+ kvstoremesh:
+ # -- Enable KVStoreMesh. KVStoreMesh caches the information retrieved
+ # from the remote clusters in the local etcd instance.
+ enabled: false
+
+ # -- Additional KVStoreMesh arguments.
+ extraArgs: []
+
+ # -- Additional KVStoreMesh environment variables.
+ extraEnv: []
+
+ # -- Resource requests and limits for the KVStoreMesh container
+ resources: {}
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
+
+ # -- Additional KVStoreMesh volumeMounts.
+ extraVolumeMounts: []
+
+ # -- KVStoreMesh Security context
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+
+ # -- lifecycle setting for the KVStoreMesh container
+ lifecycle: {}
+
+ service:
+ # -- The type of service used for apiserver access.
+ type: NodePort
+ # -- Optional port to use as the node port for apiserver access.
+ #
+ # WARNING: make sure to configure a different NodePort in each cluster if
+ # kube-proxy replacement is enabled, as Cilium is currently affected by a known
+ # bug (#24692) when NodePorts are handled by the KPR implementation. If a service
+ # with the same NodePort exists both in the local and the remote cluster, all
+ # traffic originating from inside the cluster and targeting the corresponding
+ # NodePort will be redirected to a local backend, regardless of whether the
+ # destination node belongs to the local or the remote cluster.
+ nodePort: 32379
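+ # Illustrative example (port number is arbitrary): per the warning above, a second
+ # cluster in the mesh should use a different NodePort when kube-proxy replacement is
+ # enabled, e.g.
+ # nodePort: 32380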
+ # -- Optional loadBalancer IP address to use with type LoadBalancer.
+ # loadBalancerIP:
+
+ # -- Annotations for the clustermesh-apiserver
+ # For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal"
+ # For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
+ annotations: {}
+
+ # -- The externalTrafficPolicy of service used for apiserver access.
+ externalTrafficPolicy:
+
+ # -- The internalTrafficPolicy of service used for apiserver access.
+ internalTrafficPolicy:
+
+ # -- Number of replicas run for the clustermesh-apiserver deployment.
+ replicas: 1
+
+ # -- lifecycle setting for the apiserver container
+ lifecycle: {}
+
+ # -- terminationGracePeriodSeconds for the clustermesh-apiserver deployment
+ terminationGracePeriodSeconds: 30
+
+ # -- Additional clustermesh-apiserver arguments.
+ extraArgs: []
+
+ # -- Additional clustermesh-apiserver environment variables.
+ extraEnv: []
+
+ # -- Additional clustermesh-apiserver volumes.
+ extraVolumes: []
+
+ # -- Additional clustermesh-apiserver volumeMounts.
+ extraVolumeMounts: []
+
+ # -- Security context to be added to clustermesh-apiserver containers
+ securityContext: {}
+
+ # -- Security context to be added to clustermesh-apiserver pods
+ podSecurityContext: {}
+
+ # -- Annotations to be added to clustermesh-apiserver pods
+ podAnnotations: {}
+
+ # -- Labels to be added to clustermesh-apiserver pods
+ podLabels: {}
+
+ # PodDisruptionBudget settings
+ podDisruptionBudget:
+ # -- enable PodDisruptionBudget
+ # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+ enabled: false
+ # -- Minimum number/percentage of pods that should remain scheduled.
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ minAvailable: null
+ # -- Maximum number/percentage of pods that may be made unavailable
+ maxUnavailable: 1
+
+ # -- Resource requests and limits for the clustermesh-apiserver container of the clustermesh-apiserver deployment, such as
+ # resources:
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+ # -- Resource requests and limits for the clustermesh-apiserver
+ resources: {}
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
+
+ # -- Affinity for clustermesh.apiserver
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: clustermesh-apiserver
+
+ # -- Pod topology spread constraints for clustermesh-apiserver
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Node labels for pod assignment
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector:
+ kubernetes.io/os: linux
+
+ # -- Node tolerations for pod assignment on nodes with taints
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+
+ # -- clustermesh-apiserver update strategy
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+
+ # -- The priority class to use for clustermesh-apiserver
+ priorityClassName: ""
+
+ tls:
+ # -- Configure the clustermesh authentication mode.
+ # Supported values:
+ # - legacy: All clusters access remote clustermesh instances with the same
+ # username (i.e., remote). The "remote" certificate must be
+ # generated with CN=remote if provided manually.
+ # - migration: Intermediate mode required to upgrade from legacy to cluster
+ # (and vice versa) with no disruption. Specifically, it enables
+ # the creation of the per-cluster usernames, while still using
+ # the common one for authentication. The "remote" certificate must
+ # be generated with CN=remote if provided manually (same as legacy).
+ # - cluster: Each cluster accesses remote etcd instances with a username
+ # depending on the local cluster name (i.e., remote-<cluster-name>).
+ # The "remote" certificate must be generated with CN=remote-<cluster-name>
+ # if provided manually. Cluster mode is meaningful only when the same
+ # CA is shared across all clusters part of the mesh.
+ authMode: legacy
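+ # Illustrative example (assumes every cluster in the mesh shares the same CA): switch
+ # to per-cluster usernames as described above with
+ # authMode: cluster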
+
+ # -- Configure automatic TLS certificate generation.
+ # A Kubernetes CronJob is used to generate any
+ # certificates not provided by the user at installation
+ # time.
+ auto:
+ # -- When set to true, automatically generate a CA and certificates to
+ # enable mTLS between clustermesh-apiserver and external workload instances.
+ # If set to false, the certs must be provided by setting the appropriate values below.
+ enabled: true
+ # Sets the method to auto-generate certificates. Supported values:
+ # - helm: This method uses Helm to generate all certificates.
+ # - cronJob: This method uses a Kubernetes CronJob to generate any
+ # certificates not provided by the user at installation
+ # time.
+ # - certmanager: This method uses cert-manager to generate & rotate certificates.
+ method: helm
+ # -- Generated certificates validity duration in days.
+ certValidityDuration: 1095
+ # -- Schedule for certificates regeneration (regardless of their expiration date).
+ # Only used if method is "cronJob". If nil, then no recurring job will be created.
+ # Instead, only the one-shot job is deployed to generate the certificates at
+ # installation time.
+ #
+ # Due to the out-of-band distribution of client certs to external workloads the
+ # CA is (re)generated only if it is not provided as a helm value and the k8s
+ # secret is manually deleted.
+ #
+ # Defaults to none. Commented syntax gives midnight of the first day of every
+ # fourth month. For syntax, see
+ # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax
+ # schedule: "0 0 1 */4 *"
+
+ # [Example]
+ # certManagerIssuerRef:
+ # group: cert-manager.io
+ # kind: ClusterIssuer
+ # name: ca-issuer
+ # -- certmanager issuer used when clustermesh.apiserver.tls.auto.method=certmanager.
+ certManagerIssuerRef: {}
+ # -- base64 encoded PEM values for the clustermesh-apiserver server certificate and private key.
+ # Used if 'auto' is not enabled.
+ server:
+ cert: ""
+ key: ""
+ # -- Extra DNS names added to certificate when it's auto generated
+ extraDnsNames: []
+ # -- Extra IP addresses added to certificate when it's auto generated
+ extraIpAddresses: []
+ # -- base64 encoded PEM values for the clustermesh-apiserver admin certificate and private key.
+ # Used if 'auto' is not enabled.
+ admin:
+ cert: ""
+ key: ""
+ # -- base64 encoded PEM values for the clustermesh-apiserver client certificate and private key.
+ # Used if 'auto' is not enabled.
+ client:
+ cert: ""
+ key: ""
+ # -- base64 encoded PEM values for the clustermesh-apiserver remote cluster certificate and private key.
+ # Used if 'auto' is not enabled.
+ remote:
+ cert: ""
+ key: ""
+
+ # clustermesh-apiserver Prometheus metrics configuration
+ metrics:
+ # -- Enables exporting apiserver metrics in OpenMetrics format.
+ enabled: true
+ # -- Configure the port the apiserver metric server listens on.
+ port: 9962
+
+ kvstoremesh:
+ # -- Enables exporting KVStoreMesh metrics in OpenMetrics format.
+ enabled: true
+ # -- Configure the port the KVStoreMesh metric server listens on.
+ port: 9964
+
+ etcd:
+ # -- Enables exporting etcd metrics in OpenMetrics format.
+ enabled: true
+ # -- Set level of detail for etcd metrics; specify 'extensive' to include server side gRPC histogram metrics.
+ mode: basic
+ # -- Configure the port the etcd metric server listens on.
+ port: 9963
+
+ serviceMonitor:
+ # -- Enable service monitor.
+ # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
+ enabled: false
+ # -- Labels to add to ServiceMonitor clustermesh-apiserver
+ labels: {}
+ # -- Annotations to add to ServiceMonitor clustermesh-apiserver
+ annotations: {}
+ # -- Specify the Kubernetes namespace where Prometheus expects to find
+ # service monitors configured.
+ # namespace: ""
+
+ # -- Interval for scrape metrics (apiserver metrics)
+ interval: "10s"
+ # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics)
+ relabelings: ~
+ # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics)
+ metricRelabelings: ~
+
+ kvstoremesh:
+ # -- Interval for scrape metrics (KVStoreMesh metrics)
+ interval: "10s"
+ # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics)
+ relabelings: ~
+ # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics)
+ metricRelabelings: ~
+
+ etcd:
+ # -- Interval for scrape metrics (etcd metrics)
+ interval: "10s"
+ # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics)
+ relabelings: ~
+ # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics)
+ metricRelabelings: ~
+
+ # -- Configure external workloads support
+ externalWorkloads:
+ # -- Enable support for external workloads, such as VMs (false by default).
+ enabled: false
+
+ # -- Configure cgroup related configuration
+ cgroup:
+ autoMount:
+ # -- Enable auto mount of cgroup2 filesystem.
+ # When `autoMount` is enabled, cgroup2 filesystem is mounted at
+ # `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod.
+ # If users disable `autoMount`, it's expected that users have mounted
+ # cgroup2 filesystem at the specified `cgroup.hostRoot` volume, and then the
+ # volume will be mounted inside the cilium agent pod at the same path.
+ enabled: true
+ # -- Init Container Cgroup Automount resource limits & requests
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+ # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`)
+ hostRoot: /run/cilium/cgroupv2
+
+ # -- Configure whether to enable auto detect of terminating state for endpoints
+ # in order to support graceful termination.
+ enableK8sTerminatingEndpoint: true
+
+ # -- Configure whether to unload DNS policy rules on graceful shutdown
+ # dnsPolicyUnloadOnShutdown: false
+
+ # -- Configure the key of the taint indicating that Cilium is not ready on the node.
+ # When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint on its decisions, allowing the cluster to scale up.
+ agentNotReadyTaintKey: "node.cilium.io/agent-not-ready"
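+ # Illustrative example (the key suffix is arbitrary): to have the Cluster Autoscaler
+ # ignore this taint, prefix the key as described above, e.g.
+ # agentNotReadyTaintKey: "ignore-taint.cluster-autoscaler.kubernetes.io/agent-not-ready"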
+
+ dnsProxy:
+ # -- DNS response code for rejecting DNS requests, available options are '[nameError refused]'.
+ dnsRejectResponseCode: refused
+ # -- Allow the DNS proxy to compress responses to endpoints that are larger than 512 Bytes or the EDNS0 option, if present.
+ enableDnsCompression: true
+ # -- Maximum number of IPs to maintain per FQDN name for each endpoint.
+ endpointMaxIpPerHostname: 50
+ # -- Time during which idle but previously active connections with expired DNS lookups are still considered alive.
+ idleConnectionGracePeriod: 0s
+ # -- Maximum number of IPs to retain for expired DNS lookups with still-active connections.
+ maxDeferredConnectionDeletes: 10000
+ # -- The minimum time, in seconds, to use DNS data for toFQDNs policies. If
+ # the upstream DNS server returns a DNS record with a shorter TTL, Cilium
+ # overwrites the TTL with this value. Setting this value to zero means that
+ # Cilium will honor the TTLs returned by the upstream DNS server.
+ minTtl: 0
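+ # Illustrative example (value is arbitrary): keep toFQDNs DNS data for at least one hour,
+ # even if the upstream DNS server returns shorter TTLs:
+ # minTtl: 3600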
+ # -- DNS cache data at this path is preloaded on agent startup.
+ preCache: ""
+ # -- Global port on which the in-agent DNS proxy should listen. Default 0 is an OS-assigned port.
+ proxyPort: 0
+ # -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
+ proxyResponseMaxDelay: 100ms
+ # -- DNS proxy operation mode (true/false, or unset to use version dependent defaults)
+ # enableTransparentMode: true
+
+ # -- SCTP Configuration Values
+ sctp:
+ # -- Enable SCTP support. NOTE: Currently, SCTP support does not support rewriting ports or multihoming.
+ enabled: false
+
+ # Configuration for types of authentication for Cilium (beta)
+ authentication:
+ # -- Enable authentication processing and garbage collection.
+ # Note that if disabled, policy enforcement will still block requests that require authentication.
+ # But the resulting authentication requests for these requests will not be processed, therefore the requests will not be allowed.
+ enabled: true
+ # -- Buffer size of the channel Cilium uses to receive authentication events from the signal map.
+ queueSize: 1024
+ # -- Buffer size of the channel Cilium uses to receive certificate expiration events from auth handlers.
+ rotatedIdentitiesQueueSize: 1024
+ # -- Interval for garbage collection of auth map entries.
+ gcInterval: "5m0s"
+ # Configuration for Cilium's service-to-service mutual authentication using TLS handshakes.
+ # Note that this is not full mTLS support without also enabling encryption of some form.
+ # Current encryption options are Wireguard or IPSec, configured in encryption block above.
+ mutual:
+ # -- Port on the agent where mutual authentication handshakes between agents will be performed
+ port: 4250
+ # -- Timeout for connecting to the remote node TCP socket
+ connectTimeout: 5s
+ # Settings for SPIRE
+ spire:
+ # -- Enable SPIRE integration (beta)
+ enabled: false
+ # -- Annotations to be added to all top-level spire objects (resources under templates/spire)
+ annotations: {}
+ # Settings to control the SPIRE installation and configuration
+ install:
+ # -- Enable SPIRE installation.
+ # This will take effect only if authentication.mutual.spire.enabled is true
+ enabled: true
+ # -- SPIRE namespace to install into
+ namespace: cilium-spire
+ # -- SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace.
+ existingNamespace: false
+ # -- init container image of SPIRE agent and server
+ initImage:
+ override: ~
+ repository: "docker.io/library/busybox"
+ tag: "1.36.1"
+ digest: "sha256:223ae047b1065bd069aac01ae3ac8088b3ca4a527827e283b85112f29385fb1b"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+ # SPIRE agent configuration
+ agent:
+ # -- SPIRE agent image
+ image:
+ override: ~
+ repository: "ghcr.io/spiffe/spire-agent"
+ tag: "1.8.5"
+ digest: "sha256:99405637647968245ff9fe215f8bd2bd0ea9807be9725f8bf19fe1b21471e52b"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+ # -- SPIRE agent service account
+ serviceAccount:
+ create: true
+ name: spire-agent
+ # -- SPIRE agent annotations
+ annotations: {}
+ # -- SPIRE agent labels
+ labels: {}
+ # -- SPIRE Workload Attestor kubelet verification.
+ skipKubeletVerification: true
+ # -- SPIRE agent tolerations configuration
+ # By default it follows the same tolerations as the agent itself
+ # to allow the Cilium agent on this node to connect to SPIRE.
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations:
+ - key: node.kubernetes.io/not-ready
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ effect: NoSchedule
+ value: "true"
+ - key: CriticalAddonsOnly
+ operator: "Exists"
+ # -- SPIRE agent affinity configuration
+ affinity: {}
+ # -- SPIRE agent nodeSelector configuration
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector: {}
+ # -- Security context to be added to spire agent pods.
+ # SecurityContext holds pod-level security attributes and common container settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ podSecurityContext: {}
+ # -- Security context to be added to spire agent containers.
+ # SecurityContext holds pod-level security attributes and common container settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ securityContext: {}
+ server:
+ # -- SPIRE server image
+ image:
+ override: ~
+ repository: "ghcr.io/spiffe/spire-server"
+ tag: "1.8.5"
+ digest: "sha256:28269265882048dcf0fed32fe47663cd98613727210b8d1a55618826f9bf5428"
+ useDigest: true
+ pullPolicy: "IfNotPresent"
+ # -- SPIRE server service account
+ serviceAccount:
+ create: true
+ name: spire-server
+ # -- SPIRE server init containers
+ initContainers: []
+ # -- SPIRE server annotations
+ annotations: {}
+ # -- SPIRE server labels
+ labels: {}
+ # SPIRE server service configuration
+ service:
+ # -- Service type for the SPIRE server service
+ type: ClusterIP
+ # -- Annotations to be added to the SPIRE server service
+ annotations: {}
+ # -- Labels to be added to the SPIRE server service
+ labels: {}
+ # -- SPIRE server affinity configuration
+ affinity: {}
+ # -- SPIRE server nodeSelector configuration
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector: {}
+ # -- SPIRE server tolerations configuration
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ tolerations: []
+ # SPIRE server datastorage configuration
+ dataStorage:
+ # -- Enable SPIRE server data storage
+ enabled: true
+ # -- Size of the SPIRE server data storage
+ size: 1Gi
+ # -- Access mode of the SPIRE server data storage
+ accessMode: ReadWriteOnce
+ # -- StorageClass of the SPIRE server data storage
+ storageClass: null
+ # -- Security context to be added to spire server pods.
+ # SecurityContext holds pod-level security attributes and common container settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ podSecurityContext: {}
+ # -- Security context to be added to spire server containers.
+ # SecurityContext holds pod-level security attributes and common container settings.
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ securityContext: {}
+ # SPIRE CA configuration
+ ca:
+ # -- SPIRE CA key type
+ # AWS requires the use of RSA. EC cryptography is not supported
+ keyType: "rsa-4096"
+ # -- SPIRE CA Subject
+ subject:
+ country: "US"
+ organization: "SPIRE"
+ commonName: "Cilium SPIRE CA"
+ # -- SPIRE server address used by Cilium Operator
+ #
+ # If k8s Service DNS along with port number is used (e.g. <service-name>.<namespace>.svc(.*):<port-number> format),
+ # Cilium Operator will resolve its address by looking up the clusterIP from Service resource.
+ #
+ # Example values: 10.0.0.1:8081, spire-server.cilium-spire.svc:8081
+ serverAddress: ~
+ # -- SPIFFE trust domain to use for fetching certificates
+ trustDomain: spiffe.cilium
+ # -- SPIRE socket path where the SPIRE delegated api agent is listening
+ adminSocketPath: /run/spire/sockets/admin.sock
+ # -- SPIRE socket path where the SPIRE workload agent is listening.
+ # Applies to both the Cilium Agent and Operator
+ agentSocketPath: /run/spire/sockets/agent/agent.sock
+ # -- SPIRE connection timeout
+ connectionTimeout: 30s
diff --git a/terraform/vmo-cluster/manifests/csi-values.yaml b/terraform/vmo-cluster/manifests/csi-values.yaml
new file mode 100644
index 0000000..9b64075
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/csi-values.yaml
@@ -0,0 +1,1317 @@
+pack:
+ content:
+ images: []
+
+ charts:
+ - repo: https://charts.rook.io/release
+ name: rook-release/rook-ceph
+ version: 1.14.9
+ - repo: https://charts.rook.io/release
+ name: rook-release/rook-ceph-cluster
+ version: 1.14.9
+
+ namespace: rook-ceph
+ namespaceLabels:
+ "rook-ceph": "pod-security.kubernetes.io/enforce=privileged,pod-security.kubernetes.io/enforce-version=v{{ .spectro.system.kubernetes.version | substr 0 4 }}"
+
+charts:
+ rook-ceph:
+ # Default values for rook-ceph-operator
+ image:
+ # -- Image
+ repository: rook/ceph
+ # -- Image tag
+ # @default -- `master`
+ tag: v1.14.9
+ # -- Image pull policy
+ pullPolicy: IfNotPresent
+
+ crds:
+ # -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
+ # managed independently with deploy/examples/crds.yaml.
+ # **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
+ # If the CRDs are deleted in this case, see
+ # [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
+ # to restore them.
+ enabled: true
+
+ # -- Pod resource requests & limits
+ resources:
+ limits:
+ memory: 512Mi
+ requests:
+ cpu: 200m
+ memory: 128Mi
+
+ # -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
+ nodeSelector: {}
+ # Constrain the rook-ceph-operator Deployment to nodes with the label `disktype: ssd`.
+ # For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ # disktype: ssd
+
+ # -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
+ tolerations: []
+
+ # -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
+ # the Kubernetes default of 5 minutes
+ unreachableNodeTolerationSeconds: 5
+
+ # -- Whether the operator should watch cluster CRD in its own namespace or not
+ currentNamespaceOnly: false
+
+ # -- Pod annotations
+ annotations: {}
+
+ # -- Global log level for the operator.
+ # Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
+ logLevel: INFO
+
+ # -- If true, create & use RBAC resources
+ rbacEnable: true
+
+ rbacAggregate:
+ # -- If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims
+ enableOBCs: false
+
+ # -- If true, create & use PSP resources
+ pspEnable: false
+
+ # -- Set the priority class for the rook operator deployment if desired
+ priorityClassName:
+
+ # -- Set the container security context for the operator
+ containerSecurityContext:
+ runAsNonRoot: true
+ runAsUser: 2016
+ runAsGroup: 2016
+ capabilities:
+ drop: [ "ALL" ]
+ # -- If true, loop devices are allowed to be used for osds in test clusters
+ allowLoopDevices: false
+
+ # Settings for whether to disable the drivers or other daemons if they are not
+ # needed
+ csi:
+ # -- Enable Ceph CSI RBD driver
+ enableRbdDriver: true
+ # -- Enable Ceph CSI CephFS driver
+ enableCephfsDriver: true
+ # -- Disable the CSI driver.
+ disableCsiDriver: "false"
+
+ # -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
+ # in some network configurations where the SDN does not provide access to an external cluster or
+ # there is a significant drop in read/write performance
+ enableCSIHostNetwork: true
+ # -- Deprecation note: Rook uses "holder" pods to allow CSI to connect to the multus public network
+ # without needing hosts to be attached to the network. Holder pods are being removed. See issue for details:
+ # https://github.com/rook/rook/issues/13055. New Rook deployments should set this to "true".
+ disableHolderPods: true
+ # -- Enable Snapshotter in CephFS provisioner pod
+ enableCephfsSnapshotter: true
+ # -- Enable Snapshotter in NFS provisioner pod
+ enableNFSSnapshotter: true
+ # -- Enable Snapshotter in RBD provisioner pod
+ enableRBDSnapshotter: true
+ # -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
+ enablePluginSelinuxHostMount: false
+ # -- Enable Ceph CSI PVC encryption support
+ enableCSIEncryption: false
+
+ # -- Enable volume group snapshot feature. This feature is
+ # enabled by default as long as the necessary CRDs are available in the cluster.
+ enableVolumeGroupSnapshot: true
+ # -- PriorityClassName to be set on csi driver plugin pods
+ pluginPriorityClassName: system-node-critical
+
+ # -- PriorityClassName to be set on csi driver provisioner pods
+ provisionerPriorityClassName: system-cluster-critical
+
+ # -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
+ # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+ rbdFSGroupPolicy: "File"
+
+ # -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
+ # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+ cephFSFSGroupPolicy: "File"
+
+ # -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
+ # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+ nfsFSGroupPolicy: "File"
+
+ # -- OMAP generator generates the omap mapping between the PV name and the RBD image
+ # which helps CSI to identify the rbd images for CSI operations.
+ # `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when using the rbd mirroring feature.
+ # By default, the OMAP generator is disabled; when enabled, it is deployed as a
+ # sidecar with the CSI provisioner pod. To enable it, set this to true.
+ enableOMAPGenerator: false
+
+ # -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
+ # Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
+ cephFSKernelMountOptions:
+
+ # -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
+ # Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
+ # Hence enable metadata is false by default
+ enableMetadata: false
+
+ # -- Set replicas for csi provisioner deployment
+ provisionerReplicas: 2
+
+ # -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This is useful,
+ # for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster
+ clusterName:
+
+ # -- Set logging level for cephCSI containers maintained by the cephCSI.
+ # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
+ logLevel: 0
+
+ # -- Set logging level for Kubernetes-csi sidecar containers.
+ # Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
+ # @default -- `0`
+ sidecarLogLevel:
+
+ # -- CSI driver name prefix for cephfs, rbd and nfs.
+ # @default -- `namespace name where rook-ceph operator is deployed`
+ csiDriverNamePrefix:
+
+ # -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+ # @default -- `RollingUpdate`
+ rbdPluginUpdateStrategy:
+
+ # -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
+ # @default -- `1`
+ rbdPluginUpdateStrategyMaxUnavailable:
+
+ # -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+ # @default -- `RollingUpdate`
+ cephFSPluginUpdateStrategy:
+
+ # -- A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy.
+ # @default -- `1`
+ cephFSPluginUpdateStrategyMaxUnavailable:
+
+ # -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+ # @default -- `RollingUpdate`
+ nfsPluginUpdateStrategy:
+
+ # -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
+ grpcTimeoutInSeconds: 150
+
+ # -- Allow starting an unsupported ceph-csi image
+ allowUnsupportedVersion: false
+
+ # -- Burst to use while communicating with the kubernetes apiserver.
+ kubeApiBurst:
+
+ # -- QPS to use while communicating with the kubernetes apiserver.
+ kubeApiQPS:
+
+ # -- The volume of the CephCSI RBD plugin DaemonSet
+ csiRBDPluginVolume:
+ # - name: lib-modules
+ #   hostPath:
+ #     path: /run/booted-system/kernel-modules/lib/modules/
+ # - name: host-nix
+ #   hostPath:
+ #     path: /nix
+
+ # -- The volume mounts of the CephCSI RBD plugin DaemonSet
+ csiRBDPluginVolumeMount:
+ # - name: host-nix
+ #   mountPath: /nix
+ #   readOnly: true
+
+ # -- The volume of the CephCSI CephFS plugin DaemonSet
+ csiCephFSPluginVolume:
+ # - name: lib-modules
+ #   hostPath:
+ #     path: /run/booted-system/kernel-modules/lib/modules/
+ # - name: host-nix
+ #   hostPath:
+ #     path: /nix
+
+ # -- The volume mounts of the CephCSI CephFS plugin DaemonSet
+ csiCephFSPluginVolumeMount:
+ # - name: host-nix
+ #   mountPath: /nix
+ #   readOnly: true
+
+ # -- CEPH CSI RBD provisioner resource requirement list
+ # csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
+ # @default -- see values.yaml
+ csiRBDProvisionerResource: |
+ - name : csi-provisioner
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-resizer
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-attacher
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-snapshotter
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-rbdplugin
+ resource:
+ requests:
+ memory: 512Mi
+ limits:
+ memory: 1Gi
+ - name : csi-omap-generator
+ resource:
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ limits:
+ memory: 1Gi
+ - name : liveness-prometheus
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 50m
+ limits:
+ memory: 256Mi
+
+ # -- CEPH CSI RBD plugin resource requirement list
+ # @default -- see values.yaml
+ csiRBDPluginResource: |
+ - name : driver-registrar
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 50m
+ limits:
+ memory: 256Mi
+ - name : csi-rbdplugin
+ resource:
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ limits:
+ memory: 1Gi
+ - name : liveness-prometheus
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 50m
+ limits:
+ memory: 256Mi
+
+ # -- CEPH CSI CephFS provisioner resource requirement list
+ # @default -- see values.yaml
+ csiCephFSProvisionerResource: |
+ - name : csi-provisioner
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-resizer
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-attacher
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-snapshotter
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-cephfsplugin
+ resource:
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ limits:
+ memory: 1Gi
+ - name : liveness-prometheus
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 50m
+ limits:
+ memory: 256Mi
+
+ # -- CEPH CSI CephFS plugin resource requirement list
+ # @default -- see values.yaml
+ csiCephFSPluginResource: |
+ - name : driver-registrar
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 50m
+ limits:
+ memory: 256Mi
+ - name : csi-cephfsplugin
+ resource:
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ limits:
+ memory: 1Gi
+ - name : liveness-prometheus
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 50m
+ limits:
+ memory: 256Mi
+
+ # -- CEPH CSI NFS provisioner resource requirement list
+ # @default -- see values.yaml
+ csiNFSProvisionerResource: |
+ - name : csi-provisioner
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 100m
+ limits:
+ memory: 256Mi
+ - name : csi-nfsplugin
+ resource:
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ limits:
+ memory: 1Gi
+ - name : csi-attacher
+ resource:
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ limits:
+ memory: 1Gi
+
+ # -- CEPH CSI NFS plugin resource requirement list
+ # @default -- see values.yaml
+ csiNFSPluginResource: |
+ - name : driver-registrar
+ resource:
+ requests:
+ memory: 128Mi
+ cpu: 50m
+ limits:
+ memory: 256Mi
+ - name : csi-nfsplugin
+ resource:
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ limits:
+ memory: 1Gi
+
+ # Set provisionerTolerations and provisionerNodeAffinity for provisioner pod.
+ # The CSI provisioner would be best to start on the same nodes as other ceph daemons.
+
+ # -- Array of tolerations in YAML format which will be added to CSI provisioner deployment
+ provisionerTolerations:
+ # - key: key
+ #   operator: Exists
+ #   effect: NoSchedule
+
+ # -- The node labels for affinity of the CSI provisioner deployment [^1]
+ provisionerNodeAffinity: # key1=value1,value2; key2=value3
+
+ # Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
+ # The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+
+ # -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet
+ pluginTolerations:
+ # - key: key
+ #   operator: Exists
+ #   effect: NoSchedule
+
+ # -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
+ pluginNodeAffinity: # key1=value1,value2; key2=value3
+
+ # -- Enable Ceph CSI Liveness sidecar deployment
+ enableLiveness: false
+
+ # -- CSI CephFS driver metrics port
+ # @default -- `9081`
+ cephfsLivenessMetricsPort:
+
+ # -- CSI Addons server port
+ # @default -- `9070`
+ csiAddonsPort:
+
+ # -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
+ # you may want to disable this setting. However, this will cause an issue during upgrades
+ # with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
+ forceCephFSKernelClient: true
+
+ # -- Ceph CSI RBD driver metrics port
+ # @default -- `8080`
+ rbdLivenessMetricsPort:
+
+ serviceMonitor:
+ # -- Enable ServiceMonitor for Ceph CSI drivers
+ enabled: false
+ # -- Service monitor scrape interval
+ interval: 10s
+ # -- ServiceMonitor additional labels
+ labels: {}
+ # -- Use a different namespace for the ServiceMonitor
+ namespace:
+
+ # -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
+ # @default -- `/var/lib/kubelet`
+ kubeletDirPath:
+
+ # -- Duration in seconds that non-leader candidates will wait to force acquire leadership.
+ # @default -- `137s`
+ csiLeaderElectionLeaseDuration:
+
+ # -- Deadline in seconds that the acting leader will retry refreshing leadership before giving up.
+ # @default -- `107s`
+ csiLeaderElectionRenewDeadline:
+
+ # -- Retry period in seconds the LeaderElector clients should wait between tries of actions.
+ # @default -- `26s`
+ csiLeaderElectionRetryPeriod:
+
+ cephcsi:
+ # -- Ceph CSI image repository
+ repository: quay.io/cephcsi/cephcsi
+ # -- Ceph CSI image tag
+ tag: v3.11.0
+
+ registrar:
+ # -- Kubernetes CSI registrar image repository
+ repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
+ # -- Registrar image tag
+ tag: v2.10.1
+
+ provisioner:
+ # -- Kubernetes CSI provisioner image repository
+ repository: registry.k8s.io/sig-storage/csi-provisioner
+ # -- Provisioner image tag
+ tag: v4.0.1
+
+ snapshotter:
+ # -- Kubernetes CSI snapshotter image repository
+ repository: registry.k8s.io/sig-storage/csi-snapshotter
+ # -- Snapshotter image tag
+ tag: v7.0.2
+
+ attacher:
+ # -- Kubernetes CSI Attacher image repository
+ repository: registry.k8s.io/sig-storage/csi-attacher
+ # -- Attacher image tag
+ tag: v4.5.1
+
+ resizer:
+ # -- Kubernetes CSI resizer image repository
+ repository: registry.k8s.io/sig-storage/csi-resizer
+ # -- Resizer image tag
+ tag: v1.10.1
+
+ # -- Image pull policy
+ imagePullPolicy: IfNotPresent
+
+ # -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
+ cephfsPodLabels: #"key1=value1,key2=value2"
+
+ # -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
+ nfsPodLabels: #"key1=value1,key2=value2"
+
+ # -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
+ rbdPodLabels: #"key1=value1,key2=value2"
+
+ csiAddons:
+ # -- Enable CSIAddons
+ enabled: false
+ # -- CSIAddons sidecar image repository
+ repository: quay.io/csiaddons/k8s-sidecar
+ # -- CSIAddons sidecar image tag
+ tag: v0.8.0
+
+ nfs:
+ # -- Enable the nfs csi driver
+ enabled: false
+
+ topology:
+ # -- Enable topology based provisioning
+ enabled: false
+ # NOTE: the value here serves as an example and needs to be
+ # updated with node labels that define domains of interest
+ # -- domainLabels define which node labels to use as domains
+ # for CSI nodeplugins to advertise their domains
+ domainLabels:
+ # - kubernetes.io/hostname
+ # - topology.kubernetes.io/zone
+ # - topology.rook.io/rack
+
+ # -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
+ # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+ # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
+ # of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
+ # CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+ cephFSAttachRequired: true
+ # -- Whether to skip any attach operation altogether for RBD PVCs. See more details
+ # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+ # If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
+ # **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
+ # csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set
+ # to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
+ # Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+ rbdAttachRequired: true
+ # -- Whether to skip any attach operation altogether for NFS PVCs. See more details
+ # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+ # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
+ # of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
+ # NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+ nfsAttachRequired: true
+
+ # -- Enable discovery daemon
+ enableDiscoveryDaemon: false
+ # -- Set the discovery daemon device discovery interval (default to 60m)
+ discoveryDaemonInterval: 60m
+
+ # -- The timeout for ceph commands in seconds
+ cephCommandsTimeoutSeconds: "15"
+
+ # -- If true, run rook operator on the host network
+ useOperatorHostNetwork:
+
+ # -- If true, scale down the rook operator.
+ # This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling
+ # to deploy your helm charts.
+ scaleDownOperator: false
+
+ ## Rook Discover configuration
+ ## toleration: NoSchedule, PreferNoSchedule or NoExecute
+ ## tolerationKey: Set this to the specific key of the taint to tolerate
+ ## tolerations: Array of tolerations in YAML format which will be added to agent deployment
+ ## nodeAffinity: Set to labels of the node to match
+
+ discover:
+ # -- Toleration for the discover pods.
+ # Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
+ toleration:
+ # -- The specific key of the taint to tolerate
+ tolerationKey:
+ # -- Array of tolerations in YAML format which will be added to discover deployment
+ tolerations:
+ # - key: key
+ #   operator: Exists
+ #   effect: NoSchedule
+ # -- The node labels for affinity of `discover-agent` [^1]
+ nodeAffinity:
+ # key1=value1,value2; key2=value3
+ #
+ # or
+ #
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ #   nodeSelectorTerms:
+ #     - matchExpressions:
+ #         - key: storage-node
+ #           operator: Exists
+ # -- Labels to add to the discover pods
+ podLabels: # "key1=value1,key2=value2"
+ # -- Add resources to discover daemon pods
+ resources:
+ # - limits:
+ # memory: 512Mi
+ # - requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ # -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
+ hostpathRequiresPrivileged: false
+
+ # -- Disable automatic orchestration when new devices are discovered.
+ disableDeviceHotplug: false
+
+ # -- Blacklist certain disks according to the regex provided.
+ discoverDaemonUdev:
+
+ # -- imagePullSecrets option allows pulling docker images from a private docker registry. The option will be passed to all service accounts.
+ imagePullSecrets: # - name: my-registry-secret
+
+ # -- Whether the OBC provisioner should watch on the operator namespace or not; if not, the namespace of the cluster will be used
+ enableOBCWatchOperatorNamespace: true
+
+ # -- Specify the prefix for the OBC provisioner in place of the cluster namespace
+ # @default -- `ceph cluster namespace`
+ obcProvisionerNamePrefix:
+
+ monitoring:
+ # -- Enable monitoring. Requires Prometheus to be pre-installed.
+ # Enabling will also create RBAC rules to allow Operator to create ServiceMonitors
+ enabled: false
+
+ rook-ceph-cluster:
+ # Default values for a single rook-ceph cluster
+ # This is a YAML-formatted file.
+ # Declare variables to be passed into your templates.
+
+ # -- Namespace of the main rook operator
+ operatorNamespace: rook-ceph
+
+ # -- The metadata.name of the CephCluster CR
+ # @default -- The same as the namespace
+ clusterName:
+
+ # -- Optional override of the target kubernetes version
+ kubeVersion:
+ # mon_allow_pool_delete = true
+ # osd_pool_default_size = 3
+ # osd_pool_default_min_size = 2
+
+ # Installs a debugging toolbox deployment
+ toolbox:
+ # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
+ enabled: true
+ # -- Toolbox image, defaults to the image used by the Ceph cluster
+ image:
+ #quay.io/ceph/ceph:v18.2.4
+ # -- Toolbox tolerations
+ tolerations: []
+ # -- Toolbox affinity
+ affinity: {}
+ # -- Toolbox container security context
+ containerSecurityContext:
+ runAsNonRoot: true
+ runAsUser: 2016
+ runAsGroup: 2016
+ capabilities:
+ drop: [ "ALL" ]
+ # -- Toolbox resources
+ resources:
+ limits:
+ memory: "1Gi"
+ requests:
+ cpu: "100m"
+ memory: "128Mi"
+ # -- Set the priority class for the toolbox if desired
+ priorityClassName:
+
+
+ monitoring:
+ # -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
+ # Monitoring requires Prometheus to be pre-installed
+ enabled: false
+ # -- Whether to create the Prometheus rules for Ceph alerts
+ createPrometheusRules: false
+ # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
+ # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
+ # deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
+ rulesNamespaceOverride:
+ # Monitoring settings for external clusters:
+ # externalMgrEndpoints:
+ # externalMgrPrometheusPort:
+ # Scrape interval for prometheus
+ # interval: 10s
+ # allow adding custom labels and annotations to the prometheus rule
+ prometheusRule:
+ # -- Labels applied to PrometheusRule
+ labels: {}
+ # -- Annotations applied to PrometheusRule
+ annotations: {}
+
+ # -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
+ pspEnable: false
+
+ # imagePullSecrets option allows pulling docker images from a private docker registry. The option will be passed to all service accounts.
+ # imagePullSecrets:
+ # - name: my-registry-secret
+
+ # All values below are taken from the CephCluster CRD
+ # -- Cluster configuration.
+ # @default -- See [below](#ceph-cluster-spec)
+ cephClusterSpec:
+ # This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
+ # as in the host-based example (cluster.yaml). For a different configuration such as a
+ # PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
+ # or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
+ # with the specs from those examples.
+
+ # For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
+ cephVersion:
+ # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
+ # v17 is Quincy, v18 is Reef.
+ # RECOMMENDATION: In production, use a specific version tag instead of the general v18 flag, which pulls the latest release and could result in different
+ # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
+ # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v18.2.4-20240724
+ # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
+ image: quay.io/ceph/ceph:v18.2.4
+ # Whether to allow unsupported versions of Ceph. Currently `quincy`, and `reef` are supported.
+ # Future versions such as `squid` (v19) would require this to be set to `true`.
+ # Do not set to true in production.
+ allowUnsupported: false
+
+ # The path on the host where configuration files will be persisted. Must be specified.
+ # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
+ # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
+ dataDirHostPath: /var/lib/rook
+
+ # Whether or not upgrade should continue even if a check fails
+ # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
+ # Use at your OWN risk
+ # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
+ skipUpgradeChecks: false
+
+ # Whether or not to continue if PGs are not clean during an upgrade
+ continueUpgradeAfterChecksEvenIfNotHealthy: false
+
+ # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
+ # If the timeout is exceeded and the OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
+ # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then the operator would
+ # continue with the upgrade of an OSD even if it's not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
+ # The default wait timeout is 10 minutes.
+ waitTimeoutForHealthyOSDInMinutes: 10
+
+ # Whether or not to require PGs to be clean before an OSD upgrade. If set to `true`, the OSD upgrade process won't start until PGs are healthy.
+ # This configuration will be ignored if `skipUpgradeChecks` is `true`.
+ # Default is false.
+ upgradeOSDRequiresHealthyPGs: false
+
+ mon:
+ # Set the number of mons to be started. Generally recommended to be 3.
+ # For highest availability, an odd number of mons should be specified.
+ count: 1
+ # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
+ # Mons should only be allowed on the same node for test environments where data loss is acceptable.
+ allowMultiplePerNode: true
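+ # Illustrative example for a production-style cluster, following the recommendation
+ # above (node count is an assumption, not a value used by this tutorial):
+ # count: 3
+ # allowMultiplePerNode: false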
+
+ mgr:
+ # When higher availability of the mgr is needed, increase the count to 2.
+ # In that case, one mgr will be active and one in standby. When Ceph updates which
+ # mgr is active, Rook will update the mgr services to match the active mgr.
+ count: 1
+ allowMultiplePerNode: true
+ modules:
+ # List of modules to optionally enable or disable.
+ # Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR.
+ # - name: rook
+ # enabled: true
+
+ # enable the ceph dashboard for viewing cluster status
+ dashboard:
+ enabled: true
+ # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
+ urlPrefix: /ceph-dashboard
+ # serve the dashboard at the given port.
+ # port: 8443
+ # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
+ # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
+ ssl: true
+
+ # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
+ network:
+ connections:
+ # Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
+ # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
+ # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
+ # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
+ # you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
+ # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
+ encryption:
+ enabled: false
+ # Whether to compress the data in transit across the wire. The default is false.
+ # Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
+ compression:
+ enabled: false
+ # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
+ # and clients will be required to connect to the Ceph cluster with the v2 port (3300).
+ # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
+ requireMsgr2: false
+ # # enable host networking
+ # provider: host
+ # # EXPERIMENTAL: enable the Multus network provider
+ # provider: multus
+ # selectors:
+ # # The selector keys are required to be `public` and `cluster`.
+ # # Based on the configuration, the operator will do the following:
+ # # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
+ # # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
+ # #
+ # # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
+ # #
+ # # public: public-conf --> NetworkAttachmentDefinition object name in Multus
+ # # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
+ # # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
+ # ipFamily: "IPv6"
+ # # Ceph daemons to listen on both IPv4 and IPv6 networks
+ # dualStack: false
+
+ # enable the crash collector for ceph daemon crash collection
+ crashCollector:
+ disable: true
+ # Uncomment daysToRetain to prune ceph crash entries older than the
+ # specified number of days.
+ # daysToRetain: 30
+
+ # enable the log collector; daemons will log to files and rotate them
+ logCollector:
+ enabled: true
+ periodicity: daily # one of: hourly, daily, weekly, monthly
+ maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
+
+ # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
+ cleanupPolicy:
+ # Since cluster cleanup is destructive to data, confirmation is required.
+ # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
+ # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
+ # Rook will immediately stop configuring the cluster and only wait for the delete command.
+ # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
+ confirmation: ""
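+ # For illustration only (not part of the chart defaults): assuming the CephCluster is named
+ # "rook-ceph" in the "rook-ceph" namespace, the confirmation could be set just before deleting
+ # the cluster with a command along the lines of:
+ #   kubectl -n rook-ceph patch cephcluster rook-ceph --type merge \
+ #     -p '{"spec":{"cleanupPolicy":{"confirmation":"yes-really-destroy-data"}}}'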
+ # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
+ sanitizeDisks:
+ # method indicates if the entire disk should be sanitized or simply Ceph's metadata
+ # in both cases, re-installation is possible
+ # possible choices are 'complete' or 'quick' (default)
+ method: quick
+ # dataSource indicate where to get random bytes from to write on the disk
+ # possible choices are 'zero' (default) or 'random'
+ # using random sources will consume entropy from the system and will take much more time than the zero source
+ dataSource: zero
+ # iteration overwrite N times instead of the default (1)
+ # takes an integer value
+ iteration: 1
+ # allowUninstallWithVolumes defines how the uninstall should be performed
+ # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
+ allowUninstallWithVolumes: false
+
+ # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
+ # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
+ # tolerate taints with a key of 'storage-node'.
+ # placement:
+ # all:
+ # nodeAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # nodeSelectorTerms:
+ # - matchExpressions:
+ # - key: role
+ # operator: In
+ # values:
+ # - storage-node
+ # podAffinity:
+ # podAntiAffinity:
+ # topologySpreadConstraints:
+ # tolerations:
+ # - key: storage-node
+ # operator: Exists
+ # # The above placement information can also be specified for mon, osd, and mgr components
+ # mon:
+ # # Monitor deployments may contain an anti-affinity rule for avoiding monitor
+ # # collocation on the same node. This is a required rule when host network is used
+ # # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
+ # # preferred rule with weight: 50.
+ # osd:
+ # mgr:
+ # cleanup:
+
+ # annotations:
+ # all:
+ # mon:
+ # osd:
+ # cleanup:
+ # prepareosd:
+ # # If no mgr annotations are set, prometheus scrape annotations will be set by default.
+ # mgr:
+ # dashboard:
+
+ # labels:
+ # all:
+ # mon:
+ # osd:
+ # cleanup:
+ # mgr:
+ # prepareosd:
+ # # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
+ # # These labels can be passed as LabelSelector to Prometheus
+ # monitoring:
+ # dashboard:
+
+ resources:
+ mgr:
+ limits:
+ memory: "1Gi"
+ requests:
+ cpu: "500m"
+ memory: "512Mi"
+ mon:
+ limits:
+ memory: "2Gi"
+ requests:
+ cpu: "1000m"
+ memory: "1Gi"
+ osd:
+ limits:
+ memory: "4Gi"
+ requests:
+ cpu: "1000m"
+ memory: "4Gi"
+ prepareosd:
+ # limits: It is not recommended to set limits on the OSD prepare job
+ # since it's a one-time burst for memory that must be allowed to
+ # complete without an OOM kill. Note however that if a k8s
+ # limitRange guardrail is defined external to Rook, the lack of
+ # a limit here may result in a sync failure, in which case a
+ # limit should be added. 1200Mi may suffice for up to 15Ti
+ # OSDs; for larger devices, 2Gi may be required.
+ # cf. https://github.com/rook/rook/pull/11103
+ requests:
+ cpu: "500m"
+ memory: "50Mi"
+ mgr-sidecar:
+ limits:
+ memory: "100Mi"
+ requests:
+ cpu: "100m"
+ memory: "40Mi"
+ crashcollector:
+ limits:
+ memory: "60Mi"
+ requests:
+ cpu: "100m"
+ memory: "60Mi"
+ logcollector:
+ limits:
+ memory: "1Gi"
+ requests:
+ cpu: "100m"
+ memory: "100Mi"
+ cleanup:
+ limits:
+ memory: "1Gi"
+ requests:
+ cpu: "500m"
+ memory: "100Mi"
+ exporter:
+ limits:
+ memory: "128Mi"
+ requests:
+ cpu: "50m"
+ memory: "50Mi"
+
+ # The option to automatically remove OSDs that are out and are safe to destroy.
+ removeOSDsIfOutAndSafeToRemove: true
+
+ # priority classes to apply to ceph resources
+ priorityClassNames:
+ mon: system-node-critical
+ osd: system-node-critical
+ mgr: system-cluster-critical
+
+ storage:
+ # cluster level storage configuration and selection
+ useAllNodes: true
+ useAllDevices: true
+ # deviceFilter:
+ # config:
+ # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
+ # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
+ # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
+ # osdsPerDevice: "1" # this value can be overridden at the node or device level
+ # encryptedDevice: "true" # the default value for this option is "false"
+ # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
+ # # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
+ # nodes:
+ # - name: "172.17.4.201"
+ # devices: # specific devices to use for storage can be specified for each node
+ # - name: "sdb"
+ # - name: "nvme01" # multiple osds can be created on high performance devices
+ # config:
+ # osdsPerDevice: "5"
+ # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
+ # config: # configuration can be specified at the node level which overrides the cluster level config
+ # - name: "172.17.4.301"
+ # deviceFilter: "^sd."
+
+ # The section for configuring management of daemon disruptions during upgrade or fencing.
+ disruptionManagement:
+ # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
+ # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
+ # block eviction of OSDs by default and unblock them safely when drains are detected.
+ managePodBudgets: true
+ # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
+ # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
+ osdMaintenanceTimeout: 30
+ # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
+ # The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
+ # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
+ pgHealthCheckTimeout: 0
+
+ # Configure the healthcheck and liveness probes for ceph pods.
+ # Valid values for daemons are 'mon', 'osd', 'status'
+ healthCheck:
+ daemonHealth:
+ mon:
+ disabled: false
+ interval: 45s
+ osd:
+ disabled: false
+ interval: 60s
+ status:
+ disabled: false
+ interval: 60s
+ # Change pod liveness probe, it works for all mon, mgr, and osd pods.
+ livenessProbe:
+ mon:
+ disabled: false
+ mgr:
+ disabled: false
+ osd:
+ disabled: false
+
+ ingress:
+ # -- Enable an ingress for the ceph-dashboard
+ dashboard:
+ annotations:
+ cert-manager.io/issuer: selfsigned-issuer
+ nginx.ingress.kubernetes.io/backend-protocol: HTTPS
+ nginx.ingress.kubernetes.io/server-snippet: |
+ proxy_ssl_verify off;
+ host:
+ name: ceph-vmo-lab.maas-eng.sc
+ path: "/ceph-dashboard/"
+ tls:
+ - hosts:
+ - ceph-vmo-lab.maas-eng.sc
+ secretName: ceph-dashboard-tls
+ ingressClassName: nginx
+
+ # -- A list of CephBlockPool configurations to deploy
+ # @default -- See [below](#ceph-block-pools)
+ cephBlockPools:
+ - name: ceph-blockpool
+ # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
+ spec:
+ failureDomain: host
+ replicated:
+ size: ${worker_nodes}
+ # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
+ # For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
+ # enableRBDStats: true
+ storageClass:
+ enabled: true
+ name: ceph-block
+ annotations:
+ storageclass.kubevirt.io/is-default-virt-class: "true"
+ labels: {}
+ isDefault: false
+ reclaimPolicy: Delete
+ allowVolumeExpansion: true
+ volumeBindingMode: "Immediate"
+ mountOptions: []
+ # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
+ allowedTopologies: []
+ # - matchLabelExpressions:
+ # - key: rook-ceph-role
+ # values:
+ # - storage-node
+ # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
+ parameters:
+ # (optional) mapOptions is a comma-separated list of map options.
+ # For krbd options refer
+ # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
+ # For nbd options refer
+ # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
+ # mapOptions: lock_on_read,queue_depth=1024
+
+ # (optional) unmapOptions is a comma-separated list of unmap options.
+ # For krbd options refer
+ # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
+ # For nbd options refer
+ # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
+ # unmapOptions: force
+
+ # RBD image format. Defaults to "2".
+ imageFormat: "2"
+
+ # RBD image features, equivalent to OR'd bitfield value: 63
+ # Available for imageFormat: "2". Older releases of CSI RBD
+ # support only the `layering` feature. The Linux kernel (KRBD) supports the
+ # full feature complement as of 5.4
+ imageFeatures: layering
+
+ # These secrets contain Ceph admin credentials.
+ csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+ csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
+ csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+ csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
+ csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+ csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
+ # Specify the filesystem type of the volume. If not specified, csi-provisioner
+ # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
+ # in hyperconverged settings where the volume is mounted on the same node as the osds.
+ csi.storage.k8s.io/fstype: ext4
+
+ # -- A list of CephFileSystem configurations to deploy
+ # @default -- See [below](#ceph-file-systems)
+ cephFileSystems:
+ - name: ceph-filesystem
+ # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
+ spec:
+ metadataPool:
+ replicated:
+ size: ${worker_nodes}
+ dataPools:
+ - failureDomain: host
+ replicated:
+ size: ${worker_nodes}
+ # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
+ name: data0
+ metadataServer:
+ activeCount: 1
+ activeStandby: true
+ resources:
+ limits:
+ memory: "4Gi"
+ requests:
+ cpu: "1000m"
+ memory: "4Gi"
+ priorityClassName: system-cluster-critical
+ storageClass:
+ enabled: true
+ isDefault: true
+ annotations: {}
+ labels: {}
+ name: ceph-filesystem
+ # (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
+ pool: data0
+ reclaimPolicy: Delete
+ allowVolumeExpansion: true
+ volumeBindingMode: "Immediate"
+ mountOptions: []
+ # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
+ parameters:
+ # The secrets contain Ceph admin credentials.
+ csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+ csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
+ csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+ csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
+ csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+ csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
+ # Specify the filesystem type of the volume. If not specified, csi-provisioner
+ # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
+ # in hyperconverged settings where the volume is mounted on the same node as the osds.
+ csi.storage.k8s.io/fstype: ext4
+
+ # -- Settings for the filesystem snapshot class
+ # @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
+ cephFileSystemVolumeSnapshotClass:
+ enabled: false
+ name: ceph-filesystem
+ isDefault: true
+ deletionPolicy: Delete
+ annotations: {}
+ labels: {}
+ # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
+ parameters: {}
+
+ # -- Settings for the block pool snapshot class
+ # @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
+ cephBlockPoolsVolumeSnapshotClass:
+ enabled: false
+ name: ceph-block
+ isDefault: false
+ deletionPolicy: Delete
+ annotations: {}
+ labels: {}
+ # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
+ parameters: {}
+
+ # -- A list of CephObjectStore configurations to deploy
+ # @default -- See [below](#ceph-object-stores)
+ cephObjectStores: []
+ ## cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it
+ ## For erasure coded a replicated metadata pool is required.
+ ## https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
+ #cephECBlockPools:
+ # - name: ec-pool
+ # spec:
+ # metadataPool:
+ # replicated:
+ # size: 2
+ # dataPool:
+ # failureDomain: osd
+ # erasureCoded:
+ # dataChunks: 2
+ # codingChunks: 1
+ # deviceClass: hdd
+ #
+ # parameters:
+ # # clusterID is the namespace where the rook cluster is running
+ # # If you change this namespace, also change the namespace below where the secret namespaces are defined
+ # clusterID: rook-ceph # namespace:cluster
+ # # (optional) mapOptions is a comma-separated list of map options.
+ # # For krbd options refer
+ # # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
+ # # For nbd options refer
+ # # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
+ # # mapOptions: lock_on_read,queue_depth=1024
+ #
+ # # (optional) unmapOptions is a comma-separated list of unmap options.
+ # # For krbd options refer
+ # # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
+ # # For nbd options refer
+ # # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
+ # # unmapOptions: force
+ #
+ # # RBD image format. Defaults to "2".
+ # imageFormat: "2"
+ #
+ # # RBD image features, equivalent to OR'd bitfield value: 63
+ # # Available for imageFormat: "2". Older releases of CSI RBD
+ # # support only the `layering` feature. The Linux kernel (KRBD) supports the
+ # # full feature complement as of 5.4
+ # # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
+ # imageFeatures: layering
+ #
+ # storageClass:
+ # provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name
+ # enabled: true
+ # name: rook-ceph-block
+ # isDefault: false
+ # annotations: { }
+ # labels: { }
+ # allowVolumeExpansion: true
+ # reclaimPolicy: Delete
+
+ # -- CSI driver name prefix for cephfs, rbd and nfs.
+ # @default -- `namespace name where rook-ceph operator is deployed`
+ csiDriverNamePrefix:
+ configOverride: |
+ [global]
+ osd_pool_default_size = 1
+ mon_warn_on_pool_no_redundancy = false
+ bdev_flock_retry = 20
+ bluefs_buffered_io = false
+ mon_data_avail_warn = 10
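+# Note: the "worker_nodes" placeholders in the pool sizes above are rendered by Terraform before
+# this manifest is used. A minimal sketch of how that rendering might look (assuming templatefile()
+# is used in cluster_profiles.tf; the exact wiring in this repository may differ):
+#   values = templatefile("${path.module}/manifests/csi-values.yaml", { worker_nodes = var.maas-worker-nodes })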
diff --git a/terraform/vmo-cluster/manifests/k8s-values.yaml b/terraform/vmo-cluster/manifests/k8s-values.yaml
new file mode 100644
index 0000000..b458475
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/k8s-values.yaml
@@ -0,0 +1,118 @@
+pack:
+ k8sHardening: True
+ content:
+ images:
+ - image: registry.k8s.io/coredns/coredns:v1.11.3
+ - image: registry.k8s.io/etcd:3.5.15-0
+ - image: registry.k8s.io/kube-apiserver:v1.30.6
+ - image: registry.k8s.io/kube-controller-manager:v1.30.6
+ - image: registry.k8s.io/kube-proxy:v1.30.6
+ - image: registry.k8s.io/kube-scheduler:v1.30.6
+ - image: registry.k8s.io/pause:3.9
+ - image: registry.k8s.io/pause:3.8
+ #CIDR Range for Pods in cluster
+ # Note : This must not overlap with any of the host or service network
+ podCIDR: "100.64.0.0/18"
+ #CIDR notation IP range from which to assign service cluster IPs
+ # Note : This must not overlap with any IP ranges assigned to nodes for pods.
+ serviceClusterIpRange: "100.64.64.0/18"
+ palette:
+ config:
+ dashboard:
+ identityProvider: palette
+kubeadmconfig:
+ apiServer:
+ extraArgs:
+ # Note : secure-port flag is used during kubeadm init. Do not change this flag on a running cluster
+ secure-port: "6443"
+ anonymous-auth: "true"
+ profiling: "false"
+ disable-admission-plugins: "AlwaysAdmit"
+ default-not-ready-toleration-seconds: "60"
+ default-unreachable-toleration-seconds: "60"
+ enable-admission-plugins: "AlwaysPullImages,NamespaceLifecycle,ServiceAccount,NodeRestriction,PodSecurity"
+ admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml"
+ audit-log-path: /var/log/apiserver/audit.log
+ audit-policy-file: /etc/kubernetes/audit-policy.yaml
+ audit-log-maxage: "31"
+ audit-log-maxbackup: "10"
+ audit-log-maxsize: "100"
+ authorization-mode: RBAC,Node
+ kubelet-certificate-authority: "/etc/kubernetes/pki/ca.crt"
+ tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+ extraVolumes:
+ - name: audit-log
+ hostPath: /var/log/apiserver
+ mountPath: /var/log/apiserver
+ pathType: DirectoryOrCreate
+ - name: audit-policy
+ hostPath: /etc/kubernetes/audit-policy.yaml
+ mountPath: /etc/kubernetes/audit-policy.yaml
+ readOnly: true
+ pathType: File
+ - name: pod-security-standard
+ hostPath: /etc/kubernetes/pod-security-standard.yaml
+ mountPath: /etc/kubernetes/pod-security-standard.yaml
+ readOnly: true
+ pathType: File
+ controllerManager:
+ extraArgs:
+ profiling: "false"
+ terminated-pod-gc-threshold: "25"
+ use-service-account-credentials: "true"
+ feature-gates: "RotateKubeletServerCertificate=true"
+ scheduler:
+ extraArgs:
+ profiling: "false"
+ kubeletExtraArgs:
+ read-only-port: "0"
+ event-qps: "0"
+ feature-gates: "RotateKubeletServerCertificate=true"
+ protect-kernel-defaults: "true"
+ rotate-server-certificates: "true"
+ tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+ files:
+ - path: hardening/audit-policy.yaml
+ targetPath: /etc/kubernetes/audit-policy.yaml
+ targetOwner: "root:root"
+ targetPermissions: "0600"
+ - path: hardening/90-kubelet.conf
+ targetPath: /etc/sysctl.d/90-kubelet.conf
+ targetOwner: "root:root"
+ targetPermissions: "0600"
+ - targetPath: /etc/kubernetes/pod-security-standard.yaml
+ targetOwner: "root:root"
+ targetPermissions: "0600"
+ content: |
+ apiVersion: apiserver.config.k8s.io/v1
+ kind: AdmissionConfiguration
+ plugins:
+ - name: PodSecurity
+ configuration:
+ apiVersion: pod-security.admission.config.k8s.io/v1
+ kind: PodSecurityConfiguration
+ defaults:
+ enforce: "baseline"
+ enforce-version: "v1.30"
+ audit: "baseline"
+ audit-version: "v1.30"
+ warn: "restricted"
+ warn-version: "v1.30"
+ audit: "restricted"
+ audit-version: "v1.30"
+ exemptions:
+ # Array of authenticated usernames to exempt.
+ usernames: []
+ # Array of runtime class names to exempt.
+ runtimeClasses: []
+ # Array of namespaces to exempt.
+ namespaces: [kube-system]
+
+ preKubeadmCommands:
+ # For enabling 'protect-kernel-defaults' flag to kubelet, kernel parameters changes are required
+ - 'echo "====> Applying kernel parameters for Kubelet"'
+ - 'sysctl -p /etc/sysctl.d/90-kubelet.conf'
+
+ postKubeadmCommands:
+ - 'chmod 600 /var/lib/kubelet/config.yaml'
+ # - 'echo "List of post kubeadm commands to be executed"'
diff --git a/terraform/vmo-cluster/manifests/ubuntu-values.yaml b/terraform/vmo-cluster/manifests/ubuntu-values.yaml
new file mode 100644
index 0000000..607dfc9
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/ubuntu-values.yaml
@@ -0,0 +1,47 @@
+kubeadmconfig:
+ preKubeadmCommands:
+ - 'echo "====> Applying pre Kubeadm commands"'
+ # Force specific IP address as the Node InternalIP for kubelet
+ - apt update
+ - apt install -y grepcidr
+ - |
+ NETWORKS="10.11.136.0/24"
+ IPS=$(hostname -I)
+ for IP in $IPS
+ do
+ echo "$IP" | grepcidr "$NETWORKS" >/dev/null && echo " --node-ip=$IP" >> /etc/default/kubelet
+ if [ $? == 0 ]; then break; fi
+ done
+ # Increase audit_backlog_limit
+ - sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="audit_backlog_limit=256"/g' /etc/default/grub
+ - update-grub
+ # Clean up stale container images
+ - (crontab -l || true; echo "0 4 * * * /usr/bin/crictl -c /etc/crictl.yaml rmi --prune")| crontab -
+ # Update CA certs
+ - update-ca-certificates
+ # Start containerd with new configuration
+ - systemctl daemon-reload
+ - systemctl restart containerd
+ postKubeadmCommands:
+ - 'echo "====> Applying post Kubeadm commands"'
+ files:
+ - targetPath: /etc/containerd/config.toml
+ targetOwner: "root:root"
+ targetPermissions: "0644"
+ content: |
+ ## template: jinja
+
+ # Use config version 2 to enable new configuration fields.
+ # Config file is parsed as version 1 by default.
+ version = 2
+
+ imports = ["/etc/containerd/conf.d/*.toml"]
+
+ [plugins]
+ [plugins."io.containerd.grpc.v1.cri"]
+ sandbox_image = "registry.k8s.io/pause:3.9"
+ device_ownership_from_security_context = true
+ [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
+ runtime_type = "io.containerd.runc.v2"
+ [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
+ SystemdCgroup = true
diff --git a/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml b/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
new file mode 100644
index 0000000..7cc94dc
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
@@ -0,0 +1,136 @@
+apiVersion: spectrocloud.com/v1
+kind: VmTemplate
+metadata:
+ name: ubuntu-2204
+spec:
+ description: Ubuntu 22.04
+ displayName: Ubuntu 22.04
+ icon: https://s3.amazonaws.com/manifests.spectrocloud.com/logos/ubuntu.png
+ running: false
+ dataVolumeTemplates:
+ - metadata:
+ name: ubuntu-2204
+ spec:
+ source:
+ pvc:
+ name: template-ubuntu-2204
+ namespace: vmo-golden-images
+ #storage: (errors in VMO GUI)
+ pvc:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 50Gi
+ volumeMode: Block
+ storageClassName: ceph-block
+ template:
+ metadata:
+ annotations:
+ descheduler.alpha.kubernetes.io/evict: "true"
+ labels:
+ kubevirt.io/size: small
+ kubevirt.io/domain: hellouni
+ spec:
+ domain:
+ cpu:
+ cores: 2
+ sockets: 1
+ threads: 1
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: datavolume-os
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ interfaces:
+ - masquerade: {}
+ name: default
+ model: virtio
+ #macAddress: '00:5e:ab:cd:ef:01'
+ machine:
+ type: q35
+ resources:
+ limits:
+ memory: 2Gi
+ requests:
+ memory: 2Gi
+ networks:
+ - name: default
+ pod: {}
+ volumes:
+ - dataVolume:
+ name: ubuntu-2204
+ name: datavolume-os
+ - cloudInitNoCloud:
+ userData: |
+ #cloud-config
+ ssh_pwauth: True
+ chpasswd: { expire: False }
+ password: spectro
+ disable_root: false
+ runcmd:
+ - apt-get update
+ - apt-get install -y qemu-guest-agent
+ - systemctl start qemu-guest-agent
+ - |
+ apt-get -y install ca-certificates curl
+ install -m 0755 -d /etc/apt/keyrings
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+ chmod a+r /etc/apt/keyrings/docker.asc
+ echo \
+ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+ $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+ tee /etc/apt/sources.list.d/docker.list > /dev/null
+ apt-get update
+ apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+ groupadd docker
+ gpasswd -a ubuntu docker
+ name: cloudinitdisk
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: DataVolume
+metadata:
+ name: "template-ubuntu-2204"
+ namespace: "vmo-golden-images"
+ annotations:
+ cdi.kubevirt.io/storage.deleteAfterCompletion: "false"
+ cdi.kubevirt.io/storage.bind.immediate.requested: ""
+spec:
+ storage:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 50Gi
+ volumeMode: Block
+ storageClassName: ceph-block
+ source:
+ registry:
+ url: "docker://gcr.io/spectro-images-public/release/vm-dashboard/os/ubuntu-container-disk:22.04"
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: StorageProfile
+metadata:
+ name: ceph-filesystem
+spec:
+ claimPropertySets:
+ - accessModes:
+ - ReadWriteMany
+ volumeMode:
+ Filesystem
+ cloneStrategy: csi-clone
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: StorageProfile
+metadata:
+ name: ceph-block
+spec:
+ claimPropertySets:
+ - accessModes:
+ - ReadWriteMany
+ volumeMode:
+ Block
+ cloneStrategy: csi-clone
diff --git a/terraform/vmo-cluster/manifests/vmo-extras-values.yaml b/terraform/vmo-cluster/manifests/vmo-extras-values.yaml
new file mode 100644
index 0000000..03a8f0f
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/vmo-extras-values.yaml
@@ -0,0 +1,2 @@
+pack:
+ spectrocloud.com/install-priority: "30"
diff --git a/terraform/vmo-cluster/manifests/vmo-values.yaml b/terraform/vmo-cluster/manifests/vmo-values.yaml
new file mode 100644
index 0000000..35b1244
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/vmo-values.yaml
@@ -0,0 +1,600 @@
+pack:
+ content:
+ images:
+ - image: gcr.io/spectro-images-public/release/spectro-vm-dashboard:4.4.10
+ - image: gcr.io/spectro-images-public/release/kubevirt/virt-operator:v1.2.0
+ - image: registry.k8s.io/sig-storage/snapshot-validation-webhook:v6.3.4
+ - image: registry.k8s.io/sig-storage/snapshot-controller:v6.3.4
+ - image: ghcr.io/k8snetworkplumbingwg/multus-cni:v4.0.2-thick
+ - image: ghcr.io/k8snetworkplumbingwg/multus-dynamic-networks-controller:latest-amd64
+ - image: quay.io/kubevirt/cdi-operator:v1.58.0
+ - image: quay.io/kubevirt/cdi-uploadproxy:v1.58.0
+ - image: quay.io/kubevirt/cdi-controller:v1.58.0
+ - image: quay.io/kubevirt/cdi-apiserver:v1.58.0
+ - image: quay.io/kubevirt/cdi-importer:v1.58.0
+ - image: quay.io/kubevirt/cdi-uploadserver:v1.58.0
+ - image: quay.io/kubevirt/cdi-cloner:v1.58.0
+ - image: gcr.io/spectro-images-public/release/kubevirt/virt-handler:v1.2.0
+ - image: gcr.io/spectro-images-public/release/kubevirt/virt-launcher:v1.2.0
+ - image: gcr.io/spectro-images-public/release/kubevirt/virt-exportproxy:v1.2.0
+ - image: gcr.io/spectro-images-public/release/kubevirt/virt-exportserver:v1.2.0
+ - image: gcr.io/spectro-images-public/release/kubevirt/virt-controller:v1.2.0
+ - image: gcr.io/spectro-images-public/release/kubevirt/virt-api:v1.2.0
+ - image: registry.k8s.io/descheduler/descheduler:v0.30.1
+ - image: gcr.io/spectro-images-public/release/virtual-machine-orchestrator/os/ubuntu-container-disk:22.04
+ - image: gcr.io/spectro-images-public/release/virtual-machine-orchestrator/os/fedora-container-disk:37
+ - image: gcr.io/spectro-images-public/release/virtual-machine-orchestrator/vlan-filtering/ubuntu:latest
+ - image: gcr.io/spectro-images-public/release/spectro-cleanup:1.0.2
+ - image: gcr.io/spectro-images-public/release/spectro-kubectl:1.30.2-spectro-4.4.a
+ namespace: vm-dashboard
+ palette:
+ config:
+ dashboard:
+ access: private
+ spectrocloud.com/install-priority: "20"
+charts:
+ virtual-machine-orchestrator:
+ image:
+ repository: gcr.io/spectro-images-public/release/spectro-vm-dashboard
+ tag: "4.4.10"
+ service:
+ type: "ClusterIP"
+ appConfig:
+ clusterInfo:
+ consoleBaseAddress: ""
+ fullnameOverride: "virtual-machine-orchestrator"
+ serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: "virtual-machine-orchestrator"
+ sampleTemplates:
+ fedora37: false
+ ubuntu2204: false
+ ubuntu2204WithVol: false
+ ubuntu2204staticIP: false
+ fedora37staticIP: false
+ # To create additional vm templates refer to https://docs.spectrocloud.com/vm-management/create-manage-vm/create-vm-template
+ # This namespace will be used to store golden images
+ goldenImagesNamespace: "vmo-golden-images"
+ # These namespaces will be created and set up to deploy VMs into
+ vmEnabledNamespaces:
+ - "default"
+ - "virtual-machines"
+ - ns-adv
+ - ns-edge
+ - ns-product
+ - ns-packs
+ grafana:
+ namespace: monitoring
+ vlanFiltering:
+ enabled: true
+ namespace: kube-system
+ image:
+ repository: gcr.io/spectro-images-public/release/virtual-machine-orchestrator/vlan-filtering/ubuntu
+ pullPolicy: IfNotPresent
+ tag: "latest"
+ env:
+ # Which bridge interface to control
+ bridgeIF: "br0"
+ # Beginning of VLAN range to enable
+ allowedVlans: "128,129"
+ # Set to "true" to enable VLANs on the br0 interface for the host to use itself
+ allowVlansOnSelf: "true"
+ # Beginning of VLAN range to enable for use by the node itself
+ allowedVlansOnSelf: "128,129"
+ snapshot-controller:
+ enabled: true
+ replicas: 1
+ # controller image and policies
+ image:
+ repository: registry.k8s.io/sig-storage/snapshot-controller
+ pullPolicy: IfNotPresent
+ tag: "v6.3.4"
+ # A list/array of extra args that should be used
+ # when running the controller. Default args include log verbose level
+ # and leader election
+ extraArgs: []
+ # snapshot webhook config
+ webhook:
+ # all below values take effect only if webhook is enabled
+ enabled: true
+ # webhook controller image and policies
+ image:
+ # change the image if you wish to use your own custom validation server image
+ repository: registry.k8s.io/sig-storage/snapshot-validation-webhook
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: "v6.3.4"
+ validatingWebhook:
+ failurePolicy: Fail
+ timeoutSeconds: 2
+ # Validating webhook is exposed on an HTTPS endpoint, and so
+ # TLS certificate is required. This Helm chart relies on
+ # cert-manager.io for managing TLS certificates.
+ tls:
+ # If not empty, this issuer will be used to sign the certificate.
+ # If none is provided, a new, self-signing issuer will be created.
+ issuerRef: {}
+ # name:
+ # kind:
+ # group: cert-manager.io
+
+ # Certificate duration. The generated certificate will be automatically
+ # renewed 1/3 of `certDuration` before its expiry.
+ # Value must be in units accepted by Go time.ParseDuration.
+ # See https://golang.org/pkg/time/#ParseDuration for allowed formats.
+ # Minimum accepted duration is `1h`.
+ # This option may be ignored/overridden by some issuer types.
+ certDuration: 8760h
+ service:
+ # when running in-cluster, the webhook service is recommended to be of type ClusterIP
+ type: ClusterIP
+ port: 443
+ serviceAccount:
+ # Specifies whether a service account should be created.
+ create: true
+ # Annotations to add to the service account.
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template.
+ name: ""
+ # Log verbosity level.
+ # See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md
+ # for description of individual verbosity levels.
+ logVerbosityLevel: 2
+ podAnnotations: {}
+ resources: {}
+ nodeSelector: {}
+ tolerations: []
+ affinity: {}
+ nameOverride: ""
+ fullnameOverride: ""
+ imagePullSecrets: []
+ nameOverride: ""
+ fullnameOverride: ""
+ resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases the chances that charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ nodeSelector: {}
+ tolerations: []
+ affinity: {}
+ # create a default volume snapshot class
+ volumeSnapshotClass:
+ create: true
+ name: "ceph-block-snapshot-class"
+ driver: "rook-ceph.rbd.csi.ceph.com"
+ # deletionPolicy determines whether a VolumeSnapshotContent created through
+ # the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
+ # Supported values are "Retain" and "Delete".
+ deletionPolicy: "Delete"
+ # params is a key-value map with storage driver specific parameters for creating snapshots.
+ params:
+ clusterID: rook-ceph
+ csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret
+ csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
+ # key-value pair of extra labels to apply to the volumesnapshotclass
+ extraLabels:
+ velero.io/csi-volumesnapshot-class: "true"
+ # time for sleep hook in seconds
+ hooksleepTime: 12
+ # this installs the latest cert-manager version if it is not already installed
+ cert-manager:
+ enabled: false
+ installCRDs: true
+ kubevirt:
+ enabled: true
+ # defaults to kubevirt
+ namespace: kubevirt
+ namespaceLabels:
+ pod-security.kubernetes.io/enforce: privileged
+ pod-security.kubernetes.io/enforce-version: v{{ .spectro.system.kubernetes.version | substr 0 4 }}
+ replicas: 1
+ service:
+ type: ClusterIP
+ port: 443
+ targetPort: 8443
+ image:
+ repository: gcr.io/spectro-images-public/release/kubevirt/virt-operator
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: "v1.2.0"
+ ## The Kubevirt CR that gets created
+ kubevirtResource:
+ name: kubevirt
+ useEmulation: false
+ # below gates are required for virtual machine orchestrator pack, users can append additional gates
+ additionalFeatureGates:
+ - LiveMigration
+ - HotplugVolumes
+ - Snapshot
+ - VMExport
+ - ExpandDisks
+ - HotplugNICs
+ - VMLiveUpdateFeatures
+ - VMPersistentState
+ - Sidecar
+ # for additional feature gates refer to https://docs.spectrocloud.com/vm-management#featuregates
+ config:
+ evictionStrategy: "LiveMigrate"
+ # additionalConfig lets you define any configuration other than developerConfiguration and evictionStrategy
+ additionalConfig:
+ vmStateStorageClass: "ceph-filesystem"
+ # additionalDevConfig lets you define dev config other than emulation and feature gate
+ additionalDevConfig: {}
+ # vmRolloutStrategy lets you define how changes to a VM object propagate to its VMI objects
+ vmRolloutStrategy: LiveUpdate
+ certificateRotateStrategy: {}
+ customizeComponents:
+ # flags:
+ # api:
+ # v:
+ # "5"
+ # port:
+ # "8443"
+ imagePullPolicy: IfNotPresent
+ infra: {}
+ # The name of the Prometheus service account that needs read-access to KubeVirt endpoints
+ monitorAccount: "prometheus-operator-prometheus"
+ # The namespace Prometheus is deployed in
+ monitorNamespace: "monitoring"
+ # The namespace the service monitor will be deployed in. Either specify this or the monitorNamespace
+ serviceMonitorNamespace: "monitoring"
+ workloads: {}
+ workloadsUpdateStrategy:
+ workloadUpdateMethods:
+ - LiveMigrate
+ # uninstallStrategy to use, options are RemoveWorkloads, BlockUninstallIfWorkloadsExist
+ uninstallStrategy: ""
+ ingress:
+ enabled: true
+ ingressClassName: nginx
+ annotations:
+ cert-manager.io/issuer: kubevirt-selfsigned-issuer
+ nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+ labels: {}
+ hosts:
+ - host: virt-exportproxy.maas-eng.sc
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ tls:
+ - secretName: virt-exportproxy-tls
+ hosts:
+ - virt-exportproxy.maas-eng.sc
+ # - secretName: chart-example-tls
+ # hosts:
+ # - virt-exportproxy.maas.sc
+ cdi:
+ enabled: true
+ namespaceLabels:
+ pod-security.kubernetes.io/enforce: privileged
+ pod-security.kubernetes.io/enforce-version: v{{ .spectro.system.kubernetes.version | substr 0 4 }}
+ replicas: 1
+ image:
+ repository: quay.io/kubevirt/cdi-operator
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: "v1.58.0"
+ service:
+ type: ClusterIP
+ port: 443
+ targetPort: 8443
+ # set enabled to true and add private registry details to bring up VMs in an air-gapped environment
+ privateRegistry:
+ enabled: false
+ registryIP: #Ex: 10.10.225.20
+ registryBasePath: #Ex: spectro-images
+ ## The CDI CR that gets created
+ cdiResource:
+ additionalFeatureGates:
+ # - FeatureName
+ additionalConfig:
+ podResourceRequirements:
+ requests:
+ cpu: 1
+ memory: 2G
+ limits:
+ cpu: 2
+ memory: 8G
+ filesystemOverhead:
+ global: "0.055"
+ storageClass:
+ spectro-storage-class: "0.1"
+ #insecureRegistries: [] # List of insecure registries to allow in the CDI importer, preferred in air-gapped environments
+ #importProxy:
+ # HTTPProxy: "http://username:password@your-proxy-server:3128"
+ # HTTPSProxy: "http://username:password@your-proxy-server:3128"
+ # noProxy: "127.0.0.1,localhost,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.company.local"
+ # TrustedCAProxy: configmap-name # optional: the ConfigMap name of a user-provided trusted certificate authority (CA) bundle to be added to the importer pod CA bundle
+ ingress:
+ enabled: true
+ className: "nginx"
+ annotations:
+ cert-manager.io/issuer: cdi-selfsigned-issuer
+ nginx.ingress.kubernetes.io/proxy-body-size: "0"
+ nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
+ nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
+ nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
+ nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+ hosts:
+ - host: cdi-uploadproxy.maas-eng.sc
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ tls:
+ - secretName: cdi-uploadproxy-tls
+ hosts:
+ - cdi-uploadproxy.maas-eng.sc
+ # - secretName: chart-example-tls
+ # hosts:
+ # - cdi-uploadproxy.maas.sc
+ multus:
+ enabled: true
+ image:
+ repository: ghcr.io/k8snetworkplumbingwg/multus-cni
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: "v4.0.2-thick"
+ networkController:
+ criSocket:
+ enableK3SHostPath: false # true for K3S and RKE2, false for PXK-E
+ criSocketContainerPath: /host/run/containerd/containerd.sock
+ imagePullSecrets: []
+ podAnnotations: {}
+ nodeSelector: {}
+ affinity: {}
+ dpdkCompatibility: false
+ cleanup:
+ image: gcr.io/spectro-images-public/release/spectro-cleanup
+ tag: "1.0.2"
+ networkAttachDef:
+ create: false
+ # a json string to apply
+ config: ''
+ # a sample config
+ # '{
+ # "cniVersion": "0.3.0",
+ # "type": "macvlan",
+ # "master": "ens5",
+ # "mode": "bridge",
+ # "ipam": {
+ # "type": "host-local",
+ # "subnet": "192.168.1.0/24",
+ # "rangeStart": "192.168.1.200",
+ # "rangeEnd": "192.168.1.216",
+ # "routes": [
+ # { "dst": "0.0.0.0/0" }
+ # ],
+ # "gateway": "192.168.1.1"
+ # }
+ # }'
+ descheduler:
+ enabled: true
+ namespace: "kube-system"
+ # CronJob or Deployment
+ kind: CronJob
+ image:
+ repository: registry.k8s.io/descheduler/descheduler
+ # Overrides the image tag whose default is the chart version
+ tag: "v0.30.1"
+ pullPolicy: IfNotPresent
+ imagePullSecrets:
+ # - name: container-registry-secret
+ resources:
+ requests:
+ cpu: 500m
+ memory: 256Mi
+ limits:
+ cpu: 500m
+ memory: 256Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ privileged: false
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ # podSecurityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
+ podSecurityContext: {}
+ # fsGroup: 1000
+
+ nameOverride: ""
+ fullnameOverride: "descheduler"
+ # labels that'll be applied to all resources
+ commonLabels: {}
+ cronJobApiVersion: "batch/v1"
+ schedule: "*/2 * * * *"
+ suspend: false
+ # startingDeadlineSeconds: 200
+ # successfulJobsHistoryLimit: 3
+ # failedJobsHistoryLimit: 1
+ # ttlSecondsAfterFinished: 600
+ # timeZone: Etc/UTC
+
+ # Required when running as a Deployment
+ deschedulingInterval: 5m
+ # Specifies the replica count for Deployment
+ # Set leaderElection if you want to use more than 1 replica
+ # Set affinity.podAntiAffinity rule if you want to schedule onto a node
+ # only if that node is in the same zone as at least one already-running descheduler
+ replicas: 1
+ # Specifies whether Leader Election resources should be created
+ # Required when running as a Deployment
+ # NOTE: Leader election can't be activated if DryRun enabled
+ leaderElection: {}
+ # enabled: true
+ # leaseDuration: 15s
+ # renewDeadline: 10s
+ # retryPeriod: 2s
+ # resourceLock: "leases"
+ # resourceName: "descheduler"
+ # resourceNamespace: "kube-system"
+
+ command:
+ - "/bin/descheduler"
+ cmdOptions:
+ v: 3
+ # Recommended to use the latest Policy API version supported by the Descheduler app version
+ deschedulerPolicyAPIVersion: "descheduler/v1alpha2"
+ deschedulerPolicy:
+ # nodeSelector: "key1=value1,key2=value2"
+ # maxNoOfPodsToEvictPerNode: 10
+ # maxNoOfPodsToEvictPerNamespace: 10
+ # ignorePvcPods: true
+ # evictLocalStoragePods: true
+ # evictDaemonSetPods: true
+ # tracing:
+ # collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
+ # transportCert: ""
+ # serviceName: ""
+ # serviceNamespace: ""
+ # sampleRate: 1.0
+ # fallbackToNoOpProviderOnError: true
+ profiles:
+ - name: default
+ pluginConfig:
+ - name: DefaultEvictor
+ args:
+ ignorePvcPods: true
+ evictLocalStoragePods: true
+ - name: RemoveDuplicates
+ - name: RemovePodsHavingTooManyRestarts
+ args:
+ podRestartThreshold: 100
+ includingInitContainers: true
+ - name: RemovePodsViolatingNodeAffinity
+ args:
+ nodeAffinityType:
+ - requiredDuringSchedulingIgnoredDuringExecution
+ - name: RemovePodsViolatingNodeTaints
+ - name: RemovePodsViolatingInterPodAntiAffinity
+ - name: RemovePodsViolatingTopologySpreadConstraint
+ - name: LowNodeUtilization
+ args:
+ thresholds:
+ cpu: 20
+ memory: 20
+ pods: 20
+ targetThresholds:
+ cpu: 50
+ memory: 50
+ pods: 50
+ plugins:
+ balance:
+ enabled:
+ - RemoveDuplicates
+ - RemovePodsViolatingTopologySpreadConstraint
+ - LowNodeUtilization
+ deschedule:
+ enabled:
+ - RemovePodsHavingTooManyRestarts
+ - RemovePodsViolatingNodeTaints
+ - RemovePodsViolatingNodeAffinity
+ - RemovePodsViolatingInterPodAntiAffinity
+ priorityClassName: system-cluster-critical
+ nodeSelector: {}
+ # foo: bar
+
+ affinity: {}
+ # nodeAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # nodeSelectorTerms:
+ # - matchExpressions:
+ # - key: kubernetes.io/e2e-az-name
+ # operator: In
+ # values:
+ # - e2e-az1
+ # - e2e-az2
+ # podAntiAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # - labelSelector:
+ # matchExpressions:
+ # - key: app.kubernetes.io/name
+ # operator: In
+ # values:
+ # - descheduler
+ # topologyKey: "kubernetes.io/hostname"
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: kubernetes.io/hostname
+ # whenUnsatisfiable: DoNotSchedule
+ # labelSelector:
+ # matchLabels:
+ # app.kubernetes.io/name: descheduler
+ tolerations: []
+ # - key: 'management'
+ # operator: 'Equal'
+ # value: 'tool'
+ # effect: 'NoSchedule'
+
+ rbac:
+ # Specifies whether RBAC resources should be created
+ create: true
+ serviceAccount:
+ # Specifies whether a ServiceAccount should be created
+ create: true
+ # The name of the ServiceAccount to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+ # Specifies custom annotations for the serviceAccount
+ annotations: {}
+ podAnnotations: {}
+ podLabels: {}
+ dnsConfig: {}
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /healthz
+ port: 10258
+ scheme: HTTPS
+ initialDelaySeconds: 3
+ periodSeconds: 10
+ service:
+ enabled: false
+ # @param service.ipFamilyPolicy [string], supports SingleStack, PreferDualStack, and RequireDualStack
+ #
+ ipFamilyPolicy: ""
+ # @param service.ipFamilies [array] List of IP families (e.g. IPv4, IPv6) assigned to the service.
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
+ # E.g.
+ # ipFamilies:
+ # - IPv6
+ # - IPv4
+ ipFamilies: []
+ serviceMonitor:
+ enabled: false
+ # The namespace where Prometheus expects to find service monitors.
+ # namespace: ""
+ # Add custom labels to the ServiceMonitor resource
+ additionalLabels: {}
+ # prometheus: kube-prometheus-stack
+ interval: ""
+ # honorLabels: true
+ insecureSkipVerify: true
+ serverName: null
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'descheduler_(build_info|pods_evicted)'
+ # sourceLabels: [__name__]
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
diff --git a/terraform/vmo-cluster/provider.tf b/terraform/vmo-cluster/provider.tf
new file mode 100644
index 0000000..8294ea0
--- /dev/null
+++ b/terraform/vmo-cluster/provider.tf
@@ -0,0 +1,29 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+
+terraform {
+ required_providers {
+ spectrocloud = {
+ version = ">= 0.22.2"
+ source = "spectrocloud/spectrocloud"
+ }
+
+ tls = {
+ source = "hashicorp/tls"
+ version = "4.0.4"
+ }
+
+ local = {
+ source = "hashicorp/local"
+ version = "2.4.1"
+ }
+ }
+
+ required_version = ">= 1.9"
+}
+
+
+provider "spectrocloud" {
+ # API key set through the environment variable SPECTROCLOUD_APIKEY
+ project_name = var.palette-project
+}
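+
+# A brief usage sketch (assuming a POSIX shell; replace the placeholder with your own Palette API key):
+#   export SPECTROCLOUD_APIKEY=<your-palette-api-key>
+#   terraform init
+#   terraform plan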
diff --git a/terraform/vmo-cluster/terraform.tfvars b/terraform/vmo-cluster/terraform.tfvars
new file mode 100644
index 0000000..9cd7a18
--- /dev/null
+++ b/terraform/vmo-cluster/terraform.tfvars
@@ -0,0 +1,26 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+
+#####################
+# Palette Settings
+#####################
+palette-project = "Default" # The name of your project in Palette.
+
+############################
+# MAAS Deployment Settings
+############################
+deploy-maas = false # Set to true to deploy to MAAS.
+deploy-maas-vm = false # Set to true to create a VM on MAAS cluster once deployed.
+
+pcg-name = "REPLACE ME" # Provide the name of the PCG that will be used to deploy the Palette cluster.
+maas-domain = "REPLACE ME" # Provide the MAAS domain that will be used to deploy the Palette cluster.
+
+maas-worker-nodes = 1 # Provide the number of worker nodes that will be used for the Palette cluster.
+maas-worker-resource-pool = "REPLACE ME" # Provide a resource pool for the worker nodes.
+maas-worker-azs = ["REPLACE ME"] # Provide a set of availability zones for the worker nodes.
+maas-worker-node-tags = ["REPLACE ME"] # Provide a set of node tags for the worker nodes.
+
+maas-control-plane-nodes = 1 # Provide the number of control plane nodes that will be used for the Palette cluster.
+maas-control-plane-resource-pool = "REPLACE ME" # Provide a resource pool for the control plane nodes.
+maas-control-plane-azs = ["REPLACE ME"] # Provide a set of availability zones for the control plane nodes.
+maas-control-plane-node-tags = ["REPLACE ME"] # Provide a set of node tags for the control plane nodes.
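+
+# A hypothetical filled-in example (all values below are illustrative only; use the PCG name, domain,
+# resource pools, availability zones, and tags from your own MAAS environment):
+# pcg-name                         = "maas-pcg"
+# maas-domain                      = "maas.example.internal"
+# maas-worker-resource-pool        = "default"
+# maas-worker-azs                  = ["default"]
+# maas-worker-node-tags            = ["worker"]
+# maas-control-plane-resource-pool = "default"
+# maas-control-plane-azs           = ["default"]
+# maas-control-plane-node-tags     = ["control-plane"]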
diff --git a/terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl
new file mode 100644
index 0000000..78eaaef
--- /dev/null
+++ b/terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl
@@ -0,0 +1,37 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+# Test case 4 - Verify PCG name, domain, resource pools, AZs and node tags cannot be empty.
+
+variables {
+ deploy-maas = true
+ deploy-maas-vm = false
+ pcg-name = ""
+ maas-domain = ""
+ maas-worker-nodes = 1
+ maas-worker-resource-pool = ""
+ maas-worker-azs = []
+ maas-worker-node-tags = []
+ maas-control-plane-nodes = 1
+ maas-control-plane-resource-pool = ""
+ maas-control-plane-azs = []
+ maas-control-plane-node-tags = []
+}
+
+mock_provider "spectrocloud" {
+}
+
+run "verify_maas" {
+
+ command = plan
+
+ expect_failures = [
+ var.pcg-name,
+ var.maas-domain,
+ var.maas-worker-resource-pool,
+ var.maas-worker-azs,
+ var.maas-worker-node-tags,
+ var.maas-control-plane-resource-pool,
+ var.maas-control-plane-azs,
+ var.maas-control-plane-node-tags
+ ]
+}
diff --git a/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl
new file mode 100644
index 0000000..c89a7b9
--- /dev/null
+++ b/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl
@@ -0,0 +1,37 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+# Test case 5 - Verify PCG name, domain, resource pools, AZs and node tags cannot have REPLACE ME values.
+
+variables {
+ deploy-maas = true
+ deploy-maas-vm = false
+ pcg-name = "REPLACE ME"
+ maas-domain = "REPLACE ME"
+ maas-worker-nodes = 1
+ maas-worker-resource-pool = "REPLACE ME"
+ maas-worker-azs = ["REPLACE ME"]
+ maas-worker-node-tags = ["REPLACE ME"]
+ maas-control-plane-nodes = 1
+ maas-control-plane-resource-pool = "REPLACE ME"
+ maas-control-plane-azs = ["REPLACE ME"]
+ maas-control-plane-node-tags = ["REPLACE ME"]
+}
+
+mock_provider "spectrocloud" {
+}
+
+run "verify_maas" {
+
+ command = plan
+
+ expect_failures = [
+ var.pcg-name,
+ var.maas-domain,
+ var.maas-worker-resource-pool,
+ var.maas-worker-azs,
+ var.maas-worker-node-tags,
+ var.maas-control-plane-resource-pool,
+ var.maas-control-plane-azs,
+ var.maas-control-plane-node-tags
+ ]
+}
diff --git a/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl
new file mode 100644
index 0000000..60f9ae4
--- /dev/null
+++ b/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl
@@ -0,0 +1,42 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+# Test case 3 - Verify MAAS profile, cluster and VM are correctly planned when values are provided.
+
+variables {
+ deploy-maas = true
+ deploy-maas-vm = true
+ pcg-name = "test-pcg"
+ maas-domain = "test-domain"
+ maas-worker-nodes = 1
+ maas-worker-resource-pool = "test-worker-pool"
+ maas-worker-azs = ["test-worker-az"]
+ maas-worker-node-tags = ["test-worker-tags"]
+ maas-control-plane-nodes = 1
+ maas-control-plane-resource-pool = "test-cp-pool"
+ maas-control-plane-azs = ["test-cp-az"]
+ maas-control-plane-node-tags = ["test-cp-tags"]
+}
+
+mock_provider "spectrocloud" {
+}
+
+run "verify_maas" {
+
+ command = plan
+
+ assert {
+ condition = length(spectrocloud_cluster_profile.maas-vmo-profile) == 1
+ error_message = "No MAAS cluster profile was created"
+ }
+
+ assert {
+ condition = length(spectrocloud_cluster_maas.maas-cluster) == 1
+ error_message = "No MAAS cluster was created"
+ }
+
+ assert {
+ condition = length(spectrocloud_virtual_machine.virtual-machine) == 1
+ error_message = "No MAAS VM was created"
+ }
+
+}
diff --git a/terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl
new file mode 100644
index 0000000..3d24108
--- /dev/null
+++ b/terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl
@@ -0,0 +1,31 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+# Test case 6 - Verify control plane and worker nodes cannot be set to 0.
+
+variables {
+ deploy-maas = true
+ deploy-maas-vm = false
+ pcg-name = "test-pcg"
+ maas-domain = "test-domain"
+ maas-worker-nodes = 0
+ maas-worker-resource-pool = "test-worker-pool"
+ maas-worker-azs = ["test-worker-az"]
+ maas-worker-node-tags = ["test-worker-tags"]
+ maas-control-plane-nodes = 0
+ maas-control-plane-resource-pool = "test-cp-pool"
+ maas-control-plane-azs = ["test-cp-az"]
+ maas-control-plane-node-tags = ["test-cp-tags"]
+}
+
+mock_provider "spectrocloud" {
+}
+
+run "verify_maas" {
+
+ command = plan
+
+ expect_failures = [
+ var.maas-worker-nodes,
+ var.maas-control-plane-nodes
+ ]
+}
diff --git a/terraform/vmo-cluster/tests/maas-cluster.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster.tftest.hcl
new file mode 100644
index 0000000..6b2b426
--- /dev/null
+++ b/terraform/vmo-cluster/tests/maas-cluster.tftest.hcl
@@ -0,0 +1,37 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+# Test case 2 - Verify MAAS profile and cluster are correctly planned when values are provided.
+
+variables {
+ deploy-maas = true
+ deploy-maas-vm = false
+ pcg-name = "test-pcg"
+ maas-domain = "test-domain"
+ maas-worker-nodes = 1
+ maas-worker-resource-pool = "test-worker-pool"
+ maas-worker-azs = ["test-worker-az"]
+ maas-worker-node-tags = ["test-worker-tags"]
+ maas-control-plane-nodes = 1
+ maas-control-plane-resource-pool = "test-cp-pool"
+ maas-control-plane-azs = ["test-cp-az"]
+ maas-control-plane-node-tags = ["test-cp-tags"]
+}
+
+mock_provider "spectrocloud" {
+}
+
+run "verify_maas" {
+
+ command = plan
+
+ assert {
+ condition = length(spectrocloud_cluster_profile.maas-vmo-profile) == 1
+ error_message = "No MAAS cluster profile was created"
+ }
+
+ assert {
+ condition = length(spectrocloud_cluster_maas.maas-cluster) == 1
+ error_message = "No MAAS cluster was created"
+ }
+
+}
diff --git a/terraform/vmo-cluster/tests/project-palette.tftest.hcl b/terraform/vmo-cluster/tests/project-palette.tftest.hcl
new file mode 100644
index 0000000..49de946
--- /dev/null
+++ b/terraform/vmo-cluster/tests/project-palette.tftest.hcl
@@ -0,0 +1,16 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+# Test case 1 - Verify Palette Project is not allowed empty value.
+
+variables {
+ palette-project = ""
+}
+
+run "project_variable" {
+
+ command = plan
+
+ expect_failures = [
+ var.palette-project
+ ]
+}
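+
+# A brief usage sketch for running the test cases in this folder (assumes Terraform >= 1.9, as
+# required by provider.tf, and that SPECTROCLOUD_APIKEY is exported):
+#   terraform init
+#   terraform test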
diff --git a/terraform/vmo-cluster/virtual-machines/cloud-init b/terraform/vmo-cluster/virtual-machines/cloud-init
new file mode 100644
index 0000000..935c9a1
--- /dev/null
+++ b/terraform/vmo-cluster/virtual-machines/cloud-init
@@ -0,0 +1,22 @@
+#cloud-config
+ssh_pwauth: True
+chpasswd: { expire: False }
+password: spectro
+disable_root: false
+runcmd:
+ - apt-get update
+ - apt-get install -y qemu-guest-agent
+ - systemctl start qemu-guest-agent
+ - |
+ apt-get -y install ca-certificates curl
+ install -m 0755 -d /etc/apt/keyrings
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+ chmod a+r /etc/apt/keyrings/docker.asc
+ echo \
+ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+ $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+ tee /etc/apt/sources.list.d/docker.list > /dev/null
+ apt-get update
+ apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+ groupadd docker
+ gpasswd -a ubuntu docker
diff --git a/terraform/vmo-cluster/virtual_machines.tf b/terraform/vmo-cluster/virtual_machines.tf
new file mode 100644
index 0000000..9a07542
--- /dev/null
+++ b/terraform/vmo-cluster/virtual_machines.tf
@@ -0,0 +1,106 @@
+
+##########################
+# MAAS Virtual Machine
+##########################
+resource "spectrocloud_virtual_machine" "virtual-machine" {
+ count = var.deploy-maas-vm ? 1 : 0
+ depends_on = [spectrocloud_cluster_maas.maas-cluster]
+
+ cluster_uid = data.spectrocloud_cluster.maas_vmo_cluster[0].id
+ cluster_context = data.spectrocloud_cluster.maas_vmo_cluster[0].context
+
+ run_on_launch = true
+ namespace = "default"
+ name = "ubuntu-tutorial-vm"
+
+ timeouts {
+ create = "60m"
+ }
+
+ labels = {
+ "tf" = "spectrocloud-tutorials"
+ "kubevirt.io/vm" = "ubuntu-tutorial-vm"
+ }
+
+ data_volume_templates {
+ metadata {
+ name = "ubuntu-tutorial-vm"
+ }
+ spec {
+ source {
+ pvc {
+ name = "template-ubuntu-2204"
+ namespace = "vmo-golden-images"
+ }
+ }
+ pvc {
+ access_modes = ["ReadWriteMany"]
+ resources {
+ requests = {
+ storage = "50Gi"
+ }
+ }
+ storage_class_name = "ceph-block"
+ volume_mode = "Block"
+ }
+ }
+ }
+
+ volume {
+ name = "ubuntu-tutorial-vm"
+ volume_source {
+ data_volume {
+ name = "ubuntu-tutorial-vm"
+ }
+ }
+ }
+
+ volume {
+ name = "cloudinitdisk"
+ volume_source {
+ cloud_init_no_cloud {
+ user_data = file("virtual-machines/cloud-init")
+ }
+ }
+ }
+
+ disk {
+ name = "ubuntu-tutorial-vm"
+ disk_device {
+ disk {
+ bus = "virtio"
+ }
+ }
+ }
+ disk {
+ name = "cloudinitdisk"
+ disk_device {
+ disk {
+ bus = "virtio"
+ }
+ }
+ }
+
+ cpu {
+ cores = 2
+ sockets = 1
+ threads = 1
+ }
+ memory {
+ guest = "4Gi"
+ }
+
+ resources {}
+
+ interface {
+ name = "default"
+ interface_binding_method = "InterfaceMasquerade"
+ }
+
+ network {
+ name = "default"
+ network_source {
+ pod {}
+ }
+ }
+}
From 56eabe02f4d376026d0c0092468bc36f001eb724 Mon Sep 17 00:00:00 2001
From: kenheaslip-sc
Date: Tue, 20 May 2025 15:24:33 -0400
Subject: [PATCH 02/11] Saving-Work
---
terraform/vmo-cluster/cluster_profiles.tf | 13 +-
terraform/vmo-cluster/clusters.tf | 10 +-
terraform/vmo-cluster/data.tf | 14 +-
terraform/vmo-cluster/inputs.tf | 194 +++++++++
.../vmo-cluster/manifests/k8s-values.yaml | 4 +-
.../vmo-cluster/manifests/metallb-values.yaml | 383 ++++++++++++++++++
.../vmo-cluster/manifests/ubuntu-values.yaml | 4 +-
.../vmo-cluster/manifests/vmo-values.yaml | 2 +
terraform/vmo-cluster/terraform.tfvars | 70 +++-
.../maas-cluster-missing-values.tftest.hcl | 17 +
.../maas-cluster-replace-values.tftest.hcl | 17 +
.../tests/maas-cluster-vm.tftest.hcl | 17 +
.../tests/maas-cluster-zero-nodes.tftest.hcl | 19 +-
.../vmo-cluster/tests/maas-cluster.tftest.hcl | 17 +
terraform/vmo-cluster/virtual_machines.tf | 14 +-
15 files changed, 758 insertions(+), 37 deletions(-)
create mode 100644 terraform/vmo-cluster/manifests/metallb-values.yaml
diff --git a/terraform/vmo-cluster/cluster_profiles.tf b/terraform/vmo-cluster/cluster_profiles.tf
index 04bfbf7..4be5c9e 100644
--- a/terraform/vmo-cluster/cluster_profiles.tf
+++ b/terraform/vmo-cluster/cluster_profiles.tf
@@ -5,12 +5,12 @@
resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
count = var.deploy-maas ? 1 : 0
- name = "tf-maas-vmo-profile"
+ name = var.vmo-cluster-name
description = "A basic cluster profile for MAAS VMO"
tags = concat(var.tags, ["env:maas"])
cloud = "maas"
- type = "cluster"
- version = "1.0.0"
+ type = var.cluster-profile-type
+ version = var.cluster-profile-version
pack {
name = data.spectrocloud_pack.maas_ubuntu.name
@@ -46,6 +46,13 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
type = "spectro"
}
+ pack {
+ name = "lb-metallb-helm"
+ tag = "1.14.x"
+ uid = data.spectrocloud_pack.maas_metallb.id
+ values = file("manifests/metallb-values.yaml")
+ }
+
pack {
name = data.spectrocloud_pack.maas_vmo.name
tag = data.spectrocloud_pack.maas_vmo.version
diff --git a/terraform/vmo-cluster/clusters.tf b/terraform/vmo-cluster/clusters.tf
index aecec17..89ba609 100644
--- a/terraform/vmo-cluster/clusters.tf
+++ b/terraform/vmo-cluster/clusters.tf
@@ -8,7 +8,7 @@
resource "spectrocloud_cluster_maas" "maas-cluster" {
count = var.deploy-maas ? 1 : 0
- name = "vmo-cluster-maas"
+ name = var.vmo-cluster-name
tags = concat(var.tags, ["env:maas"])
cloud_account_id = data.spectrocloud_cloudaccount_maas.account[0].id
pause_agent_upgrades = "unlock"
@@ -28,8 +28,8 @@ resource "spectrocloud_cluster_maas" "maas-cluster" {
azs = var.maas-control-plane-azs
node_tags = var.maas-control-plane-node-tags
instance_type {
- min_cpu = 8
- min_memory_mb = 16000
+ min_cpu = var.ctl-node-min-cpu
+ min_memory_mb = var.ctl-node-min-memory-mb
}
placement {
resource_pool = var.maas-control-plane-resource-pool
@@ -42,8 +42,8 @@ resource "spectrocloud_cluster_maas" "maas-cluster" {
azs = var.maas-worker-azs
node_tags = var.maas-worker-node-tags
instance_type {
- min_cpu = 8
- min_memory_mb = 32000
+ min_cpu = var.wrk-node-min-cpu
+ min_memory_mb = var.wrk-node-min-memory-mb
}
placement {
resource_pool = var.maas-worker-resource-pool
diff --git a/terraform/vmo-cluster/data.tf b/terraform/vmo-cluster/data.tf
index e820e86..581f342 100644
--- a/terraform/vmo-cluster/data.tf
+++ b/terraform/vmo-cluster/data.tf
@@ -25,25 +25,31 @@ data "spectrocloud_pack" "maas_ubuntu" {
data "spectrocloud_pack" "maas_k8s" {
name = "kubernetes"
- version = "1.30.6"
+ version = "1.32.2"
registry_uid = data.spectrocloud_registry.public_registry.id
}
data "spectrocloud_pack" "maas_cni" {
name = "cni-cilium-oss"
- version = "1.15.3"
+ version = "1.17.1"
registry_uid = data.spectrocloud_registry.public_registry.id
}
data "spectrocloud_pack" "maas_csi" {
name = "csi-rook-ceph-helm"
- version = "1.14.9"
+ version = "1.16.3"
+ registry_uid = data.spectrocloud_registry.public_registry.id
+}
+
+data "spectrocloud_pack" "maas_metallb" {
+ name = "lb-metallb-helm"
+ version = "0.14.9"
registry_uid = data.spectrocloud_registry.public_registry.id
}
data "spectrocloud_pack" "maas_vmo" {
name = "virtual-machine-orchestrator"
- version = "4.4.10"
+ version = "4.6.3"
registry_uid = data.spectrocloud_registry.public_registry.id
}
diff --git a/terraform/vmo-cluster/inputs.tf b/terraform/vmo-cluster/inputs.tf
index 463089b..b0ac1c3 100644
--- a/terraform/vmo-cluster/inputs.tf
+++ b/terraform/vmo-cluster/inputs.tf
@@ -31,6 +31,30 @@ variable "tags" {
]
}
+# ###########################
+# # manifests/k8s-values.yaml
+# ###########################
+
+# variable "pod_CIDR" {
+# type = string
+# description = "Subnet range to be used for pods in the cluster."
+# }
+
+# variable "serviceClusterIpRange" {
+# type = string
+# description = "Subnet range to use for Cluster Services."
+#}
+
+
+# ################################
+# # manifests/metallb-values.yaml
+# ################################
+
+# variable "metallb_ip_pool" {
+# type = number
+# description = "IP addresses to be assigned to MetalLB. Format 1.1.1.1, 1.1.1.2 or '1.1.1.1-1.1.1.2"
+#}
+
######
# MAAS
######
@@ -146,3 +170,173 @@ variable "maas-control-plane-node-tags" {
error_message = "Provide a valid set of node tags for control plane nodes."
}
}
+
+variable "vmo-cluster-name" {
+ type = string
+ description = "Set of node tags for the MAAS control plane nodes."
+
+ validation {
+ condition = var.deploy-maas ? var.vmo-cluster-name != "REPLACE ME" && var.vmo-cluster-name != "" : true
+ error_message = "Provide a valid set of node tags for control plane nodes."
+ }
+}
+
+variable "cluster-profile-type" {
+ type = string
+ description = "Identifies profile type of Infrastructure, Full, or Add-on."
+
+ validation {
+ condition = var.deploy-maas ? var.cluster-profile-type != "REPLACE ME" && var.cluster-profile-type != "" : true
+ error_message = "Provide a valid cluster profile type."
+ }
+}
+
+variable "cluster-profile-version" {
+ type = string
+ description = "Set the version number of the cluster profile to be created"
+
+ validation {
+ condition = var.deploy-maas ? var.cluster-profile-version != "REPLACE ME" && var.cluster-profile-version != "" : true
+ error_message = "Provide a valid version number."
+ }
+}
+
+variable "ctl-node-min-cpu" {
+ type = number
+ description = "Set the minimum number of CPU cores to be used for the control plane nodes."
+
+ validation {
+ condition = var.deploy-maas ? var.ctl-node-min-cpu > 0 : true
+ error_message = "Provide a valid number of CPU cores to be used for control plane nodes."
+ }
+}
+
+variable "ctl-node-min-memory-mb" {
+ type = number
+ description = "Set the minimum amount of Memory to be used for the control plane nodes."
+
+ validation {
+ condition = var.deploy-maas ? var.ctl-node-min-memory-mb > 0 : true
+ error_message = "Provide a valid number amount of Memory to be used control plane nodes."
+ }
+}
+
+variable "wrk-node-min-cpu" {
+ type = number
+ description = "Set the minimum number of CPU cores to be used for the worker nodes."
+
+ validation {
+ condition = var.deploy-maas ? var.wrk-node-min-cpu > 0 : true
+ error_message = "Provide a valid number of CPU cores to be used for control plane nodes."
+ }
+}
+
+variable "wrk-node-min-memory-mb" {
+ type = number
+ description = "Set the minimum amount of Memory to be used for the worker nodes."
+
+ validation {
+ condition = var.deploy-maas ? var.wrk-node-min-memory-mb > 0 : true
+ error_message = "Provide a valid amount of Memory to be used for the worker nodes."
+ }
+}
+
+variable "pod_CIDR" {
+ type = string
+ description = "Set the subnet your K8s pods will use."
+
+ validation {
+ condition = var.deploy-maas ? var.pod_CIDR != "REPLACE ME" && var.pod_CIDR != "" : true
+ error_message = "Provide a valid subnet in CIDR format ex: 1.1.1.1/24."
+ }
+}
+
+variable "serviceClusterIpRange" {
+ type = string
+ description = "Set the subnet your K8s services will use."
+
+ validation {
+ condition = var.deploy-maas ? var.serviceClusterIpRange != "REPLACE ME" && var.serviceClusterIpRange != "" : true
+ error_message = "Provide a valid subnet in CIDR format ex: 1.1.1.1/24."
+ }
+}
+
+variable "metallb-ip-pool" {
+ type = set(string)
+ description = "Set the IP addresses or subnet range for MetalLB to use for ingress."
+
+ validation {
+ condition = var.deploy-maas ? var.metallb-ip-pool != "REPLACE ME" && var.metallb-ip-pool != "" : true
+ error_message = "Provide valid IP addresses or subnet range for MetalLB to use for ingress."
+ }
+}
+
+variable "vm-deploy-namespace" {
+ type = string
+ description = "Set the target namespace where your VM will be deployed."
+
+ validation {
+ condition = var.deploy-maas ? var.vm-deploy-namespace != "REPLACE ME" && var.vm-deploy-namespace != "" : true
+ error_message = "Provide valid namespace where your VM will be deployed."
+ }
+}
+
+variable "vm-deploy-name" {
+ type = string
+ description = "Provide a valid name for your VM."
+
+ validation {
+ condition = var.deploy-maas ? var.vm-deploy-name != "REPLACE ME" && var.vm-deploy-name != "" : true
+ error_message = "Provide a valid name for your VM."
+ }
+}
+
+variable "vm-storage-Gi" {
+ type = string
+ description = "The amount of storage your VM will have."
+
+ validation {
+ condition = var.deploy-maas ? var.vm-storage-Gi != "REPLACE ME" && var.vm-storage-Gi != "" : true
+ error_message = "Provide a valid amount of storage for your VM. Include Gi at the end. Example 50Gi."
+ }
+}
+
+variable "vm-cpu-cores" {
+ type = number
+ description = "Set the minimum number of CPU cores to be used for the control plane nodes."
+
+ validation {
+ condition = var.deploy-maas-vm ? var.vm-cpu-cores > 0 : true
+ error_message = "Provide a valid number of CPU cores to be used for control plane nodes."
+ }
+}
+
+variable "vm-cpu-sockets" {
+ type = number
+ description = "The number of CPU sockets the VM will use. This can be multiple to allow for hardware failure."
+
+ validation {
+ condition = var.deploy-maas-vm ? var.vm-cpu-sockets > 0 : true
+ error_message = "Provide a valid number of CPU sockets to be used by the VM. This can be multiple to allow for hardware failure."
+ }
+}
+
+variable "vm-cpu-threads" {
+ type = number
+ description = "Set the number of CPU threads the VM will use."
+
+ validation {
+ condition = var.deploy-maas-vm ? var.vm-cpu-threads > 0 : true
+ error_message = "Provide a valid number of CPU threads the VM will use."
+ }
+}
+
+variable "vm-memory-Gi" {
+ type = string
+ description = "The amount of memory your VM will have."
+
+ validation {
+ condition = var.deploy-maas ? var.vm-memory-Gi != "REPLACE ME" && var.vm-memory-Gi != "" : true
+ error_message = "Provide a valid amount of memory for your VM. Include Gi at the end. Example 4Gi."
+ }
+}
\ No newline at end of file
diff --git a/terraform/vmo-cluster/manifests/k8s-values.yaml b/terraform/vmo-cluster/manifests/k8s-values.yaml
index b458475..8bc160c 100644
--- a/terraform/vmo-cluster/manifests/k8s-values.yaml
+++ b/terraform/vmo-cluster/manifests/k8s-values.yaml
@@ -12,10 +12,10 @@ pack:
- image: registry.k8s.io/pause:3.8
#CIDR Range for Pods in cluster
# Note : This must not overlap with any of the host or service network
- podCIDR: "100.64.0.0/18"
+ podCIDR: var.pod_CIDR
#CIDR notation IP range from which to assign service cluster IPs
# Note : This must not overlap with any IP ranges assigned to nodes for pods.
- serviceClusterIpRange: "100.64.64.0/18"
+ serviceClusterIpRange: var.serviceClusterIpRange
palette:
config:
dashboard:
diff --git a/terraform/vmo-cluster/manifests/metallb-values.yaml b/terraform/vmo-cluster/manifests/metallb-values.yaml
new file mode 100644
index 0000000..84e635d
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/metallb-values.yaml
@@ -0,0 +1,383 @@
+pack:
+ content:
+ images:
+ - image: us-docker.pkg.dev/palette-images/packs/metallb/0.14.9/controller:v0.14.9
+ - image: us-docker.pkg.dev/palette-images/packs/metallb/0.14.9/speaker:v0.14.9
+ - image: us-docker.pkg.dev/palette-images/packs/metallb/0.14.9/frr:9.1.0
+ - image: us-docker.pkg.dev/palette-images/packs/metallb/0.14.9/kube-rbac-proxy:v0.12.0
+ charts:
+ - repo: https://metallb.github.io/metallb
+ name: metallb
+ version: 0.14.9
+ namespace: metallb-system
+ namespaceLabels:
+ "metallb-system": "pod-security.kubernetes.io/enforce=privileged,pod-security.kubernetes.io/enforce-version=v{{ .spectro.system.kubernetes.version | substr 0 4 }}" # Do not change this namespace, since CRDs expect the namespace to be metallb-system
+charts:
+ metallb-full:
+ configuration:
+ ipaddresspools:
+ first-pool:
+ spec:
+ addresses:
+ - var.metallb-ip-pool
+ # - 192.168.100.50-192.168.100.60
+ avoidBuggyIPs: true # Buggy IPs are any .0 or .255 addresses. These are commonly used for subnet ID and broadcast addresses.
+ autoAssign: true
+ l2advertisements:
+ default:
+ spec:
+ ipAddressPools:
+ - first-pool
+ bgpadvertisements: {}
+ # external:
+ # spec:
+ # ipAddressPools:
+ # - bgp-pool
+ # # communities:
+ # # - vpn-only
+
+ bgppeers: {}
+ # bgp-peer-1:
+ # spec:
+ # myASN: 64512
+ # peerASN: 64512
+ # peerAddress: 172.30.0.3
+ # peerPort: 180
+ # # BFD profiles can only be used in FRR mode
+ # # bfdProfile: bfd-profile-1
+
+ communities: {}
+ # community-1:
+ # spec:
+ # communities:
+ # - name: vpn-only
+ # value: 1234:1
+
+ bfdprofiles: {}
+ # bfd-profile-1:
+ # spec:
+ # receiveInterval: 380
+ # transmitInterval: 270
+ metallb:
+ # Default values for metallb.
+ # This is a YAML-formatted file.
+ # Declare variables to be passed into your templates.
+ imagePullSecrets: []
+ nameOverride: ""
+ fullnameOverride: ""
+ loadBalancerClass: ""
+ # To configure MetalLB, you must specify ONE of the following two
+ # options.
+ rbac:
+ # create specifies whether to install and use RBAC rules.
+ create: true
+ prometheus:
+ # scrape annotations specifies whether to add Prometheus metric
+ # auto-collection annotations to pods. See
+ # https://github.com/prometheus/prometheus/blob/release-2.1/documentation/examples/prometheus-kubernetes.yml
+ # for a corresponding Prometheus configuration. Alternatively, you
+ # may want to use the Prometheus Operator
+ # (https://github.com/coreos/prometheus-operator) for more powerful
+ # monitoring configuration. If you use the Prometheus operator, this
+ # can be left at false.
+ scrapeAnnotations: false
+ # port both controller and speaker will listen on for metrics
+ metricsPort: 7472
+ # if set, enables rbac proxy on the controller and speaker to expose
+ # the metrics via tls.
+ # secureMetricsPort: 9120
+
+ # the name of the secret to be mounted in the speaker pod
+ # to expose the metrics securely. If not present, a self signed
+ # certificate to be used.
+ speakerMetricsTLSSecret: ""
+ # the name of the secret to be mounted in the controller pod
+ # to expose the metrics securely. If not present, a self signed
+ # certificate to be used.
+ controllerMetricsTLSSecret: ""
+ # prometheus doesn't have the permission to scrape all namespaces so we give it permission to scrape metallb's one
+ rbacPrometheus: true
+ # the service account used by prometheus
+ # required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
+ serviceAccount: ""
+ # the namespace where prometheus is deployed
+ # required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true "
+ namespace: ""
+ # the image to be used for the kuberbacproxy container
+ rbacProxy:
+ repository: us-docker.pkg.dev/palette-images/packs/metallb/0.14.9/kube-rbac-proxy
+ tag: v0.12.0
+ pullPolicy:
+ # Prometheus Operator PodMonitors
+ podMonitor:
+ # enable support for Prometheus Operator
+ enabled: false
+ # optional additional labels for podMonitors
+ additionalLabels: {}
+ # optional annotations for podMonitors
+ annotations: {}
+ # Job label for scrape target
+ jobLabel: "app.kubernetes.io/name"
+ # Scrape interval. If not set, the Prometheus default scrape interval is used.
+ interval:
+ # metric relabel configs to apply to samples before ingestion.
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ # relabel configs to apply to samples before ingestion.
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # target_label: nodename
+ # replacement: $1
+ # action: replace
+ # Prometheus Operator ServiceMonitors. To be used as an alternative
+ # to podMonitor, supports secure metrics.
+ serviceMonitor:
+ # enable support for Prometheus Operator
+ enabled: false
+ speaker:
+ # optional additional labels for the speaker serviceMonitor
+ additionalLabels: {}
+ # optional additional annotations for the speaker serviceMonitor
+ annotations: {}
+ # optional tls configuration for the speaker serviceMonitor, in case
+ # secure metrics are enabled.
+ tlsConfig:
+ insecureSkipVerify: true
+ controller:
+ # optional additional labels for the controller serviceMonitor
+ additionalLabels: {}
+ # optional additional annotations for the controller serviceMonitor
+ annotations: {}
+ # optional tls configuration for the controller serviceMonitor, in case
+ # secure metrics are enabled.
+ tlsConfig:
+ insecureSkipVerify: true
+ # Job label for scrape target
+ jobLabel: "app.kubernetes.io/name"
+ # Scrape interval. If not set, the Prometheus default scrape interval is used.
+ interval:
+ # metric relabel configs to apply to samples before ingestion.
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ # relabel configs to apply to samples before ingestion.
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # target_label: nodename
+ # replacement: $1
+ # action: replace
+ # Prometheus Operator alertmanager alerts
+ prometheusRule:
+ # enable alertmanager alerts
+ enabled: false
+ # optional additional labels for prometheusRules
+ additionalLabels: {}
+ # optional annotations for prometheusRules
+ annotations: {}
+ # MetalLBStaleConfig
+ staleConfig:
+ enabled: true
+ labels:
+ severity: warning
+ # MetalLBConfigNotLoaded
+ configNotLoaded:
+ enabled: true
+ labels:
+ severity: warning
+ # MetalLBAddressPoolExhausted
+ addressPoolExhausted:
+ enabled: true
+ labels:
+ severity: critical
+ addressPoolUsage:
+ enabled: true
+ thresholds:
+ - percent: 75
+ labels:
+ severity: warning
+ - percent: 85
+ labels:
+ severity: warning
+ - percent: 95
+ labels:
+ severity: critical
+ # MetalLBBGPSessionDown
+ bgpSessionDown:
+ enabled: true
+ labels:
+ severity: critical
+ extraAlerts: []
+ # controller contains configuration specific to the MetalLB cluster
+ # controller.
+ controller:
+ enabled: true
+ # -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
+ logLevel: info
+ # command: /controller
+ # webhookMode: enabled
+ image:
+ repository: us-docker.pkg.dev/palette-images/packs/metallb/0.14.9/controller
+ tag: v0.14.9
+ pullPolicy:
+ ## @param controller.updateStrategy.type Metallb controller deployment strategy type.
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+ ## e.g:
+ ## strategy:
+ ## type: RollingUpdate
+ ## rollingUpdate:
+ ## maxSurge: 25%
+ ## maxUnavailable: 25%
+ ##
+ strategy:
+ type: RollingUpdate
+ serviceAccount:
+ # Specifies whether a ServiceAccount should be created
+ create: true
+ # The name of the ServiceAccount to use. If not set and create is
+ # true, a name is generated using the fullname template
+ name: ""
+ annotations: {}
+ securityContext:
+ runAsNonRoot: true
+ # nobody
+ runAsUser: 65534
+ fsGroup: 65534
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 100Mi
+ nodeSelector: {}
+ tolerations: []
+ priorityClassName: ""
+ runtimeClassName: ""
+ affinity: {}
+ podAnnotations: {}
+ labels: {}
+ livenessProbe:
+ enabled: true
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ readinessProbe:
+ enabled: true
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ tlsMinVersion: "VersionTLS12"
+ tlsCipherSuites: ""
+ extraContainers: []
+ # speaker contains configuration specific to the MetalLB speaker
+ # daemonset.
+ speaker:
+ enabled: true
+ # command: /speaker
+ # -- Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none`
+ logLevel: info
+ tolerateMaster: true
+ memberlist:
+ enabled: true
+ mlBindPort: 7946
+ mlBindAddrOverride: ""
+ mlSecretKeyPath: "/etc/ml_secret_key"
+ excludeInterfaces:
+ enabled: true
+ # ignore the exclude-from-external-loadbalancer label
+ ignoreExcludeLB: false
+ image:
+ repository: us-docker.pkg.dev/palette-images/packs/metallb/0.14.9/speaker
+ tag: v0.14.9
+ pullPolicy:
+ ## @param speaker.updateStrategy.type Speaker daemonset strategy type
+ ## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
+ ##
+ updateStrategy:
+ ## StrategyType
+ ## Can be set to RollingUpdate or OnDelete
+ ##
+ type: RollingUpdate
+ serviceAccount:
+ # Specifies whether a ServiceAccount should be created
+ create: true
+ # The name of the ServiceAccount to use. If not set and create is
+ # true, a name is generated using the fullname template
+ name: ""
+ annotations: {}
+ securityContext: {}
+ ## Defines a secret name for the controller to generate a memberlist encryption secret
+ ## By default secretName: {{ "metallb.fullname" }}-memberlist
+ ##
+ # secretName:
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 100Mi
+ nodeSelector: {}
+ tolerations: []
+ priorityClassName: ""
+ affinity: {}
+ ## Selects which runtime class will be used by the pod.
+ runtimeClassName: ""
+ podAnnotations: {}
+ labels: {}
+ livenessProbe:
+ enabled: true
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ readinessProbe:
+ enabled: true
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ startupProbe:
+ enabled: true
+ failureThreshold: 30
+ periodSeconds: 5
+ # frr contains configuration specific to the MetalLB FRR container,
+ # for speaker running alongside FRR.
+ frr:
+ enabled: false
+ image:
+ repository: us-docker.pkg.dev/palette-images/packs/metallb/0.14.9/frr
+ tag: 9.1.0
+ pullPolicy:
+ metricsPort: 7473
+ resources: {}
+ # if set, enables a rbac proxy sidecar container on the speaker to
+ # expose the frr metrics via tls.
+ # secureMetricsPort: 9121
+ reloader:
+ resources: {}
+ frrMetrics:
+ resources: {}
+ extraContainers: []
+ crds:
+ enabled: true
+ validationFailurePolicy: Fail
+ # frrk8s contains the configuration related to using an frrk8s instance
+ # (github.com/metallb/frr-k8s) as the backend for the BGP implementation.
+ # This allows configuring additional frr parameters in combination to those
+ # applied by MetalLB.
+ frrk8s:
+ # if set, enables frrk8s as a backend. This is mutually exclusive to frr
+ # mode.
+ enabled: false
+ external: false
+ namespace: ""
diff --git a/terraform/vmo-cluster/manifests/ubuntu-values.yaml b/terraform/vmo-cluster/manifests/ubuntu-values.yaml
index 607dfc9..00474c6 100644
--- a/terraform/vmo-cluster/manifests/ubuntu-values.yaml
+++ b/terraform/vmo-cluster/manifests/ubuntu-values.yaml
@@ -5,7 +5,7 @@ kubeadmconfig:
- apt update
- apt install -y grepcidr
- |
- NETWORKS="10.11.136.0/24"
+ NETWORKS="10.11.136.128/27"
IPS=$(hostname -I)
for IP in $IPS
do
@@ -44,4 +44,4 @@ kubeadmconfig:
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
- SystemdCgroup = true
+ SystemdCgroup = true
\ No newline at end of file
diff --git a/terraform/vmo-cluster/manifests/vmo-values.yaml b/terraform/vmo-cluster/manifests/vmo-values.yaml
index 35b1244..8fb06e8 100644
--- a/terraform/vmo-cluster/manifests/vmo-values.yaml
+++ b/terraform/vmo-cluster/manifests/vmo-values.yaml
@@ -226,6 +226,8 @@ charts:
- VMLiveUpdateFeatures
- VMPersistentState
- Sidecar
+ - VolumeMigration
+ - CPUManager
# for additional feature gates refer to https://docs.spectrocloud.com/vm-management#featuregates
config:
evictionStrategy: "LiveMigrate"
diff --git a/terraform/vmo-cluster/terraform.tfvars b/terraform/vmo-cluster/terraform.tfvars
index 9cd7a18..8a2bfbd 100644
--- a/terraform/vmo-cluster/terraform.tfvars
+++ b/terraform/vmo-cluster/terraform.tfvars
@@ -4,23 +4,67 @@
#####################
# Palette Settings
#####################
-palette-project = "Default" # The name of your project in Palette.
+palette-project = "Default" # The name of your project in Palette.
############################
# MAAS Deployment Settings
############################
-deploy-maas = false # Set to true to deploy to MAAS.
-deploy-maas-vm = false # Set to true to create a VM on MAAS cluster once deployed.
-pcg-name = "REPLACE ME" # Provide the name of the PCG that will be used to deploy the Palette cluster.
-maas-domain = "REPLACE ME" # Provide the MAAS domain that will be used to deploy the Palette cluster.
+deploy-maas    = true  # Set to true to deploy a new VMO cluster to MAAS.
+deploy-maas-vm = false # Set to true to create a VM on the MAAS VMO cluster once it is deployed.
-maas-worker-nodes = 1 # Provide the number of worker nodes that will be used for the Palette cluster.
-maas-worker-resource-pool = "REPLACE ME" # Provide a resource pool for the worker nodes.
-maas-worker-azs = ["REPLACE ME"] # Provide a set of availability zones for the worker nodes.
-maas-worker-node-tags = ["REPLACE ME"] # Provide a set of node tags for the worker nodes.
+pcg-name = "maas-pcg" # Provide the name of the PCG that will be used to deploy the Palette cluster.
+maas-domain = "maas.sc" # Provide the MAAS domain that will be used to deploy the Palette cluster.
-maas-control-plane-nodes = 1 # Provide the number of control plane nodes that will be used for the Palette cluster.
-maas-control-plane-resource-pool = "REPLACE ME" # Provide a resource pool for the control plane nodes.
-maas-control-plane-azs = ["REPLACE ME"] # Provide a set of availability zones for the control plane nodes.
-maas-control-plane-node-tags = ["REPLACE ME"] # Provide a set of node tags for the control plane nodes.
+maas-worker-nodes = 1 # Provide the number of worker nodes that will be used for the Palette cluster.
+maas-worker-resource-pool = "bm-generic" # Provide a resource pool for the worker nodes.
+maas-worker-azs = ["default"] # Provide a set of availability zones for the worker nodes.
+maas-worker-node-tags = ["docs"] # Provide a set of node tags for the worker nodes.
+
+maas-control-plane-nodes = 1 # Provide the number of control plane nodes that will be used for the Palette cluster.
+maas-control-plane-resource-pool = "Palette-Sustaining" # Provide a resource pool for the control plane nodes.
+maas-control-plane-azs = ["az1"] # Provide a set of availability zones for the control plane nodes.
+maas-control-plane-node-tags = ["docs-cp"] # Provide a set of node tags for the control plane nodes.
+
+
+# #####################
+# # cluster_profiles.tf
+# #####################
+vmo-cluster-name = "vmo-cluster-maas"
+cluster-profile-type = "cluster" # Infrastructure, Full, or Add-on
+cluster-profile-version = "1.0.0" # Version number for the cluster profile in Palette
+
+
+# ##############
+# # clusters.tf
+# ##############
+ctl-node-min-cpu = 6 # Minimum number of CPU cores required for control plane nodes
+ctl-node-min-memory-mb = 16384 # Minimum amount of RAM (memory) required for control plane nodes
+wrk-node-min-cpu = 8 # Minimum number of CPU cores required for worker nodes
+wrk-node-min-memory-mb = 16384 # Minimum amount of RAM (memory) required for worker nodes
+
+
+# ###########################
+# # manifests/k8s-values.yaml
+# ###########################
+pod_CIDR = "100.64.1.0/24" # Set the subnet that your pods will run on
+serviceClusterIpRange = "100.64.2.0/24"
+
+
+# ###############################
+# # manifests/metallb-values.yaml
+# ###############################
+metallb-ip-pool = ["10.11.130.128/28"] # IP addresses to be assigned for use by MetalLB
+
+
+# #####################
+# # virtual_machines.tf
+# #####################
+vm-deploy-namespace = "default" # Namespace where your VM will be deployed.
+vm-deploy-name = "vmo-vm" # The name of your VM
+# vm_labels = "my-vmo-vm" # Labels that will be applied to your VM. For this tutorial, use a single label.
+vm-storage-Gi = "50Gi" # Size of the disk (PVC) that your VM will have.
+vm-cpu-cores = 2 # Number of CPU cores your VM will have.
+vm-cpu-sockets = 1 # Number of physical CPU sockets the CPU cores should be spread over.
+vm-cpu-threads = 2 # Number of CPU threads to use for the VM CPU
+vm-memory-Gi = "4Gi" # Amount of RAM (memory) your VM will have
\ No newline at end of file
diff --git a/terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl
index 78eaaef..8a37c85 100644
--- a/terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl
+++ b/terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl
@@ -15,6 +15,23 @@ variables {
maas-control-plane-resource-pool = ""
maas-control-plane-azs = []
maas-control-plane-node-tags = []
+ vmo-cluster-name = ""
+ cluster-profile-type = ""
+ cluster-profile-version = ""
+ ctl-node-min-cpu = 1
+ ctl-node-min-memory-mb = 16384
+ wrk-node-min-cpu = 1
+ wrk-node-min-memory-mb = 16384
+ pod_CIDR = ""
+ serviceClusterIpRange = ""
+ metallb-ip-pool = []
+ vm-deploy-namespace = ""
+ vm-deploy-name = ""
+ vm-storage-Gi = ""
+ vm-cpu-cores = 2
+ vm-cpu-sockets = 1
+ vm-cpu-threads = 2
+ vm-memory-Gi = ""
}
mock_provider "spectrocloud" {
diff --git a/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl
index c89a7b9..c6559b5 100644
--- a/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl
+++ b/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl
@@ -15,6 +15,23 @@ variables {
maas-control-plane-resource-pool = "REPLACE ME"
maas-control-plane-azs = ["REPLACE ME"]
maas-control-plane-node-tags = ["REPLACE ME"]
+ vmo-cluster-name = "REPLACE ME"
+ cluster-profile-type = "REPLACE ME"
+ cluster-profile-version = "REPLACE ME"
+ ctl-node-min-cpu = 1
+ ctl-node-min-memory-mb = 16384
+ wrk-node-min-cpu = 1
+ wrk-node-min-memory-mb = 16384
+ pod_CIDR = "REPLACE ME"
+ serviceClusterIpRange = "REPLACE ME"
+ metallb-ip-pool = ["REPLACE ME"]
+ vm-deploy-namespace = "REPLACE ME"
+ vm-deploy-name = "REPLACE ME"
+ vm-storage-Gi = "REPLACE ME"
+ vm-cpu-cores = 2
+ vm-cpu-sockets = 1
+ vm-cpu-threads = 2
+ vm-memory-Gi = "REPLACE ME"
}
mock_provider "spectrocloud" {
diff --git a/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl
index 60f9ae4..7a2f524 100644
--- a/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl
+++ b/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl
@@ -15,6 +15,23 @@ variables {
maas-control-plane-resource-pool = "test-cp-pool"
maas-control-plane-azs = ["test-cp-az"]
maas-control-plane-node-tags = ["test-cp-tags"]
+ vmo-cluster-name = "name"
+ cluster-profile-type = "profile-type"
+ cluster-profile-version = "profile-version"
+ ctl-node-min-cpu = 1
+ ctl-node-min-memory-mb = 16384
+ wrk-node-min-cpu = 1
+ wrk-node-min-memory-mb = 16384
+ pod_CIDR = "1.1.1.1/24"
+ serviceClusterIpRange = "1.1.1.1/24"
+ metallb-ip-pool = ["1.1.1.1"]
+ vm-deploy-namespace = "default"
+ vm-deploy-name = "name"
+ vm-storage-Gi = "64Gi"
+ vm-cpu-cores = 2
+ vm-cpu-sockets = 1
+ vm-cpu-threads = 2
+ vm-memory-Gi = "8Gi"
}
mock_provider "spectrocloud" {
diff --git a/terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl
index 3d24108..ba45dd2 100644
--- a/terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl
+++ b/terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl
@@ -1,6 +1,6 @@
# Copyright (c) Spectro Cloud
# SPDX-License-Identifier: Apache-2.0
-# Test case 6 - Verify control plane and worker nodes cannot be set to 0.
+# Test case 6 - Verify control plane nodes, worker nodes, and VM resources cannot be set to 0.
variables {
deploy-maas = true
@@ -15,6 +15,23 @@ variables {
maas-control-plane-resource-pool = "test-cp-pool"
maas-control-plane-azs = ["test-cp-az"]
maas-control-plane-node-tags = ["test-cp-tags"]
+ vmo-cluster-name = "name"
+ cluster-profile-type = "profile-type"
+ cluster-profile-version = "profile-version"
+ ctl-node-min-cpu = 0
+ ctl-node-min-memory-mb = 0
+ wrk-node-min-cpu = 0
+ wrk-node-min-memory-mb = 0
+ pod_CIDR = "1.1.1.1/24"
+ serviceClusterIpRange = "1.1.1.1/24"
+ metallb-ip-pool = ["1.1.1.1"]
+ vm-deploy-namespace = "default"
+ vm-deploy-name = "name"
+ vm-storage-Gi = "64Gi"
+ vm-cpu-cores = 0
+ vm-cpu-sockets = 0
+ vm-cpu-threads = 0
+ vm-memory-Gi = "8Gi"
}
mock_provider "spectrocloud" {
diff --git a/terraform/vmo-cluster/tests/maas-cluster.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster.tftest.hcl
index 6b2b426..ff6d35a 100644
--- a/terraform/vmo-cluster/tests/maas-cluster.tftest.hcl
+++ b/terraform/vmo-cluster/tests/maas-cluster.tftest.hcl
@@ -15,6 +15,23 @@ variables {
maas-control-plane-resource-pool = "test-cp-pool"
maas-control-plane-azs = ["test-cp-az"]
maas-control-plane-node-tags = ["test-cp-tags"]
+ vmo-cluster-name = "name"
+ cluster-profile-type = "profile-type"
+ cluster-profile-version = "profile-version"
+ ctl-node-min-cpu = 1
+ ctl-node-min-memory-mb = 16384
+ wrk-node-min-cpu = 1
+ wrk-node-min-memory-mb = 16384
+ pod_CIDR = "1.1.1.1/24"
+ serviceClusterIpRange = "1.1.1.1/24"
+ metallb-ip-pool = ["1.1.1.1"]
+ vm-deploy-namespace = "default"
+ vm-deploy-name = "name"
+ vm-storage-Gi = "64Gi"
+ vm-cpu-cores = 2
+ vm-cpu-sockets = 1
+ vm-cpu-threads = 2
+ vm-memory-Gi = "8Gi"
}
mock_provider "spectrocloud" {
diff --git a/terraform/vmo-cluster/virtual_machines.tf b/terraform/vmo-cluster/virtual_machines.tf
index 9a07542..0002be6 100644
--- a/terraform/vmo-cluster/virtual_machines.tf
+++ b/terraform/vmo-cluster/virtual_machines.tf
@@ -10,8 +10,8 @@ resource "spectrocloud_virtual_machine" "virtual-machine" {
cluster_context = data.spectrocloud_cluster.maas_vmo_cluster[0].context
run_on_launch = true
- namespace = "default"
- name = "ubuntu-tutorial-vm"
+ namespace = var.vm-deploy-namespace
+ name = var.vm-deploy-name
timeouts {
create = "60m"
@@ -37,7 +37,7 @@ resource "spectrocloud_virtual_machine" "virtual-machine" {
access_modes = ["ReadWriteMany"]
resources {
requests = {
- storage = "50Gi"
+ storage = var.vm-storage-Gi
}
}
storage_class_name = "ceph-block"
@@ -82,12 +82,12 @@ resource "spectrocloud_virtual_machine" "virtual-machine" {
}
cpu {
- cores = 2
- sockets = 1
- threads = 1
+ cores = var.vm-cpu-cores
+ sockets = var.vm-cpu-sockets
+ threads = var.vm-cpu-threads
}
memory {
- guest = "4Gi"
+ guest = var.vm-memory-Gi
}
resources {}
From 499a2ed0994037c4d2655a5ddd0ac7d780ff720b Mon Sep 17 00:00:00 2001
From: kenheaslip-sc
Date: Fri, 23 May 2025 14:41:59 -0400
Subject: [PATCH 03/11] First PR for initial comments
---
terraform/vmo-cluster/inputs.tf | 30 -------------------
.../vmo-cluster/manifests/k8s-values.yaml | 4 +--
.../vmo-cluster/manifests/metallb-values.yaml | 2 +-
terraform/vmo-cluster/terraform.tfvars | 13 --------
4 files changed, 3 insertions(+), 46 deletions(-)
diff --git a/terraform/vmo-cluster/inputs.tf b/terraform/vmo-cluster/inputs.tf
index b0ac1c3..9b7ebe0 100644
--- a/terraform/vmo-cluster/inputs.tf
+++ b/terraform/vmo-cluster/inputs.tf
@@ -241,36 +241,6 @@ variable "wrk-node-min-memory-mb" {
}
}
-variable "pod_CIDR" {
- type = string
- description = "Set the subnet your K8s pods will use."
-
- validation {
- condition = var.deploy-maas ? var.pod_CIDR != "REPLACE ME" && var.pod_CIDR != "" : true
- error_message = "Provide a valid subnet in CIDR format ex: 1.1.1.1/24."
- }
-}
-
-variable "serviceClusterIpRange" {
- type = string
- description = "Set the subnet your K8s services will use."
-
- validation {
- condition = var.deploy-maas ? var.serviceClusterIpRange != "REPLACE ME" && var.serviceClusterIpRange != "" : true
- error_message = "Provide a valid subnet in CIDR format ex: 1.1.1.1/24."
- }
-}
-
-variable "metallb-ip-pool" {
- type = set(string)
- description = "Set the IP addresses or subnet range for MetalLB to use for ingress."
-
- validation {
- condition = var.deploy-maas ? var.metallb-ip-pool != "REPLACE ME" && var.metallb-ip-pool != "" : true
- error_message = "Provide valid IP addresses or subnet range for MetalLB to use for ingress."
- }
-}
-
variable "vm-deploy-namespace" {
type = string
description = "Set the target namespace where your VM will be deployed."
diff --git a/terraform/vmo-cluster/manifests/k8s-values.yaml b/terraform/vmo-cluster/manifests/k8s-values.yaml
index 8bc160c..d9be615 100644
--- a/terraform/vmo-cluster/manifests/k8s-values.yaml
+++ b/terraform/vmo-cluster/manifests/k8s-values.yaml
@@ -12,10 +12,10 @@ pack:
- image: registry.k8s.io/pause:3.8
#CIDR Range for Pods in cluster
# Note : This must not overlap with any of the host or service network
- podCIDR: var.pod_CIDR
+ podCIDR: "REPLACE_ME"
#CIDR notation IP range from which to assign service cluster IPs
# Note : This must not overlap with any IP ranges assigned to nodes for pods.
- serviceClusterIpRange: var.serviceClusterIpRange
+ serviceClusterIpRange: "REPLACE_ME"
palette:
config:
dashboard:
diff --git a/terraform/vmo-cluster/manifests/metallb-values.yaml b/terraform/vmo-cluster/manifests/metallb-values.yaml
index 84e635d..1d6f606 100644
--- a/terraform/vmo-cluster/manifests/metallb-values.yaml
+++ b/terraform/vmo-cluster/manifests/metallb-values.yaml
@@ -19,7 +19,7 @@ charts:
first-pool:
spec:
addresses:
- - var.metallb-ip-pool
+ - "REPLACE_ME"
# - 192.168.100.50-192.168.100.60
avoidBuggyIPs: true # Buggy IPs are any .0 or .255 addresses. These are commonly used for subnet ID and broadcast addresses.
autoAssign: true
diff --git a/terraform/vmo-cluster/terraform.tfvars b/terraform/vmo-cluster/terraform.tfvars
index 8a2bfbd..01af73a 100644
--- a/terraform/vmo-cluster/terraform.tfvars
+++ b/terraform/vmo-cluster/terraform.tfvars
@@ -44,19 +44,6 @@ wrk-node-min-cpu = 8 # Minimum number of CP
wrk-node-min-memory-mb = 16384 # Minimum amount of RAM (memory) required for worker nodes
-# ###########################
-# # manifests/k8s-values.yaml
-# ###########################
-pod_CIDR = "100.64.1.0/24" # Set the subnet that your pods will run on
-serviceClusterIpRange = "100.64.2.0/24"
-
-
-# ###############################
-# # manifests/metallb-values.yaml
-# ###############################
-metallb-ip-pool = ["10.11.130.128/28"] # IP addresses to be assigned for use by MetalLB
-
-
# #####################
# # virtual_machines.tf
# #####################
From 43e6159293622d85147171666ba570f8171ae806 Mon Sep 17 00:00:00 2001
From: kenheaslip-sc
Date: Thu, 5 Jun 2025 16:36:59 -0400
Subject: [PATCH 04/11] variables-added
---
terraform/vmo-cluster/cluster_profiles.tf | 34 +-
terraform/vmo-cluster/clusters.tf | 4 +-
terraform/vmo-cluster/data.tf | 12 +-
terraform/vmo-cluster/inputs.tf | 223 ++++++++----
.../vmo-cluster/manifests/k8s-values.yaml | 38 ++-
.../vmo-cluster/manifests/metallb-values.yaml | 4 +-
.../vmo-cluster/manifests/ubuntu-values.yaml | 2 +-
.../vmo-cluster/manifests/vmo-values.yaml | 322 +++++++++++-------
terraform/vmo-cluster/terraform.tfvars | 60 +++-
.../maas-cluster-missing-values.tftest.hcl | 17 -
.../maas-cluster-replace-values.tftest.hcl | 17 -
.../tests/maas-cluster-vm.tftest.hcl | 17 -
.../tests/maas-cluster-zero-nodes.tftest.hcl | 19 +-
.../vmo-cluster/tests/maas-cluster.tftest.hcl | 17 -
terraform/vmo-cluster/virtual_machines.tf | 3 +-
15 files changed, 442 insertions(+), 347 deletions(-)
diff --git a/terraform/vmo-cluster/cluster_profiles.tf b/terraform/vmo-cluster/cluster_profiles.tf
index 4be5c9e..8459d8b 100644
--- a/terraform/vmo-cluster/cluster_profiles.tf
+++ b/terraform/vmo-cluster/cluster_profiles.tf
@@ -5,18 +5,20 @@
resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
count = var.deploy-maas ? 1 : 0
- name = var.vmo-cluster-name
+ name = "tf-maas-vmo-profile"
description = "A basic cluster profile for MAAS VMO"
tags = concat(var.tags, ["env:maas"])
cloud = "maas"
- type = var.cluster-profile-type
- version = var.cluster-profile-version
+ type = var.clusterProfileType # "cluster"
+ version = var.clusterProfileVersion
pack {
name = data.spectrocloud_pack.maas_ubuntu.name
tag = data.spectrocloud_pack.maas_ubuntu.version
uid = data.spectrocloud_pack.maas_ubuntu.id
- values = file("manifests/ubuntu-values.yaml")
+ values = templatefile("manifests/ubuntu-values.yaml", {
+ maas-host-cidr = var.maas-host-cidr
+ })
type = "spectro"
}
@@ -24,8 +26,11 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
name = data.spectrocloud_pack.maas_k8s.name
tag = data.spectrocloud_pack.maas_k8s.version
uid = data.spectrocloud_pack.maas_k8s.id
- values = file("manifests/k8s-values.yaml")
+ values = templatefile("manifests/k8s-values.yaml", {
+ pod-cidr = var.pod-cidr,
+ clusterServicesCIDR = var.clusterServicesCIDR
+ })
 type = "spectro"
}
pack {
@@ -41,23 +46,30 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
tag = data.spectrocloud_pack.maas_csi.version
uid = data.spectrocloud_pack.maas_csi.id
values = templatefile("manifests/csi-values.yaml", {
- worker_nodes = var.maas-worker-nodes,
+ worker_nodes = var.maas-worker-nodes
})
type = "spectro"
}
pack {
- name = "lb-metallb-helm"
- tag = "1.14.x"
- uid = data.spectrocloud_pack.maas_metallb.id
- values = file("manifests/metallb-values.yaml")
+ name = data.spectrocloud_pack.maas_metallb.name
+ tag = data.spectrocloud_pack.maas_metallb.version
+ uid = data.spectrocloud_pack.maas_metallb.id
+ values = templatefile("manifests/metallb-values.yaml", {
+ metallb-ip-pool = var.metallb-ip-pool
+ })
+ type = "spectro"
}
pack {
name = data.spectrocloud_pack.maas_vmo.name
tag = data.spectrocloud_pack.maas_vmo.version
uid = data.spectrocloud_pack.maas_vmo.id
- values = file("manifests/vmo-values.yaml")
+ values = templatefile("manifests/vmo-values.yaml", {
+ network-bridge = var.vmo-network-interface,
+ vm-vlans = var.vm-vlans,
+ host-vlans = var.host-vlans
+ })
type = "spectro"
}
diff --git a/terraform/vmo-cluster/clusters.tf b/terraform/vmo-cluster/clusters.tf
index 89ba609..e8977a8 100644
--- a/terraform/vmo-cluster/clusters.tf
+++ b/terraform/vmo-cluster/clusters.tf
@@ -8,7 +8,7 @@
resource "spectrocloud_cluster_maas" "maas-cluster" {
count = var.deploy-maas ? 1 : 0
- name = var.vmo-cluster-name
+ name = "vmo-cluster-maas"
tags = concat(var.tags, ["env:maas"])
cloud_account_id = data.spectrocloud_cloudaccount_maas.account[0].id
pause_agent_upgrades = "unlock"
@@ -43,7 +43,7 @@ resource "spectrocloud_cluster_maas" "maas-cluster" {
node_tags = var.maas-worker-node-tags
instance_type {
min_cpu = var.wrk-node-min-cpu
- min_memory_mb = var.wrk-node-min-memory-mb
+ min_memory_mb = var.wrk-node-min-memory-mb
}
placement {
resource_pool = var.maas-worker-resource-pool
diff --git a/terraform/vmo-cluster/data.tf b/terraform/vmo-cluster/data.tf
index 581f342..dfb524b 100644
--- a/terraform/vmo-cluster/data.tf
+++ b/terraform/vmo-cluster/data.tf
@@ -25,19 +25,19 @@ data "spectrocloud_pack" "maas_ubuntu" {
data "spectrocloud_pack" "maas_k8s" {
name = "kubernetes"
- version = "1.32.2"
+ version = "1.30.6"
registry_uid = data.spectrocloud_registry.public_registry.id
}
data "spectrocloud_pack" "maas_cni" {
name = "cni-cilium-oss"
- version = "1.17.1"
+ version = "1.15.3"
registry_uid = data.spectrocloud_registry.public_registry.id
}
data "spectrocloud_pack" "maas_csi" {
name = "csi-rook-ceph-helm"
- version = "1.16.3"
+ version = "1.14.9"
registry_uid = data.spectrocloud_registry.public_registry.id
}
@@ -49,7 +49,7 @@ data "spectrocloud_pack" "maas_metallb" {
data "spectrocloud_pack" "maas_vmo" {
name = "virtual-machine-orchestrator"
- version = "4.6.3"
+ version = "4.4.10"
registry_uid = data.spectrocloud_registry.public_registry.id
}
@@ -58,6 +58,4 @@ data "spectrocloud_cluster" "maas_vmo_cluster" {
depends_on = [spectrocloud_cluster_maas.maas-cluster]
name = "vmo-cluster-maas"
context = "project"
-}
-
-
+}
\ No newline at end of file
diff --git a/terraform/vmo-cluster/inputs.tf b/terraform/vmo-cluster/inputs.tf
index 9b7ebe0..2b85c93 100644
--- a/terraform/vmo-cluster/inputs.tf
+++ b/terraform/vmo-cluster/inputs.tf
@@ -31,29 +31,77 @@ variable "tags" {
]
}
-# ###########################
-# # manifests/k8s-values.yaml
-# ###########################
-# variable "pod_CIDR" {
-# type = string
-# description = "Subnet range to be used for pods in the cluster."
-# }
-# variable "serviceClusterIpRange" {
-# type = string
-# description = "Subnet range to use for Cluster Services."
-#}
+#####################
+# cluster_profiles.tf
+#####################
-# ################################
-# # manifests/metallb-values.yaml
-# ################################
+variable "clusterProfileType" {
+ type = string
+ description = "The name of the PCG that will be used to deploy the cluster."
+
+ validation {
+ condition = var.deploy-maas ? contains(["full", "infrastructure", "add-on", "app"], lower(var.clusterProfileType)) : true
+ error_message = "Cluster profile type must be \"full\", \"infrastructure\", \"add-on\", or \"app\"."
+ }
+}
+
+variable "clusterProfileVersion" {
+ type = string
+ description = "The name of the PCG that will be used to deploy the cluster."
+
+ validation {
+ condition = var.deploy-maas ? var.clusterProfileVersion != "REPLACE ME" && var.clusterProfileVersion != "" : true
+ error_message = "Cluster profile version must be set."
+ }
+}
+
+#########################
+# clusters.tf
+#########################
+
+variable "ctl-node-min-cpu" {
+ type = number
+ description = "Minimum number of CPU cores allocated to the Control Plane node."
+
+ validation {
+ condition = var.deploy-maas ? var.ctl-node-min-cpu > 0 : true
+ error_message = "Provide a valid number of cores for your Control Plane node."
+ }
+}
+
+variable "ctl-node-min-memory-mb" {
+ type = number
+ description = "Minimum amount of RAM allocated to the Control Plane node."
+
+ validation {
+ condition = var.deploy-maas ? var.ctl-node-min-memory-mb > 0 : true
+ error_message = "Provide a valid amount of RAM (MB) for your Control Plane node."
+ }
+}
+
+variable "wrk-node-min-cpu" {
+ type = number
+ description = "Minimum number of CPU cores allocated to the Control Plane node."
+
+ validation {
+ condition = var.deploy-maas ? var.wrk-node-min-cpu > 0 : true
+ error_message = "Provide a valid number of cores for your worker node."
+ }
+}
+
+variable "wrk-node-min-memory-mb" {
+ type = number
+ description = "Minimum amount of RAM allocated to the Control Plane node."
+
+ validation {
+ condition = var.deploy-maas ? var.wrk-node-min-memory-mb > 0 : true
+ error_message = "Provide a valid amount of RAM (MB) for your worker node."
+ }
+}
-# variable "metallb_ip_pool" {
-# type = number
-# description = "IP addresses to be assigned to MetalLB. Format 1.1.1.1, 1.1.1.2 or '1.1.1.1-1.1.1.2"
-#}
######
# MAAS
@@ -171,142 +219,167 @@ variable "maas-control-plane-node-tags" {
}
}
-variable "vmo-cluster-name" {
+#################
+# ubuntu-values.yaml
+#################
+
+variable "maas-host-cidr" {
type = string
- description = "Set of node tags for the MAAS control plane nodes."
+ description = "CIDR notation subnets or IP range ex. 192.168.1.0/24 or 192.168.1.0-192.168.1.255"
validation {
- condition = var.deploy-maas ? var.vmo-cluster-name != "REPLACE ME" && var.vmo-cluster-name != "" : true
- error_message = "Provide a valid set of node tags for control plane nodes."
+ condition = var.deploy-maas ? !contains(var.maas-host-cidr, "REPLACE ME") && length(var.maas-host-cidr) != 0 : true
+ error_message = "Provide a valid Subnet (CIDR Notation) for MAAS server network."
}
}
-variable "cluster-profile-type" {
+#################
+# k8s-values.yaml
+#################
+
+variable "pod-cidr" {
type = string
- description = "Identifies profile type of Infrastructure, Full, or Add-on."
+ description = "CIDR notation subnets or IP range ex. 192.168.1.0/24 or 192.168.1.0-192.168.1.255"
validation {
- condition = var.deploy-maas ? var.cluster-profile-type != "REPLACE ME" && var.cluster-profile-type != "" : true
- error_message = "Provide a valid cluster profile type."
+ condition = var.deploy-maas ? var.pod-cidr != "REPLACE ME" && length(var.pod-cidr) != 0 : true
+ error_message = "Provide a valid Subnet (CIDR Notation) for the pod network."
}
}
-variable "cluster-profile-version" {
+variable "clusterServicesCIDR" {
type = string
- description = "Set the version number of the cluster profile to be created"
-
+ description = "CIDR notation subnets or IP range ex. 192.168.1.0/24 or 192.168.1.0-192.168.1.255"
+
validation {
- condition = var.deploy-maas ? var.cluster-profile-version != "REPLACE ME" && var.cluster-profile-version != "" : true
- error_message = "Provide a valid version number."
+ condition = var.deploy-maas ? var.clusterServicesCIDR != "REPLACE ME" && length(var.clusterServicesCIDR) != 0 : true
+ error_message = "Provide a valid Subnet (CIDR Notation) for cluster services."
}
}
-variable "ctl-node-min-cpu" {
- type = number
- description = "Set the minimum number of CPU cores to be used for the control plane nodes."
+#####################
+# metallb-values.yaml
+#####################
+
+variable "metallb-ip-pool" {
+ type = set(string)
+ description = "CIDR notation subnets or IP range ex. 192.168.1.0/24 or 192.168.1.0-192.168.1.255"
validation {
- condition = var.deploy-maas ? var.ctl-node-min-cpu > 0 : true
- error_message = "Provide a valid number of CPU cores to be used for control plane nodes."
+ condition = var.deploy-maas ? !contains(var.metallb-ip-pool, "REPLACE ME") && length(var.metallb-ip-pool) != 0 : true
+ error_message = "Provide a valid Subnet (CIDR Notation) or IP Range (192.168.1.0-192.168.1.255) for MetalLB."
}
}
-variable "ctl-node-min-memory-mb" {
- type = number
- description = "Set the minimum amount of Memory to be used for the control plane nodes."
+#################
+# vmo-values.yaml
+#################
+
+variable "vmo-network-interface" {
+ type = string
+ description = "The network interface VMO will use for VM traffic."
validation {
- condition = var.deploy-maas ? var.ctl-node-min-memory-mb > 0 : true
- error_message = "Provide a valid number amount of Memory to be used control plane nodes."
+ condition = var.deploy-maas ? !contains(var.vmo-network-interface, "REPLACE ME") && length(var.vmo-network-interface) != 0 : true
+ error_message = "Provide a valid network interface for the VMO service to use."
}
}
-variable "wrk-node-min-cpu" {
+variable "vm-vlans" {
type = number
- description = "Set the minimum number of CPU cores to be used for the worker nodes."
-
- validation {
- condition = var.deploy-maas ? var.wrk-node-min-cpu > 0 : true
- error_message = "Provide a valid number of CPU cores to be used for control plane nodes."
- }
+ description = "VM allowed VLANs."
+ default = 1
}
-variable "wrk-node-min-memory-mb" {
+variable "host-vlans" {
type = number
- description = "Set the minimum amount of Memory to be used for the worker nodes."
-
- validation {
- condition = var.deploy-maas ? var.wrk-node-min-memory-mb > 0 : true
- error_message = "Provide a valid amount of Memory to be used for the worker nodes."
- }
+ description = "Node Allowed VLANs"
+ default = 1
}
+#####################
+# virtual_machines.tf
+#####################
+
variable "vm-deploy-namespace" {
type = string
- description = "Set the target namespace where your VM will be deployed."
+ description = "The namespace where your VMs will be deployed."
validation {
- condition = var.deploy-maas ? var.vm-deploy-namespace != "REPLACE ME" && var.vm-deploy-namespace != "" : true
- error_message = "Provide valid namespace where your VM will be deployed."
+ condition = var.deploy-maas ? !contains(var.vm-deploy-namespace, "REPLACE ME") && length(var.vm-deploy-namespace) != 0 : true
+ error_message = "Provide a valid target namespace for your VM deployment."
}
}
variable "vm-deploy-name" {
type = string
- description = "Provide a valid name for your VM."
+ description = "The namespace where your VMs will be deployed."
validation {
- condition = var.deploy-maas ? var.vm-deploy-name != "REPLACE ME" && var.vm-deploy-name != "" : true
+ condition = var.deploy-maas ? var.vm-deploy-name != "REPLACE ME" && length(var.vm-deploy-name) != 0 : true
error_message = "Provide a valid name for your VM."
}
}
+variable "vm-labels" {
+ type = set(string)
+ description = "The namespace where your VMs will be deployed."
+
+ validation {
+ condition = var.deploy-maas ? !contains(var.vm-labels, "REPLACE ME") && length(var.vm-labels) != 0 : true
+ error_message = "Provide valid labels for your VM."
+ }
+}
+
variable "vm-storage-Gi" {
type = string
- description = "The amount of storage your VM will have."
+ description = "The amount of storage to provision for your VM in Gi."
validation {
- condition = var.deploy-maas ? var.vm-storage-Gi != "REPLACE ME" && var.vm-storage-Gi != "" : true
- error_message = "Provide a valid amount of storage for your VM. Include Gi at the end. Example 50Gi."
+ condition = var.deploy-maas ? var.vm-storage-Gi != "REPLACE ME" && length(var.vm-storage-Gi) != 0 && endswith(var.vm-storage-Gi, "Gi") : true
+ error_message = "Provide a valid amount of storage for your VM. You must include \"Gi\" at the end of the numerical value. Example: \"50Gi\"."
}
}
variable "vm-cpu-cores" {
type = number
- description = "Set the minimum number of CPU cores to be used for the control plane nodes."
+ description = "Number of CPU cores to allocate to your VM."
+ default = 1
validation {
- condition = var.deploy-maas-vm ? var.vm-cpu-cores > 0 : true
- error_message = "Provide a valid number of CPU cores to be used for control plane nodes."
+ condition = var.deploy-maas ? var.vm-cpu-cores > 0 : true
+ error_message = "Provide a valid number of CPU cores to allocate to your VM."
}
}
variable "vm-cpu-sockets" {
type = number
- description = "The number of CPU sockets the VM will use. This can be multiple to allow for hardware failure."
+ description = "Number of CPU cores to allocate to your VM."
+ default = 1
validation {
- condition = var.deploy-maas-vm ? var.vm-cpu-sockets > 0 : true
- error_message = "Provide a valid number of CPU sockets to be used by the VM. This can be multiple to allow for hardware failure."
+ condition = var.deploy-maas ? var.vm-cpu-sockets > 0 : true
+ error_message = "Provide a valid number of CPU Sockets that your VM must use."
}
}
variable "vm-cpu-threads" {
type = number
- description = "Set the number of CPU threads the VM will use."
+ description = "Number of CPU cores to allocate to your VM."
+ default = 1
validation {
- condition = var.deploy-maas-vm ? var.vm-cpu-threads > 0 : true
- error_message = "Provide a valid number of CPU threads the VM will use."
+ condition = var.deploy-maas ? var.vm-cpu-threads > 0 : true
+ error_message = "Provide a valid number of CPU threads that your VM should use."
}
}
variable "vm-memory-Gi" {
type = string
- description = "The amount of memory your VM will have."
+ description = "The amount of storage to provision for your VM in Gi."
validation {
- condition = var.deploy-maas ? var.vm-memory-Gi != "REPLACE ME" && var.vm-memory-Gi != "" : true
- error_message = "Provide a valid amount of memory for your VM. Include Gi at the end. Example 4Gi."
+ condition = var.deploy-maas ? !contains(var.vm-memory-Gi, "REPLACE ME") && length(var.vm-memory-Gi) != 0 && endswith((var.vm-memory-Gi), "Gi") : true
+ error_message = "Provide a valid amount of memory to allocate your VM. You must include "Gi" at the end of your numerical value. Example: "4Gi"."
}
}
\ No newline at end of file
diff --git a/terraform/vmo-cluster/manifests/k8s-values.yaml b/terraform/vmo-cluster/manifests/k8s-values.yaml
index d9be615..6a2c879 100644
--- a/terraform/vmo-cluster/manifests/k8s-values.yaml
+++ b/terraform/vmo-cluster/manifests/k8s-values.yaml
@@ -1,25 +1,26 @@
pack:
- k8sHardening: True
content:
images:
- image: registry.k8s.io/coredns/coredns:v1.11.3
- - image: registry.k8s.io/etcd:3.5.15-0
- - image: registry.k8s.io/kube-apiserver:v1.30.6
- - image: registry.k8s.io/kube-controller-manager:v1.30.6
- - image: registry.k8s.io/kube-proxy:v1.30.6
- - image: registry.k8s.io/kube-scheduler:v1.30.6
+ - image: registry.k8s.io/etcd:3.5.16-0
+ - image: registry.k8s.io/kube-apiserver:v1.32.2
+ - image: registry.k8s.io/kube-controller-manager:v1.32.2
+ - image: registry.k8s.io/kube-proxy:v1.32.2
+ - image: registry.k8s.io/kube-scheduler:v1.32.2
- image: registry.k8s.io/pause:3.9
- image: registry.k8s.io/pause:3.8
#CIDR Range for Pods in cluster
# Note : This must not overlap with any of the host or service network
- podCIDR: "REPLACE_ME"
+ podCIDR: ${pod-cidr} #"192.168.0.0/16"
#CIDR notation IP range from which to assign service cluster IPs
# Note : This must not overlap with any IP ranges assigned to nodes for pods.
- serviceClusterIpRange: "REPLACE_ME"
+ serviceClusterIpRange: ${clusterServicesCIDR} #"10.96.0.0/12"
palette:
config:
dashboard:
identityProvider: palette
+ # serviceDomain: "cluster.local"
+
kubeadmconfig:
apiServer:
extraArgs:
@@ -34,7 +35,7 @@ kubeadmconfig:
admission-control-config-file: "/etc/kubernetes/pod-security-standard.yaml"
audit-log-path: /var/log/apiserver/audit.log
audit-policy-file: /etc/kubernetes/audit-policy.yaml
- audit-log-maxage: "31"
+ audit-log-maxage: "30"
audit-log-maxbackup: "10"
audit-log-maxsize: "100"
authorization-mode: RBAC,Node
@@ -93,26 +94,31 @@ kubeadmconfig:
kind: PodSecurityConfiguration
defaults:
enforce: "baseline"
- enforce-version: "v1.30"
+ enforce-version: "v1.32"
audit: "baseline"
- audit-version: "v1.30"
+ audit-version: "v1.32"
warn: "restricted"
- warn-version: "v1.30"
- audit: "restricted"
- audit-version: "v1.30"
+ warn-version: "v1.32"
exemptions:
# Array of authenticated usernames to exempt.
usernames: []
# Array of runtime class names to exempt.
runtimeClasses: []
# Array of namespaces to exempt.
- namespaces: [kube-system]
+ namespaces: [kube-system, rook-ceph]
preKubeadmCommands:
# For enabling 'protect-kernel-defaults' flag to kubelet, kernel parameters changes are required
- 'echo "====> Applying kernel parameters for Kubelet"'
- 'sysctl -p /etc/sysctl.d/90-kubelet.conf'
-
+
postKubeadmCommands:
- 'chmod 600 /var/lib/kubelet/config.yaml'
# - 'echo "List of post kubeadm commands to be executed"'
+
+ # Client configuration to add OIDC based authentication flags in kubeconfig
+ #clientConfig:
+ #oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}"
+ #oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}"
+ #oidc-client-secret: 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv
+ #oidc-extra-scope: profile,email
diff --git a/terraform/vmo-cluster/manifests/metallb-values.yaml b/terraform/vmo-cluster/manifests/metallb-values.yaml
index 1d6f606..78d00af 100644
--- a/terraform/vmo-cluster/manifests/metallb-values.yaml
+++ b/terraform/vmo-cluster/manifests/metallb-values.yaml
@@ -19,9 +19,9 @@ charts:
first-pool:
spec:
addresses:
- - "REPLACE_ME"
+ - ${metallb-ip-pool} ###.###.###.### # Add your static IP addresses for MetalLB
# - 192.168.100.50-192.168.100.60
- avoidBuggyIPs: true # Buggy IPs are any .0 or .255 addresses. These are commonly used for subnet ID and broadcast addresses.
+ avoidBuggyIPs: true
autoAssign: true
l2advertisements:
default:
diff --git a/terraform/vmo-cluster/manifests/ubuntu-values.yaml b/terraform/vmo-cluster/manifests/ubuntu-values.yaml
index 00474c6..e7aeb8f 100644
--- a/terraform/vmo-cluster/manifests/ubuntu-values.yaml
+++ b/terraform/vmo-cluster/manifests/ubuntu-values.yaml
@@ -5,7 +5,7 @@ kubeadmconfig:
- apt update
- apt install -y grepcidr
- |
- NETWORKS="10.11.136.128/27"
+ NETWORKS=${maas-host-cidr} #"###.###.###.###/###"
IPS=$(hostname -I)
for IP in $IPS
do
diff --git a/terraform/vmo-cluster/manifests/vmo-values.yaml b/terraform/vmo-cluster/manifests/vmo-values.yaml
index 8fb06e8..6be493e 100644
--- a/terraform/vmo-cluster/manifests/vmo-values.yaml
+++ b/terraform/vmo-cluster/manifests/vmo-values.yaml
@@ -1,42 +1,43 @@
pack:
content:
images:
- - image: gcr.io/spectro-images-public/release/spectro-vm-dashboard:4.4.10
- - image: gcr.io/spectro-images-public/release/kubevirt/virt-operator:v1.2.0
- - image: registry.k8s.io/sig-storage/snapshot-validation-webhook:v6.3.4
- - image: registry.k8s.io/sig-storage/snapshot-controller:v6.3.4
- - image: ghcr.io/k8snetworkplumbingwg/multus-cni:v4.0.2-thick
+ - image: us-docker.pkg.dev/palette-images/palette/spectro-vm-dashboard:4.6.3
+ - image: us-docker.pkg.dev/palette-images/third-party/kubevirt-ui:v19
+ - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-operator:v1.4.0
+ - image: registry.k8s.io/sig-storage/snapshot-validation-webhook:v8.1.0
+ - image: registry.k8s.io/sig-storage/snapshot-controller:v8.1.0
+ - image: registry.k8s.io/descheduler/descheduler:v0.32.0
+ - image: ghcr.io/k8snetworkplumbingwg/multus-cni:v4.1.4-thick
- image: ghcr.io/k8snetworkplumbingwg/multus-dynamic-networks-controller:latest-amd64
- - image: quay.io/kubevirt/cdi-operator:v1.58.0
- - image: quay.io/kubevirt/cdi-uploadproxy:v1.58.0
- - image: quay.io/kubevirt/cdi-controller:v1.58.0
- - image: quay.io/kubevirt/cdi-apiserver:v1.58.0
- - image: quay.io/kubevirt/cdi-importer:v1.58.0
- - image: quay.io/kubevirt/cdi-uploadserver:v1.58.0
- - image: quay.io/kubevirt/cdi-cloner:v1.58.0
- - image: gcr.io/spectro-images-public/release/kubevirt/virt-handler:v1.2.0
- - image: gcr.io/spectro-images-public/release/kubevirt/virt-launcher:v1.2.0
- - image: gcr.io/spectro-images-public/release/kubevirt/virt-exportproxy:v1.2.0
- - image: gcr.io/spectro-images-public/release/kubevirt/virt-exportserver:v1.2.0
- - image: gcr.io/spectro-images-public/release/kubevirt/virt-controller:v1.2.0
- - image: gcr.io/spectro-images-public/release/kubevirt/virt-api:v1.2.0
- - image: registry.k8s.io/descheduler/descheduler:v0.30.1
- - image: gcr.io/spectro-images-public/release/virtual-machine-orchestrator/os/ubuntu-container-disk:22.04
- - image: gcr.io/spectro-images-public/release/virtual-machine-orchestrator/os/fedora-container-disk:37
- - image: gcr.io/spectro-images-public/release/virtual-machine-orchestrator/vlan-filtering/ubuntu:latest
- - image: gcr.io/spectro-images-public/release/spectro-cleanup:1.0.2
- - image: gcr.io/spectro-images-public/release/spectro-kubectl:1.30.2-spectro-4.4.a
+ - image: quay.io/kubevirt/cdi-operator:v1.61.0
+ - image: quay.io/kubevirt/cdi-uploadproxy:v1.61.0
+ - image: quay.io/kubevirt/cdi-controller:v1.61.0
+ - image: quay.io/kubevirt/cdi-apiserver:v1.61.0
+ - image: quay.io/kubevirt/cdi-importer:v1.61.0
+ - image: quay.io/kubevirt/cdi-uploadserver:v1.61.0
+ - image: quay.io/kubevirt/cdi-cloner:v1.61.0
+ - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-handler:v1.4.0
+ - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-launcher:v1.4.0
+ - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-exportproxy:v1.4.0
+ - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-exportserver:v1.4.0
+ - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-controller:v1.4.0
+ - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-api:v1.4.0
+ - image: us-docker.pkg.dev/palette-images/palette/virtual-machine-orchestrator/os/ubuntu-container-disk:22.04
+ - image: us-docker.pkg.dev/palette-images/palette/virtual-machine-orchestrator/os/fedora-container-disk:37
+ - image: us-docker.pkg.dev/palette-images/palette/virtual-machine-orchestrator/vlan-filtering/ubuntu:latest
+ - image: us-docker.pkg.dev/palette-images/palette/spectro-cleanup:1.0.3
+ - image: us-docker.pkg.dev/palette-images/palette/spectro-kubectl:v1.31.5-vmo
namespace: vm-dashboard
palette:
config:
dashboard:
access: private
- spectrocloud.com/install-priority: "20"
+ spectrocloud.com/install-priority: "10"
charts:
virtual-machine-orchestrator:
image:
- repository: gcr.io/spectro-images-public/release/spectro-vm-dashboard
- tag: "4.4.10"
+ repository: us-docker.pkg.dev/palette-images/palette/spectro-vm-dashboard
+ tag: "4.6.3"
service:
type: "ClusterIP"
appConfig:
@@ -58,9 +59,10 @@ charts:
ubuntu2204staticIP: false
fedora37staticIP: false
# To create additional vm templates refer to https://docs.spectrocloud.com/vm-management/create-manage-vm/create-vm-template
- # This namespace will be used to store golden images
+      # This namespace is used to store golden images.
+
goldenImagesNamespace: "vmo-golden-images"
- # These namespaces will be created and set up to deploy VMs into
+ # These namespaces are created and set up to deploy VMs into
vmEnabledNamespaces:
- "default"
- "virtual-machines"
@@ -74,18 +76,18 @@ charts:
enabled: true
namespace: kube-system
image:
- repository: gcr.io/spectro-images-public/release/virtual-machine-orchestrator/vlan-filtering/ubuntu
+ repository: us-docker.pkg.dev/palette-images/palette/virtual-machine-orchestrator/vlan-filtering/ubuntu
pullPolicy: IfNotPresent
tag: "latest"
env:
# Which bridge interface to control
- bridgeIF: "br0"
+ bridgeIF: ${vmo-network-interface} #"br0"
# Beginning of VLAN range to enable
- allowedVlans: "128,129"
+ allowedVlans: ${vm-vlans} # "1"
# Set to "true" to enable VLANs on the br0 interface for the host to use itself
allowVlansOnSelf: "true"
# Beginning of VLAN range to enable for use by the node itself
- allowedVlansOnSelf: "128,129"
+ allowedVlansOnSelf: ${host-vlans} #"1"
snapshot-controller:
enabled: true
replicas: 1
@@ -93,9 +95,9 @@ charts:
image:
repository: registry.k8s.io/sig-storage/snapshot-controller
pullPolicy: IfNotPresent
- tag: "v6.3.4"
- # A list/array of extra args that should be used
- # when running the controller. Default args include log verbose level
+ tag: "v8.1.0"
+ # A list/array of extra args to use
+ # when running the controller. Default args include log verbose level
# and leader election
extraArgs: []
# snapshot webhook config
@@ -108,7 +110,7 @@ charts:
repository: registry.k8s.io/sig-storage/snapshot-validation-webhook
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
- tag: "v6.3.4"
+ tag: "v8.1.0"
validatingWebhook:
failurePolicy: Fail
timeoutSeconds: 2
@@ -116,14 +118,14 @@ charts:
# TLS certificate is required. This Helm chart relies on
# cert-manager.io for managing TLS certificates.
tls:
- # If not empty, this issuer will be used to sign the certificate.
- # If none is provided, a new, self-signing issuer will be created.
+ # If not empty, this issuer is used to sign the certificate.
+ # If none is provided, a new, self-signing issuer is created.
issuerRef: {}
# name:
# kind:
# group: cert-manager.io
- # Certificate duration. The generated certificate will be automatically
+ # Certificate duration. The generated certificate is automatically
# renewed 1/3 of `certDuration` before its expiry.
# Value must be in units accepted by Go time.ParseDuration.
# See https://golang.org/pkg/time/#ParseDuration for allowed formats.
@@ -190,10 +192,6 @@ charts:
velero.io/csi-volumesnapshot-class: "true"
# time for sleep hook in seconds
hooksleepTime: 12
- # this install cert-manager latest version if not already installed
- cert-manager:
- enabled: false
- installCRDs: true
kubevirt:
enabled: true
# defaults to kubevirt
@@ -203,14 +201,14 @@ charts:
pod-security.kubernetes.io/enforce-version: v{{ .spectro.system.kubernetes.version | substr 0 4 }}
replicas: 1
service:
- type: ClusterIP
+ type: LoadBalancer
port: 443
targetPort: 8443
image:
- repository: gcr.io/spectro-images-public/release/kubevirt/virt-operator
+ repository: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-operator
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
- tag: "v1.2.0"
+ tag: "v1.4.0"
## The Kubevirt CR that gets created
kubevirtResource:
name: kubevirt
@@ -225,15 +223,21 @@ charts:
- HotplugNICs
- VMLiveUpdateFeatures
- VMPersistentState
- - Sidecar
+ - VolumesUpdateStrategy
- VolumeMigration
- - CPUManager
- # for additional feature gates refer to https://docs.spectrocloud.com/vm-management#featuregates
+ - CPUManager
+ - Sidecar
+ #- VMPersistentState
+ # for additional feature gates refer to https://docs.spectrocloud.com/vm-management#featuregates
config:
evictionStrategy: "LiveMigrate"
# additionalConfig lets you define any configuration other than developerConfiguration and evictionStrategy
additionalConfig:
vmStateStorageClass: "ceph-filesystem"
+ #vmStateStorageClass: "" #fileSystem-based storageclass for persistent TPM
+ migrations:
+ allowAutoConverge: true
+ completionTimeoutPerGiB: 800
# additionalDevConfig lets you define dev config other than emulation and feature gate
additionalDevConfig: {}
# vmRolloutStrategy lets you define how changes to a VM object propagate to its VMI objects
@@ -242,43 +246,40 @@ charts:
customizeComponents:
# flags:
# api:
- # v:
+ # v:
# "5"
# port:
- # "8443"
+ # "8443"
imagePullPolicy: IfNotPresent
infra: {}
# The name of the Prometheus service account that needs read-access to KubeVirt endpoints
monitorAccount: "prometheus-operator-prometheus"
# The namespace Prometheus is deployed in
monitorNamespace: "monitoring"
- # The namespace the service monitor will be deployed. Either specify this or the monitorNamespace
+ # The namespace the service monitor is deployed to. Either specify this or the monitorNamespace
serviceMonitorNamespace: "monitoring"
workloads: {}
workloadsUpdateStrategy:
workloadUpdateMethods:
- LiveMigrate
- # uninstallStrategy to use, options are RemoveWorkloads, BlockUninstallIfWorkloadsExist
+ # uninstallStrategy to use, options are RemoveWorkloads, BlockUninstallIfWorkloadsExist
uninstallStrategy: ""
ingress:
- enabled: true
+ enabled: false
ingressClassName: nginx
annotations:
cert-manager.io/issuer: kubevirt-selfsigned-issuer
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
labels: {}
hosts:
- - host: virt-exportproxy.maas-eng.sc
+ - host: virt-exportproxy.maas.sc
paths:
- path: /
pathType: ImplementationSpecific
- tls:
- - secretName: virt-exportproxy-tls
- hosts:
- - virt-exportproxy.maas-eng.sc
- # - secretName: chart-example-tls
- # hosts:
- # - virt-exportproxy.maas.sc
+ # tls:
+ # - secretName: chart-example-tls
+ # hosts:
+ # - virt-exportproxy.maas.sc
cdi:
enabled: true
namespaceLabels:
@@ -289,40 +290,27 @@ charts:
repository: quay.io/kubevirt/cdi-operator
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
- tag: "v1.58.0"
- service:
- type: ClusterIP
- port: 443
- targetPort: 8443
+ tag: "v1.61.0"
# set enabled to true and add private registry details to bring up VMs in airgap environment
privateRegistry:
enabled: false
registryIP: #Ex: 10.10.225.20
- registryBasePath: #Ex: specto-images
- ## The CDI CR that gets created
- cdiResource:
- additionalFeatureGates:
- # - FeatureName
- additionalConfig:
- podResourceRequirements:
- requests:
- cpu: 1
- memory: 2G
- limits:
- cpu: 2
- memory: 8G
- filesystemOverhead:
- global: "0.055"
- storageClass:
- spectro-storage-class: "0.1"
- #insecureRegistries: [] # List of insecure registries to allow in the CDI importer, preffered in air-gapped environments
- #importProxy:
- # HTTPProxy: "http://username:password@your-proxy-server:3128"
- # HTTPSProxy: "http://username:password@your-proxy-server:3128"
- # noProxy: "127.0.0.1,localhost,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.company.local"
- # TrustedCAProxy: configmap-name # optional: the ConfigMap name of a user-provided trusted certificate authority (CA) bundle to be added to the importer pod CA bundle
+ registryBasePath:
+ #Ex: specto-images
+ serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: ""
+ service:
+ type: LoadBalancer
+ port: 443
+ targetPort: 8443
ingress:
- enabled: true
+ enabled: false
className: "nginx"
annotations:
cert-manager.io/issuer: cdi-selfsigned-issuer
@@ -332,41 +320,94 @@ charts:
nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
hosts:
- - host: cdi-uploadproxy.maas-eng.sc
+ - host: cdi-uploadproxy.maas.sc
paths:
- path: /
pathType: ImplementationSpecific
- tls:
- - secretName: cdi-uploadproxy-tls
- hosts:
- - cdi-uploadproxy.maas-eng.sc
- # - secretName: chart-example-tls
- # hosts:
- # - cdi-uploadproxy.maas.sc
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - cdi-uploadproxy.maas.sc
+ resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ ## The CDI CR that gets created
+ cdiResource:
+ additionalFeatureGates: # - FeatureName
+ additionalConfig:
+ filesystemOverhead:
+ global: "0.08"
+ storageClass:
+ spectro-storage-class: "0.08"
+ podResourceRequirements:
+ requests:
+ cpu: 250m
+ memory: 1G
+ limits:
+ cpu: 1
+ memory: 8G
+ insecureRegistries: [] # List of insecure registries to allow in the CDI importer, preffered in air-gapped environments
+ importProxy:
+ #HTTPProxy: "http://username:password@your-proxy-server:3128"
+ #HTTPSProxy: "http://username:password@your-proxy-server:3128"
+ #noProxy: "127.0.0.1,localhost,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.company.local"
+ #TrustedCAProxy: configmap-name # optional: the ConfigMap name of an user-provided trusted certificate authority (CA) bundle to be added to the importer pod CA bundle
+ additionalSpec:
+ infra:
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ workload:
+ nodeSelector:
+ kubernetes.io/os: linux
+ imagePullPolicy: IfNotPresent
multus:
enabled: true
image:
repository: ghcr.io/k8snetworkplumbingwg/multus-cni
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
- tag: "v4.0.2-thick"
+ tag: "v4.1.4-thick"
networkController:
criSocket:
enableK3SHostPath: false # true for K3S and RKE2, false for PXK-E
- criSocketContainerPath: /host/run/containerd/containerd.sock
+ paletteAgentMode: false # true for running Palette Agent Mode clusters with PXK-E
+ # criSocketHostPathOverride: /run/containerd/containerd.sock
imagePullSecrets: []
podAnnotations: {}
+ resources:
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ limits:
+ cpu: 100m
+ memory: 1Gi
+ requests:
+ cpu: 100m
+ memory: 50Mi
nodeSelector: {}
affinity: {}
dpdkCompatibility: false
cleanup:
- image: gcr.io/spectro-images-public/release/spectro-cleanup
- tag: "1.0.2"
+ image: us-docker.pkg.dev/palette-images/palette/spectro-cleanup
+ tag: "1.0.3"
networkAttachDef:
create: false
# a json string to apply
- config: ''
- # a sample config
+ config: ""
+ # a sample config
# '{
# "cniVersion": "0.3.0",
# "type": "macvlan",
@@ -391,10 +432,9 @@ charts:
image:
repository: registry.k8s.io/descheduler/descheduler
# Overrides the image tag whose default is the chart version
- tag: "v0.30.1"
+ tag: "v0.32.0"
pullPolicy: IfNotPresent
- imagePullSecrets:
- # - name: container-registry-secret
+ imagePullSecrets: # - name: container-registry-secret
resources:
requests:
cpu: 500m
@@ -402,6 +442,9 @@ charts:
limits:
cpu: 500m
memory: 256Mi
+ ports:
+ - containerPort: 10258
+ protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
@@ -412,15 +455,15 @@ charts:
runAsNonRoot: true
runAsUser: 1000
# podSecurityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
- podSecurityContext: {}
- # fsGroup: 1000
-
+ podSecurityContext: {} # fsGroup: 1000
nameOverride: ""
fullnameOverride: "descheduler"
+ # -- Override the deployment namespace; defaults to .Release.Namespace
+ namespaceOverride: ""
# labels that'll be applied to all resources
commonLabels: {}
cronJobApiVersion: "batch/v1"
- schedule: "*/2 * * * *"
+ schedule: "*/15 * * * *"
suspend: false
# startingDeadlineSeconds: 200
# successfulJobsHistoryLimit: 3
@@ -429,7 +472,7 @@ charts:
# timeZone: Etc/UTC
# Required when running as a Deployment
- deschedulingInterval: 5m
+ deschedulingInterval: 15m
# Specifies the replica count for Deployment
# Set leaderElection if you want to use more than 1 replica
# Set affinity.podAntiAffinity rule if you want to schedule onto a node
@@ -445,7 +488,7 @@ charts:
# retryPeriod: 2s
# resourceLock: "leases"
# resourceName: "descheduler"
- # resourceNamescape: "kube-system"
+ # resourceNamespace: "kube-system"
command:
- "/bin/descheduler"
@@ -453,10 +496,16 @@ charts:
v: 3
# Recommended to use the latest Policy API version supported by the Descheduler app version
deschedulerPolicyAPIVersion: "descheduler/v1alpha2"
+ # deschedulerPolicy contains the policies the descheduler executes.
+ # To use policies stored in an existing configMap use:
+ # NOTE: The name of the cm should comply to {{ template "descheduler.fullname" . }}
+ # deschedulerPolicy: {}
deschedulerPolicy:
- # nodeSelector: "key1=value1,key2=value2"
- # maxNoOfPodsToEvictPerNode: 10
+ nodeSelector: kubevirt.io/schedulable=true
+ maxNoOfPodsToEvictPerNode: 10
# maxNoOfPodsToEvictPerNamespace: 10
+ metricsCollector:
+ enabled: true
# ignorePvcPods: true
# evictLocalStoragePods: true
# evictDaemonSetPods: true
@@ -474,6 +523,8 @@ charts:
args:
ignorePvcPods: true
evictLocalStoragePods: true
+ nodeFit: true
+ ignorePodsWithoutPDB: true
- name: RemoveDuplicates
- name: RemovePodsHavingTooManyRestarts
args:
@@ -484,18 +535,34 @@ charts:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
- name: RemovePodsViolatingNodeTaints
+ args:
+ excludedTaints:
+ - node.kubernetes.io/unschedulable
- name: RemovePodsViolatingInterPodAntiAffinity
- name: RemovePodsViolatingTopologySpreadConstraint
- name: LowNodeUtilization
args:
thresholds:
cpu: 20
- memory: 20
- pods: 20
+ memory: 25
+ pods: 100
targetThresholds:
- cpu: 50
- memory: 50
- pods: 50
+ cpu: 60
+ memory: 75
+ pods: 100
+ metricsUtilization:
+ metricsServer: true
+ evictableNamespaces:
+ exclude:
+ - "cert-manager"
+ - "kube-system"
+ - "palette-system"
+ - "metallb-system"
+ - "cluster-{{ .spectro.system.cluster.uid }}"
+ - "kubevirt"
+ - "monitoring"
+ - "nginx"
+ - "vm-dashboard"
plugins:
balance:
enabled:
@@ -552,11 +619,11 @@ charts:
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
- name:
- # Specifies custom annotations for the serviceAccount
+ name: # Specifies custom annotations for the serviceAccount
annotations: {}
podAnnotations: {}
- podLabels: {}
+ podLabels:
+ spectrocloud.com/connection: proxy
dnsConfig: {}
livenessProbe:
failureThreshold: 3
@@ -583,8 +650,7 @@ charts:
# The namespace where Prometheus expects to find service monitors.
# namespace: ""
# Add custom labels to the ServiceMonitor resource
- additionalLabels: {}
- # prometheus: kube-prometheus-stack
+ additionalLabels: {} # prometheus: kube-prometheus-stack
interval: ""
# honorLabels: true
insecureSkipVerify: true
@@ -599,4 +665,4 @@ charts:
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
- # action: replace
+ # action: replace
\ No newline at end of file
diff --git a/terraform/vmo-cluster/terraform.tfvars b/terraform/vmo-cluster/terraform.tfvars
index 01af73a..95faf86 100644
--- a/terraform/vmo-cluster/terraform.tfvars
+++ b/terraform/vmo-cluster/terraform.tfvars
@@ -26,32 +26,56 @@ maas-control-plane-resource-pool = "Palette-Sustaining" # Provide a resourc
maas-control-plane-azs = ["az1"] # Provide a set of availability zones for the control plane nodes.
maas-control-plane-node-tags = ["docs-cp"] # Provide a set of node tags for the control plane nodes.
+ctl-node-min-cpu = "6" # Minimum number of CPU cores required for control plane nodes
+ctl-node-min-memory-mb = "16384" # Minimum amount of RAM (memory) required for control plane nodes
+wrk-node-min-cpu = "8" # Minimum number of CPU cores required for worker nodes
+wrk-node-min-memory-mb = "16384" # Minimum amount of RAM (memory) required for worker nodes
-# #####################
-# # cluster_profiles.tf
-# #####################
-vmo-cluster-name = "vmo-cluster-maas"
-cluster-profile-type = "cluster" # Infrastructure, Full, or Add-on
-cluster-profile-version = "1.0.0" # Version number for the cluster profile in Palette
+
+#####################
+# cluster_profiles.tf
+#####################
+vmo_cluster_name = "vmo-cluster-maas"
+clusterProfileType = "Full" # Infrastructure, Full, or Add-on
+clusterProfileVersion = 1.0.0 # Version number for the cluster profile in Palette
+
+
+####################
+# ubuntu-values.tf
+#####################
+maas-host-cidr = "10.11.110.130/24"
-# ##############
-# # clusters.tf
-# ##############
-ctl-node-min-cpu = 6 # Minimum number of CPU cores required for control plane nodes
-ctl-node-min-memory-mb = 16384 # Minimum amount of RAM (memory) required for control plane nodes
-wrk-node-min-cpu = 8 # Minimum number of CPU cores required for worker nodes
-wrk-node-min-memory-mb = 16384 # Minimum amount of RAM (memory) required for worker nodes
+#####################
+# vmo-values.tf
+#####################
+vmo-network-interface = "br0"
+vm-vlans = ["1"]
+host-vlans = ["1"]
-# #####################
-# # virtual_machines.tf
-# #####################
+
+###########################
+# manifests/k8s-values.yaml
+###########################
+pod-CIDR = "100.64.0.0/16" # Set the subnet that your pods will run on
+clusterServicesCIDR = "100.64.64.0/16"
+
+
+###############################
+# manifests/metallb-values.yaml
+###############################
+metallb-ip-pool = "10.11.130.129-10.11.130.131" # IP addresses to be assigned for use by MetalLB
+
+
+#####################
+# virtual_machines.tf
+#####################
vm-deploy-namespace = "default" # Namespace where your VM will be deployed.
vm-deploy-name = "vmo-vm" # The name of your VM
-# vm_labels = "my-vmo-vm" # Labels that will be applied to your VM. For this tutorial, use a single label.
+vm-labels = ["my-vmo-vm"] # Labels that will be applied to your VM. For this tutorial, use a single label.
vm-storage-Gi = "50Gi" # Size of the disk (PVC) that your VM will have.
vm-cpu-cores = 2 # Number of CPU cores your VM will have.
vm-cpu-sockets = 1 # Number of physical CPU sockets the CPU cores should be spread over.
vm-cpu-threads = 2 # Number of CPU threads to use for the VM CPU
-vm-memory-Gi = "4Gi" # Amount of RAM (memory) your VM will have
\ No newline at end of file
+vm-memory-Gi = "4Gi" # Amount of RAM (memory) your VM will have
diff --git a/terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl
index 8a37c85..78eaaef 100644
--- a/terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl
+++ b/terraform/vmo-cluster/tests/maas-cluster-missing-values.tftest.hcl
@@ -15,23 +15,6 @@ variables {
maas-control-plane-resource-pool = ""
maas-control-plane-azs = []
maas-control-plane-node-tags = []
- vmo-cluster-name = ""
- cluster-profile-type = ""
- cluster-profile-version = ""
- ctl-node-min-cpu = 1
- ctl-node-min-memory-mb = 16384
- wrk-node-min-cpu = 1
- wrk-node-min-memory-mb = 16384
- pod_CIDR = ""
- serviceClusterIpRange = ""
- metallb-ip-pool = []
- vm-deploy-namespace = ""
- vm-deploy-name = ""
- vm-storage-Gi = ""
- vm-cpu-cores = 2
- vm-cpu-sockets = 1
- vm-cpu-threads = 2
- vm-memory-Gi = ""
}
mock_provider "spectrocloud" {
diff --git a/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl
index c6559b5..c89a7b9 100644
--- a/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl
+++ b/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl
@@ -15,23 +15,6 @@ variables {
maas-control-plane-resource-pool = "REPLACE ME"
maas-control-plane-azs = ["REPLACE ME"]
maas-control-plane-node-tags = ["REPLACE ME"]
- vmo-cluster-name = "REPLACE ME"
- cluster-profile-type = "REPLACE ME"
- cluster-profile-version = "REPLACE ME"
- ctl-node-min-cpu = 1
- ctl-node-min-memory-mb = 16384
- wrk-node-min-cpu = 1
- wrk-node-min-memory-mb = 16384
- pod_CIDR = "REPLACE ME"
- serviceClusterIpRange = "REPLACE ME"
- metallb-ip-pool = ["REPLACE ME"]
- vm-deploy-namespace = "REPLACE ME"
- vm-deploy-name = "REPLACE ME"
- vm-storage-Gi = "REPLACE ME"
- vm-cpu-cores = 2
- vm-cpu-sockets = 1
- vm-cpu-threads = 2
- vm-memory-Gi = "REPLACE ME"
}
mock_provider "spectrocloud" {
diff --git a/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl
index 7a2f524..60f9ae4 100644
--- a/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl
+++ b/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl
@@ -15,23 +15,6 @@ variables {
maas-control-plane-resource-pool = "test-cp-pool"
maas-control-plane-azs = ["test-cp-az"]
maas-control-plane-node-tags = ["test-cp-tags"]
- vmo-cluster-name = "name"
- cluster-profile-type = "profile-type"
- cluster-profile-version = "profile-version"
- ctl-node-min-cpu = 1
- ctl-node-min-memory-mb = 16384
- wrk-node-min-cpu = 1
- wrk-node-min-memory-mb = 16384
- pod_CIDR = "1.1.1.1/24"
- serviceClusterIpRange = "1.1.1.1/24"
- metallb-ip-pool = ["1.1.1.1"]
- vm-deploy-namespace = "default"
- vm-deploy-name = "name"
- vm-storage-Gi = "64Gi"
- vm-cpu-cores = 2
- vm-cpu-sockets = 1
- vm-cpu-threads = 2
- vm-memory-Gi = "8Gi"
}
mock_provider "spectrocloud" {
diff --git a/terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl
index ba45dd2..3d24108 100644
--- a/terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl
+++ b/terraform/vmo-cluster/tests/maas-cluster-zero-nodes.tftest.hcl
@@ -1,6 +1,6 @@
# Copyright (c) Spectro Cloud
# SPDX-License-Identifier: Apache-2.0
-# Test case 6 - Verify control plane, worker nodes, VM Resources cannot be set to 0.
+# Test case 6 - Verify control plane and worker nodes cannot be set to 0.
variables {
deploy-maas = true
@@ -15,23 +15,6 @@ variables {
maas-control-plane-resource-pool = "test-cp-pool"
maas-control-plane-azs = ["test-cp-az"]
maas-control-plane-node-tags = ["test-cp-tags"]
- vmo-cluster-name = "name"
- cluster-profile-type = "profile-type"
- cluster-profile-version = "profile-version"
- ctl-node-min-cpu = 0
- ctl-node-min-memory-mb = 0
- wrk-node-min-cpu = 0
- wrk-node-min-memory-mb = 0
- pod_CIDR = "1.1.1.1/24"
- serviceClusterIpRange = "1.1.1.1/24"
- metallb-ip-pool = ["1.1.1.1"]
- vm-deploy-namespace = "default"
- vm-deploy-name = "name"
- vm-storage-Gi = "64Gi"
- vm-cpu-cores = 0
- vm-cpu-sockets = 0
- vm-cpu-threads = 0
- vm-memory-Gi = "8Gi"
}
mock_provider "spectrocloud" {
diff --git a/terraform/vmo-cluster/tests/maas-cluster.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster.tftest.hcl
index ff6d35a..6b2b426 100644
--- a/terraform/vmo-cluster/tests/maas-cluster.tftest.hcl
+++ b/terraform/vmo-cluster/tests/maas-cluster.tftest.hcl
@@ -15,23 +15,6 @@ variables {
maas-control-plane-resource-pool = "test-cp-pool"
maas-control-plane-azs = ["test-cp-az"]
maas-control-plane-node-tags = ["test-cp-tags"]
- vmo-cluster-name = "name"
- cluster-profile-type = "profile-type"
- cluster-profile-version = "profile-version"
- ctl-node-min-cpu = 1
- ctl-node-min-memory-mb = 16384
- wrk-node-min-cpu = 1
- wrk-node-min-memory-mb = 16384
- pod_CIDR = "1.1.1.1/24"
- serviceClusterIpRange = "1.1.1.1/24"
- metallb-ip-pool = ["1.1.1.1"]
- vm-deploy-namespace = "default"
- vm-deploy-name = "name"
- vm-storage-Gi = "64Gi"
- vm-cpu-cores = 2
- vm-cpu-sockets = 1
- vm-cpu-threads = 2
- vm-memory-Gi = "8Gi"
}
mock_provider "spectrocloud" {
diff --git a/terraform/vmo-cluster/virtual_machines.tf b/terraform/vmo-cluster/virtual_machines.tf
index 0002be6..8e9272a 100644
--- a/terraform/vmo-cluster/virtual_machines.tf
+++ b/terraform/vmo-cluster/virtual_machines.tf
@@ -18,7 +18,8 @@ resource "spectrocloud_virtual_machine" "virtual-machine" {
}
labels = {
- "tf" = "spectrocloud-tutorials"
+ labels = var.vm-labels
+ # "tf" = "spectrocloud-tutorials"
"kubevirt.io/vm" = "ubuntu-tutorial-vm"
}
From 0b8f63815ae1c9aabaf664cd91fa3ec1cfb23056 Mon Sep 17 00:00:00 2001
From: kenheaslip-sc
Date: Sun, 8 Jun 2025 20:00:47 -0400
Subject: [PATCH 05/11] new variables, validations, and working manifests
added.
---
terraform/vmo-cluster/cluster_profiles.tf | 12 +-
terraform/vmo-cluster/clusters.tf | 2 +-
terraform/vmo-cluster/data.tf | 10 +-
terraform/vmo-cluster/inputs.tf | 92 +-
.../vmo-cluster/manifests/cni-values.yaml | 2238 +++++++++--------
.../vmo-cluster/manifests/csi-values.yaml | 312 ++-
.../vmo-cluster/manifests/k8s-values.yaml | 6 +-
.../vmo-cluster/manifests/metallb-values.yaml | 4 +-
.../vmo-cluster/manifests/ubuntu-values.yaml | 7 +-
.../manifests/vmo-extras-manifest.yaml | 96 +-
.../vmo-cluster/manifests/vmo-values.yaml | 6 +-
terraform/vmo-cluster/provider.tf | 2 +-
terraform/vmo-cluster/terraform.tfvars | 88 +-
terraform/vmo-cluster/virtual_machines.tf | 2 +-
14 files changed, 1616 insertions(+), 1261 deletions(-)
diff --git a/terraform/vmo-cluster/cluster_profiles.tf b/terraform/vmo-cluster/cluster_profiles.tf
index 8459d8b..b923767 100644
--- a/terraform/vmo-cluster/cluster_profiles.tf
+++ b/terraform/vmo-cluster/cluster_profiles.tf
@@ -9,15 +9,15 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
description = "A basic cluster profile for MAAS VMO"
tags = concat(var.tags, ["env:maas"])
cloud = "maas"
- type = var.clusterProfileType # "cluster"
- version = var.clusterProfileVersion
+ type = var.cluster-profile-type
+ version = var.cluster-profile-version
pack {
name = data.spectrocloud_pack.maas_ubuntu.name
tag = data.spectrocloud_pack.maas_ubuntu.version
uid = data.spectrocloud_pack.maas_ubuntu.id
values = templatefile("manifests/ubuntu-values.yaml", {
- maas-host-cidr = var.maas-host-cidr
+ node-network = var.node-network
})
type = "spectro"
}
@@ -27,8 +27,8 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
tag = data.spectrocloud_pack.maas_k8s.version
uid = data.spectrocloud_pack.maas_k8s.id
values = templatefile("manifests/k8s-values.yaml", {
- pod-cidr = var.pod-cidr,
- clusterServicesCIDR = var.clusterServicesCIDR
+ pod-CIDR = var.pod-CIDR,
+ clusterServicesCIDR = var.cluster-services-CIDR
type = "spectro"
})
}
@@ -66,7 +66,7 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
tag = data.spectrocloud_pack.maas_vmo.version
uid = data.spectrocloud_pack.maas_vmo.id
values = templatefile("manifests/vmo-values.yaml", {
- network-bridge = var.vmo-network-interface,
+ vmo-network-interface = var.vmo-network-interface,
vm-vlans = var.vm-vlans,
host-vlans = var.host-vlans
})
diff --git a/terraform/vmo-cluster/clusters.tf b/terraform/vmo-cluster/clusters.tf
index e8977a8..cfb02e5 100644
--- a/terraform/vmo-cluster/clusters.tf
+++ b/terraform/vmo-cluster/clusters.tf
@@ -8,7 +8,7 @@
resource "spectrocloud_cluster_maas" "maas-cluster" {
count = var.deploy-maas ? 1 : 0
- name = "vmo-cluster-maas"
+ name = var.vmo-cluster-name
tags = concat(var.tags, ["env:maas"])
cloud_account_id = data.spectrocloud_cloudaccount_maas.account[0].id
pause_agent_upgrades = "unlock"
diff --git a/terraform/vmo-cluster/data.tf b/terraform/vmo-cluster/data.tf
index dfb524b..baac57c 100644
--- a/terraform/vmo-cluster/data.tf
+++ b/terraform/vmo-cluster/data.tf
@@ -25,19 +25,19 @@ data "spectrocloud_pack" "maas_ubuntu" {
data "spectrocloud_pack" "maas_k8s" {
name = "kubernetes"
- version = "1.30.6"
+ version = "1.32.2"
registry_uid = data.spectrocloud_registry.public_registry.id
}
data "spectrocloud_pack" "maas_cni" {
name = "cni-cilium-oss"
- version = "1.15.3"
+ version = "1.17.1"
registry_uid = data.spectrocloud_registry.public_registry.id
}
data "spectrocloud_pack" "maas_csi" {
name = "csi-rook-ceph-helm"
- version = "1.14.9"
+ version = "1.16.3"
registry_uid = data.spectrocloud_registry.public_registry.id
}
@@ -49,7 +49,7 @@ data "spectrocloud_pack" "maas_metallb" {
data "spectrocloud_pack" "maas_vmo" {
name = "virtual-machine-orchestrator"
- version = "4.4.10"
+ version = "4.6.3"
registry_uid = data.spectrocloud_registry.public_registry.id
}
@@ -58,4 +58,4 @@ data "spectrocloud_cluster" "maas_vmo_cluster" {
depends_on = [spectrocloud_cluster_maas.maas-cluster]
name = "vmo-cluster-maas"
context = "project"
-}
\ No newline at end of file
+}
diff --git a/terraform/vmo-cluster/inputs.tf b/terraform/vmo-cluster/inputs.tf
index 2b85c93..7701bf2 100644
--- a/terraform/vmo-cluster/inputs.tf
+++ b/terraform/vmo-cluster/inputs.tf
@@ -38,22 +38,22 @@ variable "tags" {
# cluster_profiles.tf
#####################
-variable "clusterProfileType" {
+variable "cluster-profile-type" {
type = string
description = "The name of the PCG that will be used to deploy the cluster."
validation {
- condition = var.deploy-maas ? var.clusterProfileType != "REPLACE ME" && lower(var.clusterProfileType) == "full" || lower(var.clusterProfileType) == "infrastructure" || lower(var.clusterProfileType) == "add-on" || lower(var.clusterProfileType) == "app" : true
- error_message = "Cluster profile type must be "full", "infrastructure", "add-on", or "app"."
+ condition = var.deploy-maas ? var.cluster-profile-type != "REPLACE ME" && lower(var.cluster-profile-type) == "full" || lower(var.cluster-profile-type) == "infrastructure" || lower(var.cluster-profile-type) == "add-on" : true
+ error_message = "Cluster profile type must be 'full', 'infrastructure', 'add-on', or 'app'."
}
}
-variable "clusterProfileVersion" {
+variable "cluster-profile-version" {
type = string
description = "The name of the PCG that will be used to deploy the cluster."
validation {
- condition = var.deploy-maas ? var.clusterProfileVersion != "REPLACE ME" && var.clusterProfileVersion > 0 : true
+ condition = var.deploy-maas ? var.cluster-profile-version != "REPLACE ME" && var.cluster-profile-version != "" : true
error_message = "Cluster profile version must be set."
}
}
@@ -77,7 +77,7 @@ variable "ctl-node-min-memory-mb" {
description = "Minimum amount of RAM allocated to the Control Plane node."
validation {
- condition = var.deploy-maas ? var.ctl-node-min-cpu > 0 : true
+ condition = var.deploy-maas ? var.ctl-node-min-memory-mb > 0 : true
error_message = "Provide a valid amount of RAM (MB) for your Control Plane node."
}
}
@@ -97,15 +97,20 @@ variable "wrk-node-min-memory-mb" {
description = "Minimum amount of RAM allocated to the Control Plane node."
validation {
- condition = var.deploy-maas ? var.wrk-node-min-cpu > 0 : true
+ condition = var.deploy-maas ? var.wrk-node-min-memory-mb > 0 : true
error_message = "Provide a valid amount of RAM (MB) for your worker node."
}
}
+variable "vmo-cluster-name" {
+ type = string
+ description = "The name of the cluster."
-######
-# MAAS
-######
+ validation {
+ condition = var.deploy-maas ? var.vmo-cluster-name != "REPLACE ME" && var.vmo-cluster-name != "" : true
+ error_message = "Provide the correct MAAS PCG name."
+ }
+}
variable "deploy-maas" {
type = bool
@@ -220,45 +225,31 @@ variable "maas-control-plane-node-tags" {
}
#################
-# ubuntu-values.yaml
-#################
-
-variable "maas-host-cidr" {
- type = string
- description = "CIDR notation subnets or IP range ex. 192.168.1.0/24 or 192.168.1.0-192.168.1.255"
-
- validation {
- condition = var.deploy-maas ? !contains(var.maas-host-cidr, "REPLACE ME") && length(var.maas-host-cidr) != 0 : true
- error_message = "Provide a valid Subnet (CIDR Notation) for MAAS server network."
- }
-}
-
-#################
-# k8s-values.yaml
+# /manifests/k8s-values.yaml
#################
-variable "pod-cidr" {
- type = string
- description = "CIDR notation subnets or IP range ex. 192.168.1.0/24 or 192.168.1.0-192.168.1.255"
+variable "pod-CIDR" {
+ type = set(string)
+ description = "CIDR notation subnets for the pd network ex. 192.168.1.0/24."
validation {
- condition = var.deploy-maas ? !contains(var.pod-cidr, "REPLACE ME") && length(var.pod-cidr) != 0 : true
+ condition = var.deploy-maas ? !contains(var.pod-CIDR, "REPLACE ME") && length(var.pod-CIDR) != 0 : true
error_message = "Provide a valid Subnet (CIDR Notation) for the pod network."
}
}
-variable "clusterServicesCIDR" {
- type = string
- description = "CIDR notation subnets or IP range ex. 192.168.1.0/24 or 192.168.1.0-192.168.1.255"
+variable "cluster-services-CIDR" {
+ type = set(string)
+ description = "CIDR notation subnets for cluster services ex. 192.168.1.0/24."
validation {
- condition = var.deploy-maas ? !contains(var.clusterServicesCIDR, "REPLACE ME") && length(var.clusterServicesCIDR) != 0 : true
+ condition = var.deploy-maas ? !contains(var.cluster-services-CIDR, "REPLACE ME") && length(var.cluster-services-CIDR) != 0 : true
error_message = "Provide a valid Subnet (CIDR Notation for cluster services."
}
}
#####################
-# metallb-values.yaml
+# /manifests/metallb-values.yaml
#####################
variable "metallb-ip-pool" {
@@ -272,11 +263,11 @@ variable "metallb-ip-pool" {
}
#################
-# vmo-values.yaml
+# /manifests/vmo-values.yaml
#################
variable "vmo-network-interface" {
- type = string
+ type = set(string)
description = "The network interface VMO will use for VM traffic."
validation {
@@ -297,6 +288,21 @@ variable "host-vlans" {
default = 1
}
+#################
+# /manifests/ubuntu-values.yaml
+#################
+
+variable "node-network" {
+ type = string
+ description = "The subnet the Ubuntu nodes will use."
+
+ validation {
+ condition = var.deploy-maas ? var.node-network != "REPLACE ME" && length(var.node-network) != 0 : true
+ error_message = "Provide a valid network interface for the VMO service to use."
+ }
+}
+
+
#####################
# virtual_machines.tf
#####################
@@ -306,7 +312,7 @@ variable "vm-deploy-namespace" {
description = "The namespace where your VMs will be deployed."
validation {
- condition = var.deploy-maas ? !contains(var.vm-deploy-namespace, "REPLACE ME") && length(var.vm-deploy-namespace) != 0 : true
+ condition = var.deploy-maas ? var.vm-deploy-namespace != "REPLACE ME" && length(var.vm-deploy-namespace) != 0 : true
error_message = "Provide a valid target namespace for your VM deployment."
}
}
@@ -316,7 +322,7 @@ variable "vm-deploy-name" {
description = "The namespace where your VMs will be deployed."
validation {
- condition = var.deploy-maas ? !contains(var.vm-deploy-name, "REPLACE ME") && length(var.vm-deploy-name) != 0 : true
+ condition = var.deploy-maas ? var.vm-deploy-name != "REPLACE ME" && length(var.vm-deploy-name) != 0 : true
error_message = "Provide a valid name for your VM."
}
}
@@ -326,7 +332,7 @@ variable "vm-labels" {
description = "The namespace where your VMs will be deployed."
validation {
- condition = var.deploy-maas ? !contains(var.vm-labels, "REPLACE ME") && length(var.vm-labels) != 0 : true
+ condition = var.deploy-maas ? var.vm-labels != "REPLACE ME" && length(var.vm-labels) != 0 : true
error_message = "Provide valid labels for your VM."
}
}
@@ -336,8 +342,8 @@ variable "vm-storage-Gi" {
description = "The amount of storage to provision for your VM in Gi."
validation {
- condition = var.deploy-maas ? !contains(var.vm-storage-Gi, "REPLACE ME") && length(var.vm-storage-Gi) != 0 && endswith((var.vm-storage-Gi), "Gi") : true
- error_message = "Provide a valid amount of storage for your VM. You must include "Gi" at the end of your numerical value. Example: "50Gi"."
+ condition = var.deploy-maas ? var.vm-storage-Gi != "REPLACE ME" && length(var.vm-storage-Gi) != 0 && endswith((var.vm-storage-Gi), "Gi") : true
+ error_message = "Provide a valid amount of storage for your VM. You must include 'Gi' at the end of your numerical value. Example: '50Gi'."
}
}
@@ -379,7 +385,7 @@ variable "vm-memory-Gi" {
description = "The amount of storage to provision for your VM in Gi."
validation {
- condition = var.deploy-maas ? !contains(var.vm-memory-Gi, "REPLACE ME") && length(var.vm-memory-Gi) != 0 && endswith((var.vm-memory-Gi), "Gi") : true
- error_message = "Provide a valid amount of memory to allocate your VM. You must include "Gi" at the end of your numerical value. Example: "4Gi"."
+ condition = var.deploy-maas ? var.vm-memory-Gi != "REPLACE ME" && length(var.vm-memory-Gi) != 0 && endswith((var.vm-memory-Gi), "Gi") : true
+ error_message = "Provide a valid amount of memory to allocate your VM. You must include 'Gi' at the end of your numerical value. Example: '4Gi'."
}
}
\ No newline at end of file
diff --git a/terraform/vmo-cluster/manifests/cni-values.yaml b/terraform/vmo-cluster/manifests/cni-values.yaml
index 897c048..b25f61a 100644
--- a/terraform/vmo-cluster/manifests/cni-values.yaml
+++ b/terraform/vmo-cluster/manifests/cni-values.yaml
@@ -1,42 +1,57 @@
pack:
content:
images:
- - image: quay.io/cilium/certgen:v0.1.9
- - image: quay.io/cilium/cilium:v1.15.3
- - image: quay.io/cilium/cilium-envoy:v1.27.3-99c1c8f42c8de70fc8f6dd594f4a425cd38b6688
- - image: quay.io/cilium/cilium-etcd-operator:v2.0.7
- - image: quay.io/cilium/clustermesh-apiserver:v1.15.3
- - image: quay.io/cilium/hubble-relay:v1.15.3
- - image: quay.io/cilium/hubble-ui:v0.13.0
- - image: quay.io/cilium/hubble-ui-backend:v0.13.0
- - image: quay.io/cilium/operator:v1.15.3
- - image: quay.io/cilium/operator-generic:v1.15.3
- - image: quay.io/cilium/operator-aws:v1.15.3
- - image: quay.io/cilium/operator-azure:v1.15.3
- - image: quay.io/cilium/startup-script:62093c5c233ea914bfa26a10ba41f8780d9b737f
- - image: ghcr.io/spiffe/spire-agent:1.8.5
- - image: ghcr.io/spiffe/spire-server:1.8.5
- - image: docker.io/library/busybox:1.36.1
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/certgen:v0.2.1
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/cilium:v1.17.1
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/cilium-envoy:v1.31.5-1739264036-958bef243c6c66fcfd73ca319f2eb49fff1eb2ae
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/clustermesh-apiserver:v1.17.1
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/hubble-relay:v1.17.1
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/hubble-ui:v0.13.1
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/hubble-ui-backend:v0.13.1
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/operator:v1.17.1
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/operator-generic:v1.17.1
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/operator-aws:v1.17.1
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/operator-azure:v1.17.1
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/startup-script:c54c7edeab7fde4da68e59acd319ab24af242c3f
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/spire-agent:1.9.6
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/spire-server:1.9.6
+ - image: us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/busybox:v1.36.1
charts:
- repo: https://helm.cilium.io/
name: cilium
- version: 1.15.3
+ version: 1.17.1
#The namespace (on the target cluster) to install this chart
#When not found, a new namespace will be created
namespace: kube-system
charts:
cilium:
- # upgradeCompatibility helps users upgrading to ensure that the configMap for
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- namespaceOverride allows to override the destination namespace for Cilium resources.
+ # This property allows to use Cilium as part of an Umbrella Chart with different targets.
+ namespaceOverride: ""
+ # @schema
+ # type: [null, object]
+ # @schema
+ # -- commonLabels allows users to add common labels for all Cilium resources.
+ commonLabels: {}
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- upgradeCompatibility helps users upgrading to ensure that the configMap for
# Cilium will not change critical values to ensure continued operation
# This flag is not required for new installations.
- # For example: 1.7, 1.8, 1.9
- # upgradeCompatibility: '1.8'
-
+ # For example: '1.7', '1.8', '1.9'
+ upgradeCompatibility: null
debug:
# -- Enable debug logging
enabled: false
+ # @schema
+ # type: [null, string]
+ # @schema
# -- Configure verbosity levels for debug logging
# This option is used to enable debug messages for operations related to such
# sub-system such as (e.g. kvstore, envoy, datapath or policy), and flow is
@@ -50,45 +65,81 @@ charts:
# - datapath
# - policy
verbose: ~
-
rbac:
# -- Enable creation of Resource-Based Access Control configuration.
create: true
-
# -- Configure image pull secrets for pulling container images
- imagePullSecrets:
+ imagePullSecrets: []
# - name: "image-pull-secret"
+ # -- Configure iptables--random-fully. Disabled by default. View https://github.com/cilium/cilium/issues/13037 for more information.
+ iptablesRandomFully: false
# -- (string) Kubernetes config path
# @default -- `"~/.kube/config"`
kubeConfigPath: ""
- # -- (string) Kubernetes service host - use "auto" for automatic lookup from the cluster-info ConfigMap (kubeadm-based clusters only)
+ # -- (string) Kubernetes service host - use "auto" for automatic lookup from the cluster-info ConfigMap
k8sServiceHost: ""
+ # @schema
+ # type: [string, integer]
+ # @schema
# -- (string) Kubernetes service port
k8sServicePort: ""
-
- # -- Configure the client side rate limit for the agent and operator
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- (string) When `k8sServiceHost=auto`, allows to customize the configMap name. It defaults to `cluster-info`.
+ k8sServiceLookupConfigMapName: ""
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- (string) When `k8sServiceHost=auto`, allows to customize the namespace that contains `k8sServiceLookupConfigMapName`. It defaults to `kube-public`.
+ k8sServiceLookupNamespace: ""
+ # -- Configure the client side rate limit for the agent
#
# If the amount of requests to the Kubernetes API server exceeds the configured
- # rate limit, the agent and operator will start to throttle requests by delaying
+ # rate limit, the agent will start to throttle requests by delaying
# them until there is budget or the request times out.
k8sClientRateLimit:
+ # @schema
+ # type: [null, integer]
+ # @schema
# -- (int) The sustained request rate in requests per second.
- # @default -- 5 for k8s up to 1.26. 10 for k8s version 1.27+
- qps:
+ # @default -- 10
+ qps: # @schema
+
+ # type: [null, integer]
+ # @schema
# -- (int) The burst request rate in requests per second.
# The rate limiter will allow short bursts with a higher rate.
- # @default -- 10 for k8s up to 1.26. 20 for k8s version 1.27+
- burst:
+ # @default -- 20
+ burst: # -- Configure the client side rate limit for the Cilium Operator
+ operator:
+ # @schema
+ # type: [null, integer]
+ # @schema
+ # -- (int) The sustained request rate in requests per second.
+ # @default -- 100
+ qps: # @schema
+
+ # type: [null, integer]
+ # @schema
+ # -- (int) The burst request rate in requests per second.
+ # The rate limiter will allow short bursts with a higher rate.
+ # @default -- 200
+ burst:
cluster:
# -- Name of the cluster. Only required for Cluster Mesh and mutual authentication with SPIRE.
+ # It must respect the following constraints:
+ # * It must contain at most 32 characters;
+ # * It must begin and end with a lower case alphanumeric character;
+ # * It may contain lower case alphanumeric characters and dashes between.
+ # The "default" name cannot be used if the Cluster ID is different from 0.
name: default
# -- (int) Unique ID of the cluster. Must be unique across all connected
# clusters and in the range of 1 to 255. Only required for Cluster Mesh,
# may be 0 if Cluster Mesh is not used.
id: 0
-
# -- Define serviceAccount names for components.
# @default -- Component's fully qualified name.
serviceAccounts:
@@ -113,11 +164,6 @@ charts:
name: cilium-envoy
automount: true
annotations: {}
- etcd:
- create: true
- name: cilium-etcd-operator
- automount: true
- annotations: {}
operator:
create: true
name: cilium-operator
@@ -155,82 +201,84 @@ charts:
name: hubble-generate-certs
automount: true
annotations: {}
-
# -- Configure termination grace period for cilium-agent DaemonSet.
terminationGracePeriodSeconds: 1
-
# -- Install the cilium agent resources.
agent: true
-
# -- Agent container name.
name: cilium
-
# -- Roll out cilium agent pods automatically when configmap is updated.
rollOutCiliumPods: false
-
# -- Agent container image.
image:
+ # @schema
+ # type: [null, string]
+ # @schema
override: ~
- repository: "quay.io/cilium/cilium"
- tag: "v1.15.3"
+ repository: "us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/cilium"
+ tag: "v1.17.1"
pullPolicy: "IfNotPresent"
# cilium-digest
digest: ""
useDigest: false
-
+ # -- Scheduling configurations for cilium pods
+ scheduling:
+ # @schema
+ # enum: ["anti-affinity", "kube-scheduler"]
+ # @schema
+ # -- Mode specifies how Cilium daemonset pods should be scheduled to Nodes.
+ # `anti-affinity` mode applies a pod anti-affinity rule to the cilium daemonset.
+ # Pod anti-affinity may significantly impact scheduling throughput for large clusters.
+ # See: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ # `kube-scheduler` mode forgoes the anti-affinity rule for full scheduling throughput.
+ # Kube-scheduler avoids host port conflict when scheduling pods.
+ # @default -- Defaults to apply a pod anti-affinity rule to the agent pod - `anti-affinity`
+ mode: anti-affinity
# -- Affinity for cilium-agent.
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- - topologyKey: kubernetes.io/hostname
- labelSelector:
- matchLabels:
- k8s-app: cilium
-
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
# -- Node selector for cilium-agent.
nodeSelector:
kubernetes.io/os: linux
-
# -- Node tolerations for agent scheduling to nodes with taints
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations:
- - operator: Exists
- # - key: "key"
- # operator: "Equal|Exists"
- # value: "value"
- # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
-
- # -- The priority class to use for cilium-agent.
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+ # -- The priority class to use for cilium-agent.
priorityClassName: ""
-
# -- DNS policy for Cilium agent pods.
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
dnsPolicy: ""
-
# -- Additional containers added to the cilium DaemonSet.
extraContainers: []
-
+ # -- Additional initContainers added to the cilium Daemonset.
+ extraInitContainers: []
# -- Additional agent container arguments.
extraArgs: []
-
# -- Additional agent container environment variables.
extraEnv: []
-
# -- Additional agent hostPath mounts.
extraHostPathMounts: []
- # - name: host-mnt-data
- # mountPath: /host/mnt/data
- # hostPath: /mnt/data
- # hostPathType: Directory
- # readOnly: true
- # mountPropagation: HostToContainer
+ # - name: host-mnt-data
+ # mountPath: /host/mnt/data
+ # hostPath: /mnt/data
+ # hostPathType: Directory
+ # readOnly: true
+ # mountPropagation: HostToContainer
# -- Additional agent volumes.
extraVolumes: []
-
# -- Additional agent volumeMounts.
extraVolumeMounts: []
-
# -- extraConfig allows you to specify additional configuration parameters to be
# included in the cilium-config configmap.
extraConfig: {}
@@ -242,29 +290,27 @@ charts:
# -- Annotations to be added to all top-level cilium-agent objects (resources under templates/cilium-agent)
annotations: {}
-
# -- Security Context for cilium-agent pods.
- podSecurityContext: {}
-
+ podSecurityContext:
+ # -- AppArmorProfile options for the `cilium-agent` and init containers
+ appArmorProfile:
+ type: "Unconfined"
# -- Annotations to be added to agent pods
podAnnotations: {}
-
# -- Labels to be added to agent pods
podLabels: {}
-
# -- Agent resource limits & requests
# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
- # limits:
- # cpu: 4000m
- # memory: 4Gi
- # requests:
- # cpu: 100m
- # memory: 512Mi
+ # limits:
+ # cpu: 4000m
+ # memory: 4Gi
+ # requests:
+ # cpu: 100m
+ # memory: 512Mi
# -- resources & limits for the agent init containers
initResources: {}
-
securityContext:
# -- User to run the pod with
# runAsUser: 0
@@ -292,6 +338,7 @@ charts:
- IPC_LOCK
# Used in iptables. Consider removing once we are iptables-free
- SYS_MODULE
+ # Needed to switch network namespaces (used for health endpoint, socket-LB).
# We need it for now but might not need it for >= 5.11 specially
# for the 'SYS_RESOURCE'.
# In >= 5.8 there's already BPF and PERMON capabilities
@@ -346,28 +393,31 @@ charts:
# If available, SYS_ADMIN can be removed.
#- PERFMON
#- BPF
-
- # -- Cilium agent update strategy
+ # -- Cilium agent update strategy
updateStrategy:
type: RollingUpdate
rollingUpdate:
+ # @schema
+ # type: [integer, string]
+ # @schema
maxUnavailable: 2
-
# Configuration Values for cilium-agent
-
aksbyocni:
# -- Enable AKS BYOCNI integration.
# Note that this is incompatible with AKS clusters not created in BYOCNI mode:
# use Azure integration (`azure.enabled`) instead.
enabled: false
-
+ # @schema
+ # type: [boolean, string]
+ # @schema
# -- Enable installation of PodCIDR routes between worker
# nodes if worker nodes share a common L2 network segment.
autoDirectNodeRoutes: false
-
+ # -- Enable skipping of PodCIDR routes between worker
+ # nodes if the worker nodes are in a different L2 network segment.
+ directRoutingSkipUnreachable: false
# -- Annotate k8s node upon initialization with Cilium's metadata.
annotateK8sNode: false
-
azure:
# -- Enable Azure integration.
# Note that this is incompatible with AKS clusters created in BYOCNI mode: use
@@ -380,11 +430,9 @@ charts:
# clientID: 00000000-0000-0000-0000-000000000000
# clientSecret: 00000000-0000-0000-0000-000000000000
# userAssignedIdentityID: 00000000-0000-0000-0000-000000000000
-
alibabacloud:
# -- Enable AlibabaCloud ENI integration
enabled: false
-
# -- Enable bandwidth manager to optimize TCP and UDP workloads and allow
# for rate-limiting traffic from individual Pods with EDT (Earliest Departure
# Time) through the "kubernetes.io/egress-bandwidth" Pod annotation.
@@ -393,19 +441,16 @@ charts:
enabled: false
# -- Activate BBR TCP congestion control for Pods
bbr: false
-
# -- Configure standalone NAT46/NAT64 gateway
nat46x64Gateway:
# -- Enable RFC8215-prefixed translation
enabled: false
-
# -- EnableHighScaleIPcache enables the special ipcache mode for high scale
# clusters. The ipcache content will be reduced to the strict minimum and
# traffic will be encapsulated to carry security identities.
highScaleIPcache:
# -- Enable the high scale mode for the ipcache.
enabled: false
-
# -- Configure L2 announcements
l2announcements:
# -- Enable L2 announcements
@@ -416,25 +461,12 @@ charts:
# leaseRenewDeadline: 5s
# -- The timeout between retries if renewal fails
# leaseRetryPeriod: 2s
-
- # -- Configure L2 pod announcements
+ # -- Configure L2 pod announcements
l2podAnnouncements:
# -- Enable L2 pod announcements
enabled: false
# -- Interface used for sending Gratuitous ARP pod announcements
interface: "eth0"
-
- # -- Configure BGP
- bgp:
- # -- Enable BGP support inside Cilium; embeds a new ConfigMap for BGP inside
- # cilium-agent and cilium-operator
- enabled: false
- announce:
- # -- Enable allocation and announcement of service LoadBalancer IPs
- loadbalancerIP: false
- # -- Enable announcement of node pod CIDR
- podCIDR: false
-
# -- This feature set enables virtual BGP routers to be created via
# CiliumBGPPeeringPolicy CRDs.
bgpControlPlane:
@@ -446,12 +478,16 @@ charts:
create: false
# -- The name of the secret namespace to which Cilium agents are given read access
name: kube-system
-
+ # -- Status reporting settings (BGPv2 only)
+ statusReport:
+ # -- Enable/Disable BGPv2 status reporting
+ # It is recommended to enable status reporting in general, but if you have any issue
+ # such as high API server load, you can disable it by setting this to false.
+ enabled: true
pmtuDiscovery:
# -- Enable path MTU discovery to send ICMP fragmentation-needed replies to
# the client.
enabled: false
-
bpf:
autoMount:
# -- Enable automatic mount of BPF filesystem
@@ -463,112 +499,189 @@ charts:
enabled: true
# -- Configure the mount point for the BPF filesystem
root: /sys/fs/bpf
-
# -- Enables pre-allocation of eBPF map values. This increases
# memory usage but can reduce latency.
preallocateMaps: false
-
+ # @schema
+ # type: [null, integer]
+ # @schema
# -- (int) Configure the maximum number of entries in auth map.
# @default -- `524288`
authMapMax: ~
-
+ # -- Enable CT accounting for packets and bytes
+ ctAccounting: false
+ # @schema
+ # type: [null, integer]
+ # @schema
# -- (int) Configure the maximum number of entries in the TCP connection tracking
# table.
# @default -- `524288`
ctTcpMax: ~
-
+ # @schema
+ # type: [null, integer]
+ # @schema
# -- (int) Configure the maximum number of entries for the non-TCP connection
# tracking table.
# @default -- `262144`
ctAnyMax: ~
-
+ # -- Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble.
+ # Helm configuration for BPF events map rate limiting is experimental and might change
+ # in upcoming releases.
+ events:
+ # -- Default settings for all types of events except dbg and pcap.
+ default:
+ # -- (int) Configure the limit of messages per second that can be written to
+ # BPF events map. The number of messages is averaged, meaning that if no messages
+ # were written to the map over 5 seconds, it's possible to write more events
+ # in the 6th second. If rateLimit is greater than 0, non-zero value for burstLimit must
+ # also be provided lest the configuration is considered invalid. Setting both burstLimit
+ # and rateLimit to 0 disables BPF events rate limiting.
+ # @default -- `0`
+ rateLimit: ~
+ # -- (int) Configure the maximum number of messages that can be written to BPF events
+ # map in 1 second. If burstLimit is greater than 0, non-zero value for rateLimit must
+ # also be provided lest the configuration is considered invalid. Setting both burstLimit
+ # and rateLimit to 0 disables BPF events rate limiting.
+ # @default -- `0`
+ burstLimit: ~
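+        # Illustrative only (not part of the original pack values): per the note
+        # above, rateLimit and burstLimit must both be zero or both be non-zero.
+        # A valid non-zero pair could be, e.g.
+        # rateLimit: 50
+        # burstLimit: 100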
+ drop:
+ # -- Enable drop events.
+ enabled: true
+ policyVerdict:
+ # -- Enable policy verdict events.
+ enabled: true
+ trace:
+ # -- Enable trace events.
+ enabled: true
+ # @schema
+ # type: [null, integer]
+ # @schema
# -- Configure the maximum number of service entries in the
# load balancer maps.
lbMapMax: 65536
-
+ # @schema
+ # type: [null, integer]
+ # @schema
# -- (int) Configure the maximum number of entries for the NAT table.
# @default -- `524288`
natMax: ~
-
+ # @schema
+ # type: [null, integer]
+ # @schema
# -- (int) Configure the maximum number of entries for the neighbor table.
# @default -- `524288`
neighMax: ~
-
+ # @schema
+ # type: [null, integer]
+ # @schema
+ # @default -- `16384`
+ # -- (int) Configures the maximum number of entries for the node table.
+ nodeMapMax: ~
# -- Configure the maximum number of entries in endpoint policy map (per endpoint).
+ # @schema
+ # type: [null, integer]
+ # @schema
policyMapMax: 16384
-
+ # @schema
+ # type: [null, number]
+ # @schema
# -- (float64) Configure auto-sizing for all BPF maps based on available memory.
# ref: https://docs.cilium.io/en/stable/network/ebpf/maps/
# @default -- `0.0025`
mapDynamicSizeRatio: ~
-
# -- Configure the level of aggregation for monitor notifications.
# Valid options are none, low, medium, maximum.
monitorAggregation: medium
-
# -- Configure the typical time between monitor notifications for
# active connections.
monitorInterval: "5s"
-
# -- Configure which TCP flags trigger notifications when seen for the
# first time in a connection.
monitorFlags: "all"
-
- # -- Allow cluster external access to ClusterIP services.
+ # -- (bool) Allow cluster external access to ClusterIP services.
+ # @default -- `false`
lbExternalClusterIP: false
-
+ # -- (bool) Enable loadBalancerSourceRanges CIDR filtering for all service
+ # types, not just LoadBalancer services. The corresponding NodePort and
+ # ClusterIP (if enabled for cluster-external traffic) will also apply the
+ # CIDR filter.
+ # @default -- `false`
+ lbSourceRangeAllTypes: false
+ # -- (bool) Enable the option to define the load balancing algorithm on
+ # a per-service basis through service.cilium.io/lb-algorithm annotation.
+ # @default -- `false`
+ lbAlgorithmAnnotation: false
+ # -- (bool) Enable the option to define the load balancing mode (SNAT or DSR)
+ # on a per-service basis through service.cilium.io/forwarding-mode annotation.
+ # @default -- `false`
+ lbModeAnnotation: false
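+    # Illustrative only (not part of the original pack values): with the two
+    # annotation options above enabled, an individual Service could opt in via
+    # annotations such as, e.g.
+    #   service.cilium.io/lb-algorithm: maglev
+    #   service.cilium.io/forwarding-mode: dsr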
+ # @schema
+ # type: [null, boolean]
+ # @schema
# -- (bool) Enable native IP masquerade support in eBPF
# @default -- `false`
masquerade: ~
-
+ # @schema
+ # type: [null, boolean]
+ # @schema
# -- (bool) Configure whether direct routing mode should route traffic via
# host stack (true) or directly and more efficiently out of BPF (false) if
# the kernel supports it. The latter has the implication that it will also
# bypass netfilter in the host namespace.
# @default -- `false`
hostLegacyRouting: ~
-
- # -- (bool) Configure the eBPF-based TPROXY to reduce reliance on iptables rules
+ # @schema
+ # type: [null, boolean]
+ # @schema
+ # -- (bool) Configure the eBPF-based TPROXY (beta) to reduce reliance on iptables rules
# for implementing Layer 7 policy.
# @default -- `false`
tproxy: ~
-
+ # @schema
+ # type: [null, array]
+ # @schema
# -- (list) Configure explicitly allowed VLAN id's for bpf logic bypass.
# [0] will allow all VLAN id's without any filtering.
# @default -- `[]`
vlanBypass: ~
-
+ # -- (bool) Disable ExternalIP mitigation (CVE-2020-8554)
+ # @default -- `false`
+ disableExternalIPMitigation: false
+ # -- (bool) Attach endpoint programs using tcx instead of legacy tc hooks on
+ # supported kernels.
+ # @default -- `true`
+ enableTCX: true
+ # -- (string) Mode for Pod devices for the core datapath (veth, netkit, netkit-l2, lb-only)
+ # @default -- `veth`
+ datapathMode: veth
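+    # Illustrative only (not part of the original pack values): on kernels with
+    # netkit support, one of the listed alternatives could be selected, e.g.
+    # datapathMode: netkit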
# -- Enable BPF clock source probing for more efficient tick retrieval.
bpfClockProbe: false
-
# -- Clean all eBPF datapath state from the initContainer of the cilium-agent
# DaemonSet.
#
# WARNING: Use with care!
cleanBpfState: false
-
# -- Clean all local Cilium state from the initContainer of the cilium-agent
# DaemonSet. Implies cleanBpfState: true.
#
# WARNING: Use with care!
cleanState: false
-
# -- Wait for KUBE-PROXY-CANARY iptables rule to appear in "wait-for-kube-proxy"
# init container before launching cilium-agent.
# More context can be found in the commit message of below PR
# https://github.com/cilium/cilium/pull/20123
waitForKubeProxy: false
-
cni:
# -- Install the CNI configuration and binary files into the filesystem.
install: true
-
# -- Remove the CNI configuration and binary files on agent shutdown. Enable this
# if you're removing Cilium from the cluster. Disable this to prevent the CNI
# configuration file from being removed during agent upgrade, which can cause
# nodes to go unmanageable.
uninstall: false
-
+ # @schema
+ # type: [null, string]
+ # @schema
# -- Configure chaining on top of other CNI plugins. Possible values:
# - none
# - aws-cni
@@ -576,34 +689,25 @@ charts:
# - generic-veth
# - portmap
chainingMode: ~
-
+ # @schema
+ # type: [null, string]
+ # @schema
# -- A CNI network name in to which the Cilium plugin should be added as a chained plugin.
# This will cause the agent to watch for a CNI network with this network name. When it is
# found, this will be used as the basis for Cilium's CNI configuration file. If this is
# set, it assumes a chaining mode of generic-veth. As a special case, a chaining mode
# of aws-cni implies a chainingTarget of aws-cni.
chainingTarget: ~
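+  # Illustrative only (not part of the original pack values): chaining Cilium on
+  # top of the AWS VPC CNI would set, e.g.
+  # chainingMode: aws-cni
+  # which, per the note above, implies a chainingTarget of aws-cni.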
-
- # -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the
- # node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
- # This ensures no Pods can be scheduled using other CNI plugins during Cilium
- # agent downtime.
- exclusive: false
-
# -- Configure the log file for CNI logging with retention policy of 7 days.
# Disable CNI file logging by setting this field to empty explicitly.
logFile: /var/run/cilium/cilium-cni.log
-
# -- Skip writing of the CNI configuration. This can be used if
# writing of the CNI configuration is performed by external automation.
customConf: false
-
# -- Configure the path to the CNI configuration directory on the host.
confPath: /etc/cni/net.d
-
# -- Configure the path to the CNI binary directory on the host.
binPath: /opt/cni/bin
-
# -- Specify the path to a CNI config to read from on agent start.
# This can be useful if you want to manage your CNI
# configuration outside of a Kubernetes environment. This parameter is
@@ -619,59 +723,49 @@ charts:
# -- Configure the key in the CNI ConfigMap to read the contents of
# the CNI configuration from.
configMapKey: cni-config
-
# -- Configure the path to where to mount the ConfigMap inside the agent pod.
confFileMountPath: /tmp/cni-configuration
-
# -- Configure the path to where the CNI configuration directory is mounted
# inside the agent pod.
hostConfDirMountPath: /host/etc/cni/net.d
-
# -- Specifies the resources for the cni initContainer
resources:
requests:
cpu: 100m
memory: 10Mi
-
+ # -- Enable route MTU for pod netns when CNI chaining is used
+ enableRouteMTUForCNIChaining: false
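+   # -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the
+   # node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
+   # This ensures no Pods can be scheduled using other CNI plugins during Cilium
+   # agent downtime.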
+ exclusive: false
# -- (string) Configure how frequently garbage collection should occur for the datapath
# connection tracking table.
# @default -- `"0s"`
conntrackGCInterval: ""
-
# -- (string) Configure the maximum frequency for the garbage collection of the
# connection tracking table. Only affects the automatic computation for the frequency
# and has no effect when 'conntrackGCInterval' is set. This can be set to more frequently
# clean up unused identities created from ToFQDN policies.
conntrackGCMaxInterval: ""
-
- # -- Configure container runtime specific integration.
- # Deprecated in favor of bpf.autoMount.enabled. To be removed in 1.15.
- containerRuntime:
- # -- Enables specific integrations for container runtimes.
- # Supported values:
- # - crio
- # - none
- integration: none
-
# -- (string) Configure timeout in which Cilium will exit if CRDs are not available
# @default -- `"5m"`
crdWaitTimeout: ""
-
# -- Tail call hooks for custom eBPF programs.
customCalls:
# -- Enable tail call hooks for custom eBPF programs.
enabled: false
-
daemon:
# -- Configure where Cilium runtime state should be stored.
runPath: "/var/run/cilium"
-
+ # @schema
+ # type: [null, string]
+ # @schema
# -- Configure a custom list of possible configuration override sources
# The default is "config-map:cilium-config,cilium-node-config". For supported
# values, see the help text for the build-config subcommand.
# Note that this value should be a comma-separated string.
configSources: ~
-
+ # @schema
+ # type: [null, string]
+ # @schema
# -- allowedConfigOverrides is a list of config-map keys that can be overridden.
# That is to say, if this value is set, config sources (excepting the first one) can
# only override keys in this list.
@@ -681,7 +775,9 @@ charts:
# By default, all keys may be overridden. To disable overrides, set this to "none" or
# change the configSources variable.
allowedConfigOverrides: ~
-
+ # @schema
+ # type: [null, string]
+ # @schema
# -- blockedConfigOverrides is a list of config-map keys that may not be overridden.
# In other words, if any of these keys appear in a configuration source excepting the
# first one, they will be ignored
@@ -690,7 +786,14 @@ charts:
#
# By default, all keys may be overridden.
blockedConfigOverrides: ~
-
+ # @schema
+ # type: [null, boolean]
+ # @schema
+ # -- enableSourceIPVerification is a boolean flag to enable or disable the Source IP verification
+ # of endpoints. This flag is useful when Cilium is chained with other CNIs.
+ #
+ # By default, this functionality is enabled
+ enableSourceIPVerification: true
# -- Specify which network interfaces can run the eBPF datapath. This means
# that a packet sent from a pod to a destination outside the cluster will be
# masqueraded (to an output device IPv4 address), if the output device runs the
@@ -702,8 +805,11 @@ charts:
# devices. When devices change the eBPF datapath is reloaded and services updated.
# If "devices" is set then only those devices, or devices matching a wildcard will
# be considered.
- enableRuntimeDeviceDetection: false
-
+ #
+ # This option has been deprecated and is a no-op.
+ enableRuntimeDeviceDetection: true
+ # -- Forces the auto-detection of devices, even if specific devices are explicitly listed
+ forceDeviceDetection: false
# -- Chains to ignore when installing feeder rules.
# disableIptablesFeederRules: ""
@@ -716,65 +822,80 @@ charts:
# -- Enable Kubernetes EndpointSlice feature in Cilium if the cluster supports it.
# enableK8sEndpointSlice: true
- # -- Enable CiliumEndpointSlice feature.
+ # -- Enable CiliumEndpointSlice feature (deprecated, please use `ciliumEndpointSlice.enabled` instead).
enableCiliumEndpointSlice: false
-
+ ciliumEndpointSlice:
+ # -- Enable Cilium EndpointSlice feature.
+ enabled: false
+ # -- List of rate limit options to be used for the CiliumEndpointSlice controller.
+ # Each object in the list must have the following fields:
+ # nodes: Count of nodes at which to apply the rate limit.
+ # limit: The sustained request rate in requests per second. The maximum rate that can be configured is 50.
+ # burst: The burst request rate in requests per second. The maximum burst that can be configured is 100.
+ rateLimits:
+ - nodes: 0
+ limit: 10
+ burst: 20
+ - nodes: 100
+ limit: 50
+ burst: 100
+ # @schema
+ # enum: ["identity", "fcfs"]
+ # @schema
+ # -- The slicing mode to use for CiliumEndpointSlices.
+ # identity groups together CiliumEndpoints that share the same identity.
+ # fcfs groups together CiliumEndpoints in a first-come-first-serve basis, filling in the largest non-full slice first.
+ sliceMode: identity
envoyConfig:
# -- Enable CiliumEnvoyConfig CRD
# CiliumEnvoyConfig CRD can also be implicitly enabled by other options.
enabled: false
-
# -- SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from.
secretsNamespace:
# -- Create secrets namespace for CiliumEnvoyConfig CRDs.
create: true
-
# -- The name of the secret namespace to which Cilium agents are given read access.
name: cilium-secrets
-
+ # -- Interval in which an attempt is made to reconcile failed EnvoyConfigs. If the duration is zero, the retry is deactivated.
+ retryInterval: 15s
ingressController:
# -- Enable cilium ingress controller
# This will automatically set enable-envoy-config as well.
enabled: false
-
# -- Set cilium ingress controller to be the default ingress controller
# This will let cilium ingress controller route entries without ingress class set
default: false
-
# -- Default ingress load balancer mode
# Supported values: shared, dedicated
- # For granular control, use the following annotations on the ingress resource
- # ingress.cilium.io/loadbalancer-mode: shared|dedicated,
+ # For granular control, use the following annotations on the ingress resource:
+ # "ingress.cilium.io/loadbalancer-mode: dedicated" (or "shared").
loadbalancerMode: dedicated
-
# -- Enforce https for host having matching TLS host in Ingress.
# Incoming traffic to http listener will return 308 http error code with respective location in header.
enforceHttps: true
-
# -- Enable proxy protocol for all Ingress listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled.
enableProxyProtocol: false
-
# -- IngressLBAnnotations are the annotation and label prefixes, which are used to filter annotations and/or labels to propagate from Ingress to the Load Balancer service
- ingressLBAnnotationPrefixes: ['service.beta.kubernetes.io', 'service.kubernetes.io', 'cloud.google.com']
-
+ ingressLBAnnotationPrefixes: [ 'lbipam.cilium.io', 'nodeipam.cilium.io', 'service.beta.kubernetes.io', 'service.kubernetes.io', 'cloud.google.com' ]
+ # @schema
+ # type: [null, string]
+ # @schema
# -- Default secret namespace for ingresses without .spec.tls[].secretName set.
- defaultSecretNamespace:
+ defaultSecretNamespace:
+ # @schema
+ # type: [null, string]
+ # @schema
# -- Default secret name for ingresses without .spec.tls[].secretName set.
- defaultSecretName:
+ defaultSecretName:
+ # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from.
- # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from.
secretsNamespace:
# -- Create secrets namespace for Ingress.
create: true
-
# -- Name of Ingress secret namespace.
name: cilium-secrets
-
# -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name.
# If disabled, TLS secrets must be maintained externally.
sync: true
-
# -- Load-balancer service in shared mode.
# This is a single load-balancer service for all Ingress resources.
service:
@@ -786,130 +907,150 @@ charts:
annotations: {}
# -- Service type for the shared LB service
type: LoadBalancer
+ # @schema
+ # type: [null, integer]
+ # @schema
# -- Configure a specific nodePort for insecure HTTP traffic on the shared LB service
insecureNodePort: ~
+ # @schema
+ # type: [null, integer]
+ # @schema
# -- Configure a specific nodePort for secure HTTPS traffic on the shared LB service
- secureNodePort : ~
+ secureNodePort: ~
+ # @schema
+ # type: [null, string]
+ # @schema
# -- Configure a specific loadBalancerClass on the shared LB service (requires Kubernetes 1.24+)
loadBalancerClass: ~
+ # @schema
+ # type: [null, string]
+ # @schema
# -- Configure a specific loadBalancerIP on the shared LB service
- loadBalancerIP : ~
+ loadBalancerIP: ~
+ # @schema
+ # type: [null, boolean]
+ # @schema
# -- Configure if node port allocation is required for LB service
# ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation
allocateLoadBalancerNodePorts: ~
-
+ # -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for Cilium Ingress in shared mode.
+ # Valid values are "Cluster" and "Local".
+ # ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy
+ externalTrafficPolicy: Cluster
+ # Host Network related configuration
+ hostNetwork:
+ # -- Configure whether the Envoy listeners should be exposed on the host network.
+ enabled: false
+ # -- Configure a specific port on the host network that gets used for the shared listener.
+ sharedListenerPort: 8080
+ # Specify the nodes where the Ingress listeners should be exposed
+ nodes:
+ # -- Specify the labels of the nodes where the Ingress listeners should be exposed
+ #
+ # matchLabels:
+ # kubernetes.io/os: linux
+ # kubernetes.io/hostname: kind-worker
+ matchLabels: {}
gatewayAPI:
# -- Enable support for Gateway API in cilium
# This will automatically set enable-envoy-config as well.
enabled: false
-
+ # -- Enable proxy protocol for all GatewayAPI listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled.
+ enableProxyProtocol: false
+ # -- Enable Backend Protocol selection support (GEP-1911) for Gateway API via appProtocol.
+ enableAppProtocol: false
+ # -- Enable ALPN for all listeners configured with Gateway API. ALPN will attempt HTTP/2, then HTTP 1.1.
+ # Note that this will also enable `appProtocol` support, and services that wish to use HTTP/2 will need to indicate that via their `appProtocol`.
+ enableAlpn: false
+ # -- The number of additional GatewayAPI proxy hops from the right side of the HTTP header to trust when determining the origin client's IP address.
+ xffNumTrustedHops: 0
+ # -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for all Cilium GatewayAPI Gateway instances. Valid values are "Cluster" and "Local".
+ # Note that this value will be ignored when `hostNetwork.enabled == true`.
+ # ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy
+ externalTrafficPolicy: Cluster
+ gatewayClass:
+ # -- Enable creation of GatewayClass resource
+ # The default value is 'auto' which decides according to presence of gateway.networking.k8s.io/v1/GatewayClass in the cluster.
+ # Other possible values are 'true' and 'false', which will either always or never create the GatewayClass, respectively.
+ create: auto
# -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from.
secretsNamespace:
# -- Create secrets namespace for Gateway API.
create: true
-
# -- Name of Gateway API secret namespace.
name: cilium-secrets
-
# -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name.
# If disabled, TLS secrets must be maintained externally.
sync: true
-
+ # Host Network related configuration
+ hostNetwork:
+ # -- Configure whether the Envoy listeners should be exposed on the host network.
+ enabled: false
+ # Specify the nodes where the Ingress listeners should be exposed
+ nodes:
+ # -- Specify the labels of the nodes where the Ingress listeners should be exposed
+ #
+ # matchLabels:
+ # kubernetes.io/os: linux
+ # kubernetes.io/hostname: kind-worker
+ matchLabels: {}
# -- Enables the fallback compatibility solution for when the xt_socket kernel
# module is missing and it is needed for the datapath L7 redirection to work
# properly. See documentation for details on when this can be disabled:
# https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel.
enableXTSocketFallback: true
-
encryption:
# -- Enable transparent network encryption.
enabled: false
-
# -- Encryption method. Can be either ipsec or wireguard.
type: ipsec
-
# -- Enable encryption for pure node to node traffic.
# This option is only effective when encryption.type is set to "wireguard".
nodeEncryption: false
-
# -- Configure the WireGuard Pod2Pod strict mode.
strictMode:
# -- Enable WireGuard Pod2Pod strict mode.
enabled: false
-
# -- CIDR for the WireGuard Pod2Pod strict mode.
cidr: ""
-
# -- Allow dynamic lookup of remote node identities.
# This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap.
allowRemoteNodeIdentities: false
-
ipsec:
# -- Name of the key file inside the Kubernetes secret configured via secretName.
- keyFile: ""
-
+ keyFile: keys
# -- Path to mount the secret inside the Cilium pod.
- mountPath: ""
-
+ mountPath: /etc/ipsec
# -- Name of the Kubernetes secret containing the encryption keys.
- secretName: ""
-
+ secretName: cilium-ipsec-keys
# -- The interface to use for encrypted traffic.
interface: ""
-
# -- Enable the key watcher. If disabled, a restart of the agent will be
# necessary on key rotations.
keyWatcher: true
-
# -- Maximum duration of the IPsec key rotation. The previous key will be
# removed after that delay.
keyRotationDuration: "5m"
-
+ # -- Enable IPsec encrypted overlay
+ encryptedOverlay: false
wireguard:
- # -- Enables the fallback to the user-space implementation.
- userspaceFallback: false
- # -- Controls Wireguard PersistentKeepalive option. Set 0s to disable.
+ # -- Controls WireGuard PersistentKeepalive option. Set 0s to disable.
persistentKeepalive: 0s
-
- # -- Deprecated in favor of encryption.ipsec.keyFile. To be removed in 1.15.
- # Name of the key file inside the Kubernetes secret configured via secretName.
- # This option is only effective when encryption.type is set to ipsec.
- keyFile: keys
-
- # -- Deprecated in favor of encryption.ipsec.mountPath. To be removed in 1.15.
- # Path to mount the secret inside the Cilium pod.
- # This option is only effective when encryption.type is set to ipsec.
- mountPath: /etc/ipsec
-
- # -- Deprecated in favor of encryption.ipsec.secretName. To be removed in 1.15.
- # Name of the Kubernetes secret containing the encryption keys.
- # This option is only effective when encryption.type is set to ipsec.
- secretName: cilium-ipsec-keys
-
- # -- Deprecated in favor of encryption.ipsec.interface. To be removed in 1.15.
- # The interface to use for encrypted traffic.
- # This option is only effective when encryption.type is set to ipsec.
- interface: ""
-
endpointHealthChecking:
# -- Enable connectivity health checking between virtual endpoints.
enabled: true
-
- # -- Enable endpoint status.
- # Status can be: policy, health, controllers, log and / or state. For 2 or more options use a space.
- endpointStatus:
- enabled: false
- status: ""
-
endpointRoutes:
+ # @schema
+ # type: [boolean, string]
+ # @schema
# -- Enable use of per endpoint routes instead of routing via
# the cilium_host interface.
enabled: false
-
k8sNetworkPolicy:
# -- Enable support for K8s NetworkPolicy
enabled: true
-
+ # -- Enable endpoint lockdown on policy map overflow.
+ endpointLockdownOnMapOverflow: false
eni:
# -- Enable Elastic Network Interface (ENI) integration.
enabled: false
@@ -948,52 +1089,54 @@ charts:
# -- Filter via AWS EC2 Instance tags (k=v) which will dictate which AWS EC2 Instances
# are going to be used to create new ENIs
instanceTagsFilter: []
-
externalIPs:
# -- Enable ExternalIPs service support.
enabled: false
-
# fragmentTracking enables IPv4 fragment tracking support in the datapath.
# fragmentTracking: true
-
gke:
# -- Enable Google Kubernetes Engine integration
enabled: false
-
# -- Enable connectivity health checking.
healthChecking: true
-
# -- TCP port for the agent health API. This is not the port for cilium-health.
healthPort: 9879
-
+ # -- Number of ICMP requests sent for each health check before marking a node or endpoint unreachable.
+ healthCheckICMPFailureThreshold: 3
# -- Configure the host firewall.
hostFirewall:
# -- Enables the enforcement of host policies in the eBPF datapath.
enabled: false
-
hostPort:
# -- Enable hostPort service support.
enabled: false
-
# -- Configure socket LB
socketLB:
# -- Enable socket LB
enabled: false
-
+ hostNamespaceOnly: true
# -- Disable socket lb for non-root ns. This is used to enable Istio routing rules.
# hostNamespaceOnly: false
-
- # -- Configure certificate generation for Hubble integration.
- # If hubble.tls.auto.method=cronJob, these values are used
- # for the Kubernetes CronJob which will be scheduled regularly to
- # (re)generate any certificates not provided manually.
+ # -- Enable terminating pod connections to deleted service backends.
+ # terminatePodConnections: true
+ # -- Enables tracing for socket-based load balancing.
+ # tracing: true
+ # -- Configure certificate generation for Hubble integration.
+ # If hubble.tls.auto.method=cronJob, these values are used
+ # for the Kubernetes CronJob which will be scheduled regularly to
+ # (re)generate any certificates not provided manually.
certgen:
+ # -- When set to true the certificate authority secret is created.
+ generateCA: true
image:
+ # @schema
+ # type: [null, string]
+ # @schema
override: ~
- repository: "quay.io/cilium/certgen"
- tag: "v0.1.9"
- digest: "sha256:89a0847753686444daabde9474b48340993bd19c7bea66a46e45b2974b82041f"
- useDigest: true
+ repository: "us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/certgen"
+ tag: "v0.2.1"
+ digest: ""
+ useDigest: false
pullPolicy: "IfNotPresent"
# -- Seconds after which the completed job pod will be deleted
ttlSecondsAfterFinished: 1800
@@ -1003,26 +1146,26 @@ charts:
annotations:
job: {}
cronJob: {}
+ # -- Node selector for certgen
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ nodeSelector: {}
+ # -- Priority class for certgen
+ # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
+ priorityClassName: ""
# -- Node tolerations for pod assignment on nodes with taints
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations: []
-
# -- Additional certgen volumes.
extraVolumes: []
-
# -- Additional certgen volumeMounts.
extraVolumeMounts: []
-
# -- Affinity for certgen
affinity: {}
-
hubble:
# -- Enable Hubble (true by default).
enabled: true
-
# -- Annotations to be added to all top-level hubble objects (resources under templates/hubble)
annotations: {}
-
# -- Buffer size of the channel Hubble uses to receive monitor events. If this
# value is not set, the queue size is set to the default monitor queue size.
# eventQueueSize: ""
@@ -1037,6 +1180,9 @@ charts:
# See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics
# for more comprehensive documentation about Hubble metrics.
metrics:
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Configures the list of metrics to collect. If empty or null, metrics
# are disabled.
# Example:
@@ -1058,6 +1204,37 @@ charts:
enableOpenMetrics: false
# -- Configure the port the hubble metric server listens on.
port: 9965
+ tls:
+ # Enable hubble metrics server TLS.
+ enabled: false
+ # Configure hubble metrics server TLS.
+ server:
+ # -- Name of the Secret containing the certificate and key for the Hubble metrics server.
+ # If specified, cert and key are ignored.
+ existingSecret: ""
+ # -- base64 encoded PEM values for the Hubble metrics server certificate (deprecated).
+ # Use existingSecret instead.
+ cert: ""
+ # -- base64 encoded PEM values for the Hubble metrics server key (deprecated).
+ # Use existingSecret instead.
+ key: ""
+ # -- Extra DNS names added to certificate when it's auto generated
+ extraDnsNames: []
+ # -- Extra IP addresses added to certificate when it's auto generated
+ extraIpAddresses: []
+ # -- Configure mTLS for the Hubble metrics server.
+ mtls:
+      # When set to true, enforces mutual TLS between the Hubble metrics server and its clients.
+      # False allows non-mutual TLS connections.
+ # This option has no effect when TLS is disabled.
+ enabled: false
+ useSecret: false
+ # -- Name of the ConfigMap containing the CA to validate client certificates against.
+ # If mTLS is enabled and this is unspecified, it will default to the
+ # same CA used for Hubble metrics server certificates.
+ name: ~
+ # -- Entry of the ConfigMap containing the CA.
+ key: ca.crt
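+      # Illustrative only (not part of the original pack values): serving the
+      # metrics endpoint over TLS from a pre-created secret (hypothetical name)
+      # could look like, e.g.
+      #   tls:
+      #     enabled: true
+      #     server:
+      #       existingSecret: "hubble-metrics-server-certs"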
# -- Annotations to be added to hubble-metrics service.
serviceAnnotations: {}
serviceMonitor:
@@ -1079,21 +1256,44 @@ charts:
- __meta_kubernetes_pod_node_name
targetLabel: node
replacement: ${1}
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Metrics relabeling configs for the ServiceMonitor hubble
metricRelabelings: ~
+ # Configure TLS for the ServiceMonitor.
+ # Note, when using TLS you will either need to specify
+ # tlsConfig.insecureSkipVerify or specify a CA to use.
+ tlsConfig: {}
# -- Grafana dashboards for hubble
# grafana can import dashboards based on the label and value
# ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
dashboards:
enabled: false
label: grafana_dashboard
+ # @schema
+ # type: [null, string]
+ # @schema
namespace: ~
labelValue: "1"
annotations: {}
-
+ # Dynamic metrics may be reconfigured without a need of agent restarts.
+ dynamic:
+ enabled: false
+ config:
+ # ---- Name of configmap with configuration that may be altered to reconfigure metric handlers within a running agent.
+ configMapName: cilium-dynamic-metrics-config
+ # ---- True if helm installer should create config map.
+ # Switch to false if you want to self maintain the file content.
+ createConfigMap: true
+ # ---- Exporters configuration in YAML format.
+ content:
+ - name: all
+ contextOptions: []
+ includeFilters: []
+ excludeFilters: []
# -- Unix domain socket path to listen to when Hubble is enabled.
socketPath: /var/run/cilium/hubble.sock
-
# -- Enables redacting sensitive information present in Layer 7 flows.
redact:
enabled: false
@@ -1168,17 +1368,18 @@ charts:
# --set hubble.redact.enabled="true"
# --set hubble.redact.kafka.apiKey="true"
apiKey: false
-
# -- An additional address for Hubble to listen to.
# Set this field ":4244" if you are enabling Hubble Relay, as it assumes that
# Hubble is listening on port 4244.
listenAddress: ":4244"
# -- Whether Hubble should prefer to announce IPv6 or IPv4 addresses if both are available.
preferIpv6: false
+ # @schema
+ # type: [null, boolean]
+ # @schema
# -- (bool) Skip Hubble events with unknown cgroup ids
# @default -- `true`
skipUnknownCGroupIDs: ~
-
peerService:
# -- Service Port for the Peer service.
# If not set, it is dynamically assigned to port 443 if TLS is enabled and to
@@ -1212,7 +1413,10 @@ charts:
# - certmanager: This method use cert-manager to generate & rotate certificates.
method: helm
# -- Generated certificates validity duration in days.
- certValidityDuration: 1095
+ #
+ # Defaults to 365 days (1 year) because MacOS does not accept
+ # self-signed certificates with expirations > 825 days.
+ certValidityDuration: 365
# -- Schedule for certificates regeneration (regardless of their expiration date).
# Only used if method is "cronJob". If nil, then no recurring job will be created.
# Instead, only the one-shot job is deployed to generate the certificates at
@@ -1221,7 +1425,6 @@ charts:
# Defaults to midnight of the first day of every fourth month. For syntax, see
# https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax
schedule: "0 0 1 */4 *"
-
# [Example]
# certManagerIssuerRef:
# group: cert-manager.io
@@ -1229,108 +1432,106 @@ charts:
# name: ca-issuer
# -- certmanager issuer used when hubble.tls.auto.method=certmanager.
certManagerIssuerRef: {}
-
- # -- base64 encoded PEM values for the Hubble server certificate and private key
+ # -- The Hubble server certificate and private key
server:
+ # -- Name of the Secret containing the certificate and key for the Hubble server.
+ # If specified, cert and key are ignored.
+ existingSecret: ""
+ # -- base64 encoded PEM values for the Hubble server certificate (deprecated).
+ # Use existingSecret instead.
cert: ""
+ # -- base64 encoded PEM values for the Hubble server key (deprecated).
+ # Use existingSecret instead.
key: ""
# -- Extra DNS names added to certificate when it's auto generated
extraDnsNames: []
# -- Extra IP addresses added to certificate when it's auto generated
extraIpAddresses: []
-
relay:
# -- Enable Hubble Relay (requires hubble.enabled=true)
enabled: false
-
# -- Roll out Hubble Relay pods automatically when configmap is updated.
rollOutPods: false
-
# -- Hubble-relay container image.
image:
+ # @schema
+ # type: [null, string]
+ # @schema
override: ~
- repository: "quay.io/cilium/hubble-relay"
- tag: "v1.15.3"
+ repository: "us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/hubble-relay"
+ tag: "v1.17.1"
# hubble-relay-digest
digest: ""
useDigest: false
pullPolicy: "IfNotPresent"
-
# -- Specifies the resources for the hubble-relay pods
resources: {}
-
# -- Number of replicas run for the hubble-relay deployment.
replicas: 1
-
# -- Affinity for hubble-replay
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- - topologyKey: kubernetes.io/hostname
- labelSelector:
- matchLabels:
- k8s-app: cilium
-
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
# -- Pod topology spread constraints for hubble-relay
topologySpreadConstraints: []
- # - maxSkew: 1
- # topologyKey: topology.kubernetes.io/zone
- # whenUnsatisfiable: DoNotSchedule
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
# -- Node labels for pod assignment
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
nodeSelector:
kubernetes.io/os: linux
-
# -- Node tolerations for pod assignment on nodes with taints
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations: []
-
# -- Additional hubble-relay environment variables.
extraEnv: []
-
# -- Annotations to be added to all top-level hubble-relay objects (resources under templates/hubble-relay)
annotations: {}
-
# -- Annotations to be added to hubble-relay pods
podAnnotations: {}
-
# -- Labels to be added to hubble-relay pods
podLabels: {}
-
# PodDisruptionBudget settings
podDisruptionBudget:
# -- enable PodDisruptionBudget
# ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
enabled: false
+ # @schema
+ # type: [null, integer, string]
+ # @schema
# -- Minimum number/percentage of pods that should remain scheduled.
# When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
minAvailable: null
+ # @schema
+ # type: [null, integer, string]
+ # @schema
# -- Maximum number/percentage of pods that may be made unavailable
maxUnavailable: 1
-
# -- The priority class to use for hubble-relay
priorityClassName: ""
-
# -- Configure termination grace period for hubble relay Deployment.
terminationGracePeriodSeconds: 1
-
# -- hubble-relay update strategy
updateStrategy:
type: RollingUpdate
rollingUpdate:
+ # @schema
+ # type: [integer, string]
+ # @schema
maxUnavailable: 1
-
# -- Additional hubble-relay volumes.
extraVolumes: []
-
# -- Additional hubble-relay volumeMounts.
extraVolumeMounts: []
-
# -- hubble-relay pod security context
podSecurityContext:
fsGroup: 65532
-
# -- hubble-relay container security context
securityContext:
# readOnlyRootFilesystem: true
@@ -1339,31 +1540,34 @@ charts:
runAsGroup: 65532
capabilities:
drop:
- - ALL
-
+ - ALL
# -- hubble-relay service configuration.
service:
- # --- The type of service used for Hubble Relay access, either ClusterIP or NodePort.
+ # --- The type of service used for Hubble Relay access, either ClusterIP, NodePort or LoadBalancer.
type: ClusterIP
# --- The port to use when the service type is set to NodePort.
nodePort: 31234
-
# -- Host to listen to. Specify an empty string to bind to all the interfaces.
listenHost: ""
-
# -- Port to listen to.
listenPort: "4245"
-
# -- TLS configuration for Hubble Relay
tls:
- # -- base64 encoded PEM values for the hubble-relay client certificate and private key
+ # -- The hubble-relay client certificate and private key.
# This keypair is presented to Hubble server instances for mTLS
# authentication and is required when hubble.tls.enabled is true.
# These values need to be set manually if hubble.tls.auto.enabled is false.
client:
+ # -- Name of the Secret containing the certificate and key for the Hubble metrics server.
+ # If specified, cert and key are ignored.
+ existingSecret: ""
+ # -- base64 encoded PEM values for the Hubble relay client certificate (deprecated).
+ # Use existingSecret instead.
cert: ""
+ # -- base64 encoded PEM values for the Hubble relay client key (deprecated).
+ # Use existingSecret instead.
key: ""
- # -- base64 encoded PEM values for the hubble-relay server certificate and private key
+ # -- The hubble-relay server certificate and private key
server:
# When set to true, enable TLS on for Hubble Relay server
# (ie: for clients connecting to the Hubble Relay API).
@@ -1372,34 +1576,49 @@ charts:
# False allow non-mutual TLS connections.
# This option has no effect when TLS is disabled.
mtls: false
- # These values need to be set manually if hubble.tls.auto.enabled is false.
+ # -- Name of the Secret containing the certificate and key for the Hubble relay server.
+ # If specified, cert and key are ignored.
+ existingSecret: ""
+ # -- base64 encoded PEM values for the Hubble relay server certificate (deprecated).
+ # Use existingSecret instead.
cert: ""
+ # -- base64 encoded PEM values for the Hubble relay server key (deprecated).
+ # Use existingSecret instead.
key: ""
# -- extra DNS names added to certificate when its auto gen
extraDnsNames: []
# -- extra IP addresses added to certificate when its auto gen
extraIpAddresses: []
# DNS name used by the backend to connect to the relay
- # This is a simple workaround as the relay certificates are currently hardcoded to
- # *.hubble-relay.cilium.io
+ # This is a simple workaround as the relay certificates are currently hardcoded to
+ # *.hubble-relay.cilium.io
# See https://github.com/cilium/cilium/pull/28709#discussion_r1371792546
# For GKE Dataplane V2 this should be set to relay.kube-system.svc.cluster.local
relayName: "ui.hubble-relay.cilium.io"
-
+ # @schema
+ # type: [null, string]
+ # @schema
# -- Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s").
+ #
+ # This option has been deprecated and is a no-op.
dialTimeout: ~
-
+ # @schema
+ # type: [null, string]
+ # @schema
# -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s").
retryTimeout: ~
-
- # -- Max number of flows that can be buffered for sorting before being sent to the
+ # @schema
+ # type: [null, integer]
+ # @schema
+ # -- (int) Max number of flows that can be buffered for sorting before being sent to the
# client (per request) (e.g. 100).
sortBufferLenMax: ~
-
+ # @schema
+ # type: [null, string]
+ # @schema
# -- When the per-request flows sort buffer is not full, a flow is drained every
# time this timeout is reached (only affects requests in follow-mode) (e.g. "1s").
sortBufferDrainTimeout: ~
-
# -- Port to use for the k8s service backed by hubble-relay pods.
# If not set, it is dynamically assigned to port 443 if TLS is enabled and to
# port 80 if not.
@@ -1423,17 +1642,21 @@ charts:
# -- Specify the Kubernetes namespace where Prometheus expects to find
# service monitors configured.
# namespace: ""
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Relabeling configs for the ServiceMonitor hubble-relay
relabelings: ~
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Metrics relabeling configs for the ServiceMonitor hubble-relay
metricRelabelings: ~
-
gops:
# -- Enable gops for hubble-relay
enabled: true
# -- Configure gops listen port for hubble-relay
port: 9893
-
pprof:
# -- Enable pprof for hubble-relay
enabled: false
@@ -1441,77 +1664,70 @@ charts:
address: localhost
# -- Configure pprof listen port for hubble-relay
port: 6062
-
ui:
# -- Whether to enable the Hubble UI.
enabled: false
-
standalone:
# -- When true, it will allow installing the Hubble UI only, without checking dependencies.
# It is useful if a cluster already has cilium and Hubble relay installed and you just
# want Hubble UI to be deployed.
# When installed via helm, installing UI should be done via `helm upgrade` and when installed via the cilium cli, then `cilium hubble enable --ui`
enabled: false
-
tls:
# -- When deploying Hubble UI in standalone, with tls enabled for Hubble relay, it is required
# to provide a volume for mounting the client certificates.
certsVolume: {}
- # projected:
- # defaultMode: 0400
- # sources:
- # - secret:
- # name: hubble-ui-client-certs
- # items:
- # - key: tls.crt
- # path: client.crt
- # - key: tls.key
- # path: client.key
- # - key: ca.crt
- # path: hubble-relay-ca.crt
-
- # -- Roll out Hubble-ui pods automatically when configmap is updated.
+ # projected:
+ # defaultMode: 0400
+ # sources:
+ # - secret:
+ # name: hubble-ui-client-certs
+ # items:
+ # - key: tls.crt
+ # path: client.crt
+ # - key: tls.key
+ # path: client.key
+ # - key: ca.crt
+ # path: hubble-relay-ca.crt
+ # -- Roll out Hubble-ui pods automatically when configmap is updated.
rollOutPods: false
-
tls:
- # -- base64 encoded PEM values used to connect to hubble-relay
- # This keypair is presented to Hubble Relay instances for mTLS
- # authentication and is required when hubble.relay.tls.server.enabled is true.
- # These values need to be set manually if hubble.tls.auto.enabled is false.
client:
+ # -- Name of the Secret containing the client certificate and key for Hubble UI
+ # If specified, cert and key are ignored.
+ existingSecret: ""
+ # -- base64 encoded PEM values for the Hubble UI client certificate (deprecated).
+ # Use existingSecret instead.
cert: ""
+ # -- base64 encoded PEM values for the Hubble UI client key (deprecated).
+ # Use existingSecret instead.
key: ""
-
backend:
# -- Hubble-ui backend image.
image:
+ # @schema
+ # type: [null, string]
+ # @schema
override: ~
- repository: "quay.io/cilium/hubble-ui-backend"
- tag: "v0.13.0"
- digest: "sha256:1e7657d997c5a48253bb8dc91ecee75b63018d16ff5e5797e5af367336bc8803"
- useDigest: true
+ repository: "us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/hubble-ui-backend"
+ tag: "v0.13.1"
+ digest: ""
+ useDigest: false
pullPolicy: "IfNotPresent"
-
# -- Hubble-ui backend security context.
securityContext: {}
-
# -- Additional hubble-ui backend environment variables.
extraEnv: []
-
# -- Additional hubble-ui backend volumes.
extraVolumes: []
-
# -- Additional hubble-ui backend volumeMounts.
extraVolumeMounts: []
-
livenessProbe:
# -- Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+)
enabled: false
-
readinessProbe:
# -- Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+)
enabled: false
-
# -- Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment.
resources: {}
# limits:
@@ -1520,29 +1736,26 @@ charts:
# requests:
# cpu: 100m
# memory: 64Mi
-
frontend:
# -- Hubble-ui frontend image.
image:
+ # @schema
+ # type: [null, string]
+ # @schema
override: ~
- repository: "quay.io/cilium/hubble-ui"
- tag: "v0.13.0"
- digest: "sha256:7d663dc16538dd6e29061abd1047013a645e6e69c115e008bee9ea9fef9a6666"
- useDigest: true
+ repository: "us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/hubble-ui"
+ tag: "v0.13.1"
+ digest: ""
+ useDigest: false
pullPolicy: "IfNotPresent"
-
# -- Hubble-ui frontend security context.
securityContext: {}
-
# -- Additional hubble-ui frontend environment variables.
extraEnv: []
-
# -- Additional hubble-ui frontend volumes.
extraVolumes: []
-
# -- Additional hubble-ui frontend volumeMounts.
extraVolumeMounts: []
-
# -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment.
resources: {}
# limits:
@@ -1555,63 +1768,62 @@ charts:
# -- Controls server listener for ipv6
ipv6:
enabled: true
-
# -- The number of replicas of Hubble UI to deploy.
replicas: 1
-
# -- Annotations to be added to all top-level hubble-ui objects (resources under templates/hubble-ui)
annotations: {}
-
+ # -- Additional labels to be added to 'hubble-ui' deployment object
+ labels: {}
# -- Annotations to be added to hubble-ui pods
podAnnotations: {}
-
# -- Labels to be added to hubble-ui pods
podLabels: {}
-
# PodDisruptionBudget settings
podDisruptionBudget:
# -- enable PodDisruptionBudget
# ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
enabled: false
+ # @schema
+ # type: [null, integer, string]
+ # @schema
# -- Minimum number/percentage of pods that should remain scheduled.
# When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
minAvailable: null
+ # @schema
+ # type: [null, integer, string]
+ # @schema
# -- Maximum number/percentage of pods that may be made unavailable
maxUnavailable: 1
-
# -- Affinity for hubble-ui
affinity: {}
-
# -- Pod topology spread constraints for hubble-ui
topologySpreadConstraints: []
- # - maxSkew: 1
- # topologyKey: topology.kubernetes.io/zone
- # whenUnsatisfiable: DoNotSchedule
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
# -- Node labels for pod assignment
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
nodeSelector:
kubernetes.io/os: linux
-
# -- Node tolerations for pod assignment on nodes with taints
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations: []
-
# -- The priority class to use for hubble-ui
priorityClassName: ""
-
# -- hubble-ui update strategy.
updateStrategy:
type: RollingUpdate
rollingUpdate:
+ # @schema
+ # type: [integer, string]
+ # @schema
maxUnavailable: 1
-
# -- Security context to be added to Hubble UI pods
securityContext:
runAsUser: 1001
runAsGroup: 1001
fsGroup: 1001
-
# -- hubble-ui service configuration.
service:
# -- Annotations to be added for the Hubble UI service
@@ -1620,18 +1832,16 @@ charts:
type: ClusterIP
# --- The port to use when the service type is set to NodePort.
nodePort: 31235
-
# -- Defines base url prefix for all hubble-ui http requests.
# It needs to be changed in case if ingress for hubble-ui is configured under some sub-path.
# Trailing `/` is required for custom path, ex. `/service-map/`
baseUrl: "/"
-
# -- hubble-ui ingress configuration.
ingress:
enabled: false
annotations: {}
- # kubernetes.io/ingress.class: nginx
- # kubernetes.io/tls-acme: "true"
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
className: ""
hosts:
- chart-example.local
@@ -1640,8 +1850,7 @@ charts:
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
-
- # -- Hubble flows export.
+ # -- Hubble flows export.
export:
# --- Defines max file size of output file before it gets rotated.
fileMaxSizeMb: 10
@@ -1662,8 +1871,8 @@ charts:
denyList: []
# - '{"source_pod":["kube-system/"]}'
# - '{"destination_pod":["kube-system/"]}'
- # --- Dynamic exporters configuration.
- # Dynamic exporters may be reconfigured without a need of agent restarts.
+ # --- Dynamic exporters configuration.
+ # Dynamic exporters may be reconfigured without a need of agent restarts.
dynamic:
enabled: false
config:
@@ -1674,116 +1883,138 @@ charts:
createConfigMap: true
# ---- Exporters configuration in YAML format.
content:
- - name: all
- fieldMask: []
- includeFilters: []
- excludeFilters: []
- filePath: "/var/run/cilium/hubble/events.log"
- #- name: "test002"
- # filePath: "/var/log/network/flow-log/pa/test002.log"
- # fieldMask: ["source.namespace", "source.pod_name", "destination.namespace", "destination.pod_name", "verdict"]
- # includeFilters:
- # - source_pod: ["default/"]
- # event_type:
- # - type: 1
- # - destination_pod: ["frontend/nginx-975996d4c-7hhgt"]
- # excludeFilters: []
- # end: "2023-10-09T23:59:59-07:00"
-
- # -- Method to use for identity allocation (`crd` or `kvstore`).
+ - name: all
+ fieldMask: []
+ includeFilters: []
+ excludeFilters: []
+ filePath: "/var/run/cilium/hubble/events.log"
+ # - name: "test002"
+ # filePath: "/var/log/network/flow-log/pa/test002.log"
+ # fieldMask: ["source.namespace", "source.pod_name", "destination.namespace", "destination.pod_name", "verdict"]
+ # includeFilters:
+ # - source_pod: ["default/"]
+ # event_type:
+ # - type: 1
+ # - destination_pod: ["frontend/nginx-975996d4c-7hhgt"]
+ # excludeFilters: []
+ # end: "2023-10-09T23:59:59-07:00"
+ # -- Emit v1.Events related to pods on detection of packet drops.
+ # This feature is alpha, please provide feedback at https://github.com/cilium/cilium/issues/33975.
+ dropEventEmitter:
+ enabled: false
+ # --- Minimum time between emitting same events.
+ interval: 2m
+ # --- Drop reasons to emit events for.
+ # ref: https://docs.cilium.io/en/stable/_api/v1/flow/README/#dropreason
+ reasons:
+ - auth_required
+ - policy_denied
+ # -- Method to use for identity allocation (`crd`, `kvstore` or `doublewrite-readkvstore` / `doublewrite-readcrd` for migrating between identity backends).
identityAllocationMode: "crd"
-
# -- (string) Time to wait before using new identity on endpoint identity change.
# @default -- `"5s"`
identityChangeGracePeriod: ""
-
# -- Install Iptables rules to skip netfilter connection tracking on all pod
# traffic. This option is only effective when Cilium is running in direct
# routing and full KPR mode. Moreover, this option cannot be enabled when Cilium
# is running in a managed Kubernetes environment or in a chained CNI setup.
installNoConntrackIptablesRules: false
-
ipam:
# -- Configure IP Address Management mode.
# ref: https://docs.cilium.io/en/stable/network/concepts/ipam/
- # For this pack, the default mode has been switched from "cluster-pool" to
- # "kubernetes" so that Cilium respects the PodCIDR that is configured
- # in the K8s pack.
mode: "kubernetes"
- # The alternative below is the default for the Cilium helm chart
- # mode: "cluster-pool"
- # # -- Maximum rate at which the CiliumNode custom resource is updated.
- # ciliumNodeUpdateRate: "15s"
- # operator:
- # # -- IPv4 CIDR list range to delegate to individual nodes for IPAM.
- # clusterPoolIPv4PodCIDRList: ["10.0.0.0/8"]
- # # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM.
- # clusterPoolIPv4MaskSize: 24
- # # -- IPv6 CIDR list range to delegate to individual nodes for IPAM.
- # clusterPoolIPv6PodCIDRList: ["fd00::/104"]
- # # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM.
- # clusterPoolIPv6MaskSize: 120
- # # -- IP pools to auto-create in multi-pool IPAM mode.
- # autoCreateCiliumPodIPPools: {}
- # # default:
- # # ipv4:
- # # cidrs:
- # # - 10.10.0.0/8
- # # maskSize: 24
- # # other:
- # # ipv6:
- # # cidrs:
- # # - fd00:100::/80
- # # maskSize: 96
- # # -- The maximum burst size when rate limiting access to external APIs.
- # # Also known as the token bucket capacity.
- # # @default -- `20`
- # externalAPILimitBurstSize: ~
- # # -- The maximum queries per second when rate limiting access to
- # # external APIs. Also known as the bucket refill rate, which is used to
- # # refill the bucket up to the burst size capacity.
- # # @default -- `4.0`
- # externalAPILimitQPS: ~
-
+ # -- Maximum rate at which the CiliumNode custom resource is updated.
+ ciliumNodeUpdateRate: "15s"
+ # -- Pre-allocation settings for IPAM in Multi-Pool mode
+ multiPoolPreAllocation: ""
+ # -- Install ingress/egress routes through uplink on host for Pods when working with delegated IPAM plugin.
+ installUplinkRoutesForDelegatedIPAM: false
+ operator:
+ # @schema
+ # type: [array, string]
+ # @schema
+ # -- IPv4 CIDR list range to delegate to individual nodes for IPAM.
+ clusterPoolIPv4PodCIDRList: [ "10.0.0.0/8" ]
+ # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM.
+ clusterPoolIPv4MaskSize: 24
+ # @schema
+ # type: [array, string]
+ # @schema
+ # -- IPv6 CIDR list range to delegate to individual nodes for IPAM.
+ clusterPoolIPv6PodCIDRList: [ "fd00::/104" ]
+ # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM.
+ clusterPoolIPv6MaskSize: 120
+ # -- IP pools to auto-create in multi-pool IPAM mode.
+ autoCreateCiliumPodIPPools: {}
+ # default:
+ # ipv4:
+ # cidrs:
+ # - 10.10.0.0/8
+ # maskSize: 24
+ # other:
+ # ipv6:
+ # cidrs:
+ # - fd00:100::/80
+ # maskSize: 96
+ # @schema
+ # type: [null, integer]
+ # @schema
+ # -- (int) The maximum burst size when rate limiting access to external APIs.
+ # Also known as the token bucket capacity.
+ # @default -- `20`
+ externalAPILimitBurstSize: ~
+ # @schema
+ # type: [null, number]
+ # @schema
+ # -- (float) The maximum queries per second when rate limiting access to
+ # external APIs. Also known as the bucket refill rate, which is used to
+ # refill the bucket up to the burst size capacity.
+ # @default -- `4.0`
+ externalAPILimitQPS: ~
+ # -- defaultLBServiceIPAM indicates the default LoadBalancer Service IPAM when
+ # no LoadBalancer class is set. Applicable values: lbipam, nodeipam, none
+ # @schema
+ # type: [string]
+ # @schema
+ defaultLBServiceIPAM: lbipam
+ nodeIPAM:
+ # -- Configure Node IPAM
+ # ref: https://docs.cilium.io/en/stable/network/node-ipam/
+ enabled: false
+ # @schema
+ # type: [null, string]
+ # @schema
# -- The api-rate-limit option can be used to overwrite individual settings of the default configuration for rate limiting calls to the Cilium Agent API
apiRateLimit: ~
-
# -- Configure the eBPF-based ip-masq-agent
ipMasqAgent:
enabled: false
# the config of nonMasqueradeCIDRs
# config:
- # nonMasqueradeCIDRs: []
- # masqLinkLocal: false
- # masqLinkLocalIPv6: false
+ # nonMasqueradeCIDRs: []
+ # masqLinkLocal: false
+ # masqLinkLocalIPv6: false
# iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium.
# iptablesLockTimeout: "5s"
-
ipv4:
# -- Enable IPv4 support.
enabled: true
-
ipv6:
# -- Enable IPv6 support.
enabled: false
-
# -- Configure Kubernetes specific configuration
- k8s: {}
+ k8s:
# -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR
# range via the Kubernetes node resource
- # requireIPv4PodCIDR: false
-
+ requireIPv4PodCIDR: false
# -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR
# range via the Kubernetes node resource
- # requireIPv6PodCIDR: false
-
+ requireIPv6PodCIDR: false
# -- Keep the deprecated selector labels when deploying Cilium DaemonSet.
keepDeprecatedLabels: false
-
# -- Keep the deprecated probes when deploying Cilium DaemonSet
keepDeprecatedProbes: false
-
startupProbe:
# -- failure threshold of startup probe.
# 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s)
@@ -1800,9 +2031,8 @@ charts:
failureThreshold: 3
# -- interval between checks of the readiness probe
periodSeconds: 30
-
# -- Configure the kube-proxy replacement in Cilium BPF datapath
- # Valid options are "true", "false", "disabled" (deprecated), "partial" (deprecated), "strict" (deprecated).
+ # Valid options are "true" or "false".
# ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/
#kubeProxyReplacement: "false"
@@ -1811,19 +2041,15 @@ charts:
# addresses and this '[::]:10256' for all ipv6 addresses.
# By default it is disabled.
kubeProxyReplacementHealthzBindAddr: ""
-
l2NeighDiscovery:
# -- Enable L2 neighbor discovery in the agent
enabled: true
# -- Override the agent's default neighbor resolution refresh period.
refreshPeriod: "30s"
-
# -- Enable Layer 7 network policy.
l7Proxy: true
-
# -- Enable Local Redirect Policy.
localRedirectPolicy: false
-
# To include or exclude matched resources from cilium identity evaluation
# labels: ""
@@ -1833,56 +2059,50 @@ charts:
# -- Enables periodic logging of system load
logSystemLoad: false
-
# -- Configure maglev consistent hashing
maglev: {}
- # -- tableSize is the size (parameter M) for the backend table of one
- # service entry
- # tableSize:
+ # -- tableSize is the size (parameter M) for the backend table of one
+ # service entry
+ # tableSize:
- # -- hashSeed is the cluster-wide base64 encoded seed for the hashing
- # hashSeed:
+ # -- hashSeed is the cluster-wide base64 encoded seed for the hashing
+ # hashSeed:
# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
enableIPv4Masquerade: true
-
# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
enableIPv6Masquerade: true
-
# -- Enables masquerading to the source of the route for traffic leaving the node from endpoints.
enableMasqueradeRouteSource: false
-
# -- Enables IPv4 BIG TCP support which increases maximum IPv4 GSO/GRO limits for nodes and pods
enableIPv4BIGTCP: false
-
# -- Enables IPv6 BIG TCP support which increases maximum IPv6 GSO/GRO limits for nodes and pods
enableIPv6BIGTCP: false
-
+ nat:
+ # -- Number of the top-k SNAT map connections to track in Cilium statedb.
+ mapStatsEntries: 32
+ # -- Interval between how often SNAT map is counted for stats.
+ mapStatsInterval: 30s
egressGateway:
# -- Enables egress gateway to redirect and SNAT the traffic that leaves the
# cluster.
enabled: false
- # -- Deprecated without a replacement necessary.
- installRoutes: false
# -- Time between triggers of egress gateway state reconciliations
reconciliationTriggerInterval: 1s
# -- Maximum number of entries in egress gateway policy map
# maxPolicyEntries: 16384
-
vtep:
- # -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow
- # Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel.
+ # -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow
+ # Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel.
enabled: false
-
- # -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1"
+ # -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1"
endpoint: ""
- # -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24"
+ # -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24"
cidr: ""
- # -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0"
+ # -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0"
mask: ""
- # -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y:y"
+ # -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y:y"
mac: ""
-
# -- (string) Allows to explicitly specify the IPv4 CIDR for native routing.
# When specified, Cilium assumes networking for this CIDR is preconfigured and
# hands traffic destined for that range to the Linux network stack without
@@ -1894,7 +2114,6 @@ charts:
# the user must configure the routes to reach pods, either manually or by
# setting the auto-direct-node-routes flag.
ipv4NativeRoutingCIDR: ""
-
# -- (string) Allows to explicitly specify the IPv6 CIDR for native routing.
# When specified, Cilium assumes networking for this CIDR is preconfigured and
# hands traffic destined for that range to the Linux network stack without
@@ -1906,12 +2125,10 @@ charts:
# the user must configure the routes to reach pods, either manually or by
# setting the auto-direct-node-routes flag.
ipv6NativeRoutingCIDR: ""
-
# -- cilium-monitor sidecar.
monitor:
# -- Enable the cilium-monitor sidecar.
enabled: false
-
# -- Configure service load balancing
loadBalancer:
# -- standalone enables the standalone L4LB which does not connect to
@@ -1932,7 +2149,6 @@ charts:
# path), or best-effort (use native mode XDP acceleration on devices
# that support it).
acceleration: disabled
-
# -- dsrDispatch configures whether IP option or IPIP encapsulation is
# used to pass a service IP and port to remote backend
# dsrDispatch: opt
@@ -1941,6 +2157,9 @@ charts:
# endpoints filtering
# serviceTopology: false
+ # -- experimental enables support for the experimental load-balancing
+ # control-plane.
+ experimental: false
# -- L7 LoadBalancer
l7:
# -- Enable L7 service load balancing via envoy proxy.
@@ -1961,40 +2180,46 @@ charts:
# service annotation (e.g. service.cilium.io/lb-l7-algorithm)
# Applicable values: round_robin, least_request, random
algorithm: round_robin
-
# -- Configure N-S k8s service loadbalancing
nodePort:
# -- Enable the Cilium NodePort service implementation.
enabled: false
-
# -- Port range to use for NodePort services.
# range: "30000,32767"
+ # @schema
+ # type: [null, string, array]
+ # @schema
+ # -- List of CIDRs for choosing which IP addresses assigned to native devices are used for NodePort load-balancing.
+ # By default this is empty and the first suitable, preferably private, IPv4 and IPv6 address assigned to each device is used.
+ #
+ # Example:
+ #
+ # addresses: ["192.168.1.0/24", "2001::/64"]
+ #
+ addresses: ~
# -- Set to true to prevent applications binding to service ports.
bindProtection: true
-
# -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral
# ports is detected.
autoProtectPortRange: true
-
# -- Enable healthcheck nodePort server for NodePort services
enableHealthCheck: true
-
# -- Enable access of the healthcheck nodePort on the LoadBalancerIP. Needs
# EnableHealthCheck to be enabled
enableHealthCheckLoadBalancerIP: false
-
# policyAuditMode: false
# -- The agent can be put into one of the three policy enforcement modes:
# default, always and never.
# ref: https://docs.cilium.io/en/stable/security/policy/intro/#policy-enforcement-modes
policyEnforcementMode: "default"
-
+ # @schema
+ # type: [null, string, array]
+ # @schema
# -- policyCIDRMatchMode is a list of entities that may be selected by CIDR selector.
# The possible value is "nodes".
policyCIDRMatchMode:
-
pprof:
# -- Enable pprof for cilium-agent
enabled: false
@@ -2002,9 +2227,9 @@ charts:
address: localhost
# -- Configure pprof listen port for cilium-agent
port: 6060
-
# -- Configure prometheus metrics on the configured port at /metrics
prometheus:
+ metricsService: false
enabled: false
port: 9962
serviceMonitor:
@@ -2028,17 +2253,21 @@ charts:
- __meta_kubernetes_pod_node_name
targetLabel: node
replacement: ${1}
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Metrics relabeling configs for the ServiceMonitor cilium-agent
metricRelabelings: ~
# -- Set to `true` and helm will not check for monitoring.coreos.com/v1 CRDs before deploying
trustCRDsExist: false
-
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Metrics that should be enabled or disabled from the default metric list.
# The list is expected to be separated by a space. (+metric_foo to enable
# metric_foo , -metric_bar to disable metric_bar).
# ref: https://docs.cilium.io/en/stable/observability/metrics/
metrics: ~
-
# --- Enable controller group metrics for monitoring specific Cilium
# subsystems. The list is a list of controller group names. The special
# values of "all" and "none" are supported. The set of controller
@@ -2047,42 +2276,77 @@ charts:
- write-cni-file
- sync-host-ips
- sync-lb-maps-with-k8s-services
-
# -- Grafana dashboards for cilium-agent
# grafana can import dashboards based on the label and value
# ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
dashboards:
enabled: false
label: grafana_dashboard
+ # @schema
+ # type: [null, string]
+ # @schema
namespace: ~
labelValue: "1"
annotations: {}
-
- # -- Configure Istio proxy options.
- proxy:
-
- prometheus:
- # -- Deprecated in favor of envoy.prometheus.enabled
- enabled: true
- # -- Deprecated in favor of envoy.prometheus.port
- port: ~
- # -- Regular expression matching compatible Istio sidecar istio-proxy
- # container image names
- sidecarImageRegex: "cilium/istio_proxy"
-
# Configure Cilium Envoy options.
envoy:
+ # @schema
+ # type: [null, boolean]
+ # @schema
# -- Enable Envoy Proxy in standalone DaemonSet.
- enabled: false
-
+ # This field is enabled by default for new installation.
+ # @default -- `true` for new installation
+ enabled: ~
+ # -- (int)
+      # Set Envoy '--base-id' to use when allocating shared memory regions.
+ # Only needs to be changed if multiple Envoy instances will run on the same node and may have conflicts. Supported values: 0 - 4294967295. Defaults to '0'
+ baseID: 0
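+      # Illustrative only (not an upstream default): pick any non-conflicting value in the
+      # supported range when a second Envoy instance runs on the same node, for example:
+      # baseID: 100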
log:
- # -- The format string to use for laying out the log message metadata of Envoy.
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- The format string to use for laying out the log message metadata of Envoy. If specified, Envoy will use text format output.
+ # This setting is mutually exclusive with envoy.log.format_json.
format: "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"
+ # @schema
+ # type: [null, object]
+ # @schema
+ # -- The JSON logging format to use for Envoy. This setting is mutually exclusive with envoy.log.format.
+ # ref: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/bootstrap/v3/bootstrap.proto#envoy-v3-api-field-config-bootstrap-v3-bootstrap-applicationlogconfig-logformat-json-format
+ format_json: null
+ # date: "%Y-%m-%dT%T.%e"
+ # thread_id: "%t"
+ # source_line: "%s:%#"
+ # level: "%l"
+ # logger: "%n"
+ # message: "%j"
# -- Path to a separate Envoy log file, if any. Defaults to /dev/stdout.
path: ""
-
+ # @schema
+ # oneOf:
+ # - type: [null]
+ # - enum: [trace,debug,info,warning,error,critical,off]
+ # @schema
+ # -- Default log level of Envoy application log that is configured if Cilium debug / verbose logging isn't enabled.
+ # This option allows to have a different log level than the Cilium Agent - e.g. lower it to `critical`.
+ # Possible values: trace, debug, info, warning, error, critical, off
+ # @default -- Defaults to the default log level of the Cilium Agent - `info`
+ defaultLevel: ~
+ # @schema
+ # type: [null, integer]
+ # @schema
+ # -- Size of the Envoy access log buffer created within the agent in bytes.
+ # Tune this value up if you encounter "Envoy: Discarded truncated access log message" errors.
+ # Large request/response header sizes (e.g. 16KiB) will require a larger buffer size.
+ accessLogBufferSize: 4096
# -- Time in seconds after which a TCP connection attempt times out
connectTimeoutSeconds: 2
+ # -- Time in seconds after which the initial fetch on an xDS stream is considered timed out
+ initialFetchTimeoutSeconds: 30
+ # -- Maximum number of concurrent retries on Envoy clusters
+ maxConcurrentRetries: 128
+ # -- Maximum number of retries for each HTTP request
+ httpRetryCount: 3
# -- ProxyMaxRequestsPerConnection specifies the max_requests_per_connection setting for Envoy
maxRequestsPerConnection: 0
# -- Set Envoy HTTP option max_connection_duration seconds. Default 0 (disable)
@@ -2090,76 +2354,84 @@ charts:
# -- Set Envoy upstream HTTP idle connection timeout seconds.
# Does not apply to connections with pending requests. Default 60s
idleTimeoutDurationSeconds: 60
-
+ # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners.
+ xffNumTrustedHopsL7PolicyIngress: 0
+ # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners.
+ xffNumTrustedHopsL7PolicyEgress: 0
# -- Envoy container image.
image:
+ # @schema
+ # type: [null, string]
+ # @schema
override: ~
- repository: "quay.io/cilium/cilium-envoy"
- tag: "v1.27.3-99c1c8f42c8de70fc8f6dd594f4a425cd38b6688"
+ repository: "us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/cilium-envoy"
+ tag: "v1.31.5-1739264036-958bef243c6c66fcfd73ca319f2eb49fff1eb2ae"
pullPolicy: "IfNotPresent"
- digest: "sha256:877ead12d08d4c04a9f67f86d3c6e542aeb7bf97e1e401aee74de456f496ac30"
- useDigest: true
-
+ digest: ""
+ useDigest: false
# -- Additional containers added to the cilium Envoy DaemonSet.
extraContainers: []
-
# -- Additional envoy container arguments.
extraArgs: []
-
# -- Additional envoy container environment variables.
extraEnv: []
-
# -- Additional envoy hostPath mounts.
extraHostPathMounts: []
- # - name: host-mnt-data
- # mountPath: /host/mnt/data
- # hostPath: /mnt/data
- # hostPathType: Directory
- # readOnly: true
- # mountPropagation: HostToContainer
+ # - name: host-mnt-data
+ # mountPath: /host/mnt/data
+ # hostPath: /mnt/data
+ # hostPathType: Directory
+ # readOnly: true
+ # mountPropagation: HostToContainer
# -- Additional envoy volumes.
extraVolumes: []
-
# -- Additional envoy volumeMounts.
extraVolumeMounts: []
-
# -- Configure termination grace period for cilium-envoy DaemonSet.
terminationGracePeriodSeconds: 1
-
# -- TCP port for the health API.
healthPort: 9878
-
# -- cilium-envoy update strategy
# ref: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#updating-a-daemonset
updateStrategy:
type: RollingUpdate
rollingUpdate:
+ # @schema
+ # type: [integer, string]
+ # @schema
maxUnavailable: 2
# -- Roll out cilium envoy pods automatically when configmap is updated.
rollOutPods: false
-
+ # -- ADVANCED OPTION: Bring your own custom Envoy bootstrap ConfigMap. Provide the name of a ConfigMap with a `bootstrap-config.json` key.
+ # When specified, Envoy will use this ConfigMap instead of the default provided by the chart.
+ # WARNING: Use of this setting has the potential to prevent cilium-envoy from starting up, and can cause unexpected behavior (e.g. due to
+ # syntax error or semantically incorrect configuration). Before submitting an issue, please ensure you have disabled this feature, as support
+ # cannot be provided for custom Envoy bootstrap configs.
+ # @schema
+ # type: [null, string]
+ # @schema
+ bootstrapConfigMap: ~
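+      # Illustrative only: the ConfigMap name below is a hypothetical placeholder; it must
+      # exist in the release namespace and contain a `bootstrap-config.json` key.
+      # bootstrapConfigMap: "cilium-envoy-bootstrap-override"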
# -- Annotations to be added to all top-level cilium-envoy objects (resources under templates/cilium-envoy)
annotations: {}
-
# -- Security Context for cilium-envoy pods.
- podSecurityContext: {}
-
+ podSecurityContext:
+        # -- AppArmorProfile options for the `cilium-envoy` and init containers
+ appArmorProfile:
+ type: "Unconfined"
# -- Annotations to be added to envoy pods
podAnnotations: {}
-
# -- Labels to be added to envoy pods
podLabels: {}
-
# -- Envoy resource limits & requests
# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
- # limits:
- # cpu: 4000m
- # memory: 4Gi
- # requests:
- # cpu: 100m
- # memory: 512Mi
+ # limits:
+ # cpu: 4000m
+ # memory: 4Gi
+ # requests:
+ # cpu: 100m
+ # memory: 512Mi
startupProbe:
# -- failure threshold of startup probe.
@@ -2177,7 +2449,6 @@ charts:
failureThreshold: 3
# -- interval between checks of the readiness probe
periodSeconds: 30
-
securityContext:
# -- User to run the pod with
# runAsUser: 0
@@ -2191,7 +2462,13 @@ charts:
# type available on the system.
type: 'spc_t'
capabilities:
- # -- Capabilities for the `cilium-envoy` container
+ # -- Capabilities for the `cilium-envoy` container.
+ # Even though granted to the container, the cilium-envoy-starter wrapper drops
+ # all capabilities after forking the actual Envoy process.
+ # `NET_BIND_SERVICE` is the only capability that can be passed to the Envoy process by
+        # setting `envoy.securityContext.capabilities.keepCapNetBindService=true` (in addition to granting the
+ # capability to the container).
+ # Note: In case of embedded envoy, the capability must be granted to the cilium-agent container.
envoy:
# Used since cilium proxy uses setting IPPROTO_IP/IP_TRANSPARENT
- NET_ADMIN
@@ -2204,49 +2481,60 @@ charts:
# If available, SYS_ADMIN can be removed.
#- PERFMON
#- BPF
-
+ # -- Keep capability `NET_BIND_SERVICE` for Envoy process.
+ keepCapNetBindService: false
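+        # Illustrative only: to let the Envoy process bind privileged ports, add
+        # NET_BIND_SERVICE to the `envoy:` capability list above and set:
+        # keepCapNetBindService: true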
# -- Affinity for cilium-envoy.
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- - topologyKey: kubernetes.io/hostname
- labelSelector:
- matchLabels:
- k8s-app: cilium-envoy
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium-envoy
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- - topologyKey: kubernetes.io/hostname
- labelSelector:
- matchLabels:
- k8s-app: cilium
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- - key: cilium.io/no-schedule
- operator: NotIn
- values:
- - "true"
+ - key: cilium.io/no-schedule
+ operator: NotIn
+ values:
+ - "true"
# -- Node selector for cilium-envoy.
nodeSelector:
kubernetes.io/os: linux
-
# -- Node tolerations for envoy scheduling to nodes with taints
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations:
- - operator: Exists
- # - key: "key"
- # operator: "Equal|Exists"
- # value: "value"
- # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
-
- # -- The priority class to use for cilium-envoy.
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- The priority class to use for cilium-envoy.
priorityClassName: ~
-
+ # @schema
+ # type: [null, string]
+ # @schema
# -- DNS policy for Cilium envoy pods.
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
dnsPolicy: ~
-
+ debug:
+ admin:
+ # -- Enable admin interface for cilium-envoy.
+ # This is useful for debugging and should not be enabled in production.
+ enabled: false
+ # -- Port number (bound to loopback interface).
+ # kubectl port-forward can be used to access the admin interface.
+ port: 9901
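+        # For example (assuming Cilium runs in the kube-system namespace and the admin
+        # interface is enabled above):
+        # kubectl -n kube-system port-forward <cilium-envoy-pod> 9901:9901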
# -- Configure Cilium Envoy Prometheus options.
# Note that some of these apply to either cilium-agent or cilium-envoy.
prometheus:
@@ -2274,17 +2562,16 @@ charts:
- __meta_kubernetes_pod_node_name
targetLabel: node
replacement: ${1}
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Metrics relabeling configs for the ServiceMonitor cilium-envoy
# or for cilium-agent with Envoy configured.
metricRelabelings: ~
# -- Serve prometheus metrics for cilium-envoy on the configured port
port: "9964"
-
- # -- Enable use of the remote node identity.
- # ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
- # Deprecated without replacement in 1.15. To be removed in 1.16.
- remoteNodeIdentity: true
-
+ # -- Enable/Disable use of node label based identity
+ nodeSelectorLabels: false
# -- Enable resource quotas for priority classes used in the cluster.
resourceQuotas:
enabled: false
@@ -2296,7 +2583,6 @@ charts:
hard:
# 15 "clusterwide" Cilium Operator pods for HA
pods: "15"
-
# Need to document default
##################
#sessionAffinity: false
@@ -2305,22 +2591,48 @@ charts:
# uninstall Cilium as it will stop Cilium from starting and create artifacts
# in the node.
sleepAfterInit: false
-
# -- Enable check of service source ranges (currently, only for LoadBalancer).
svcSourceRangeCheck: true
-
# -- Synchronize Kubernetes nodes to kvstore and perform CNP GC.
synchronizeK8sNodes: true
-
# -- Configure TLS configuration in the agent.
tls:
+ # @schema
+ # type: [null, string]
+ # @schema
# -- This configures how the Cilium agent loads the secrets used TLS-aware CiliumNetworkPolicies
# (namely the secrets referenced by terminatingTLS and originatingTLS).
+ # This value is DEPRECATED and will be removed in a future version.
+ # Use `tls.readSecretsOnlyFromSecretsNamespace` instead.
# Possible values:
# - local
# - k8s
- secretsBackend: local
-
+ secretsBackend: ~
+ # @schema
+ # type: [null, boolean]
+ # @schema
+ # -- Configure if the Cilium Agent will only look in `tls.secretsNamespace` for
+ # CiliumNetworkPolicy relevant Secrets.
+ # If false, the Cilium Agent will be granted READ (GET/LIST/WATCH) access
+ # to _all_ secrets in the entire cluster. This is not recommended and is
+ # included for backwards compatibility.
+ # This value obsoletes `tls.secretsBackend`, with `true` == `local` in the old
+ # setting, and `false` == `k8s`.
+ readSecretsOnlyFromSecretsNamespace: ~
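+      # Rough mapping from the deprecated `tls.secretsBackend` values described above:
+      #   secretsBackend: local  ->  readSecretsOnlyFromSecretsNamespace: true
+      #   secretsBackend: k8s    ->  readSecretsOnlyFromSecretsNamespace: false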
+ # -- Configures where secrets used in CiliumNetworkPolicies will be looked for
+ secretsNamespace:
+ # -- Create secrets namespace for TLS Interception secrets.
+ create: true
+ # -- Name of TLS Interception secret namespace.
+ name: cilium-secrets
+ # -- Configures settings for synchronization of TLS Interception Secrets
+ secretSync:
+ # @schema
+ # type: [null, boolean]
+ # @schema
+ # -- Enable synchronization of Secrets for TLS Interception. If disabled and
+ # tls.secretsBackend is set to 'k8s', then secrets will be read directly by the agent.
+ enabled: ~
# -- Base64 encoded PEM values for the CA certificate and private key.
# This can be used as common CA to generate certificates used by hubble and clustermesh components.
# It is neither required nor used when cert-manager is used to generate the certificates.
@@ -2328,30 +2640,23 @@ charts:
# -- Optional CA cert. If it is provided, it will be used by cilium to
# generate all other certificates. Otherwise, an ephemeral CA is generated.
cert: ""
-
# -- Optional CA private key. If it is provided, it will be used by cilium to
# generate all other certificates. Otherwise, an ephemeral CA is generated.
key: ""
-
# -- Generated certificates validity duration in days. This will be used for auto generated CA.
certValidityDuration: 1095
-
# -- Configure the CA trust bundle used for the validation of the certificates
# leveraged by hubble and clustermesh. When enabled, it overrides the content of the
# 'ca.crt' field of the respective certificates, allowing for CA rotation with no down-time.
caBundle:
# -- Enable the use of the CA trust bundle.
enabled: false
-
# -- Name of the ConfigMap containing the CA trust bundle.
name: cilium-root-ca.crt
-
# -- Entry of the ConfigMap containing the CA trust bundle.
key: ca.crt
-
# -- Use a Secret instead of a ConfigMap.
useSecret: false
-
# If uncommented, creates the ConfigMap and fills it with the specified content.
# Otherwise, the ConfigMap is assumed to be already present in .Release.Namespace.
#
@@ -2362,15 +2667,13 @@ charts:
# -----BEGIN CERTIFICATE-----
# ...
# -----END CERTIFICATE-----
-
- # -- Tunneling protocol to use in tunneling mode and for ad-hoc tunnels.
- # Possible values:
- # - ""
- # - vxlan
- # - geneve
- # @default -- `"vxlan"`
+ # -- Tunneling protocol to use in tunneling mode and for ad-hoc tunnels.
+ # Possible values:
+ # - ""
+ # - vxlan
+ # - geneve
+ # @default -- `"vxlan"`
tunnelProtocol: ""
-
# -- Enable native-routing mode or tunneling mode.
# Possible values:
# - ""
@@ -2378,276 +2681,166 @@ charts:
# - tunnel
# @default -- `"tunnel"`
routingMode: ""
-
# -- Configure VXLAN and Geneve tunnel port.
# @default -- Port 8472 for VXLAN, Port 6081 for Geneve
tunnelPort: 0
-
# -- Configure what the response should be to traffic for a service without backends.
- # "reject" only works on kernels >= 5.10, on lower kernels we fallback to "drop".
# Possible values:
# - reject (default)
# - drop
serviceNoBackendResponse: reject
-
# -- Configure the underlying network MTU to overwrite auto-detected MTU.
+ # This value doesn't change the host network interface MTU i.e. eth0 or ens0.
+ # It changes the MTU for cilium_net@cilium_host, cilium_host@cilium_net,
+ # cilium_vxlan and lxc_health interfaces.
MTU: 0
-
# -- Disable the usage of CiliumEndpoint CRD.
disableEndpointCRD: false
-
wellKnownIdentities:
# -- Enable the use of well-known identities.
enabled: false
-
etcd:
# -- Enable etcd mode for the agent.
enabled: false
-
- # -- cilium-etcd-operator image.
- image:
- override: ~
- repository: "quay.io/cilium/cilium-etcd-operator"
- tag: "v2.0.7"
- digest: "sha256:04b8327f7f992693c2cb483b999041ed8f92efc8e14f2a5f3ab95574a65ea2dc"
- useDigest: true
- pullPolicy: "IfNotPresent"
-
- # -- The priority class to use for cilium-etcd-operator
- priorityClassName: ""
-
- # -- Additional cilium-etcd-operator container arguments.
- extraArgs: []
-
- # -- Additional cilium-etcd-operator volumes.
- extraVolumes: []
-
- # -- Additional cilium-etcd-operator volumeMounts.
- extraVolumeMounts: []
-
- # -- Node tolerations for cilium-etcd-operator scheduling to nodes with taints
- # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
- tolerations:
- - operator: Exists
- # - key: "key"
- # operator: "Equal|Exists"
- # value: "value"
- # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
-
- # -- Pod topology spread constraints for cilium-etcd-operator
- topologySpreadConstraints: []
- # - maxSkew: 1
- # topologyKey: topology.kubernetes.io/zone
- # whenUnsatisfiable: DoNotSchedule
-
- # -- Node labels for cilium-etcd-operator pod assignment
- # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
- nodeSelector:
- kubernetes.io/os: linux
-
- # -- Annotations to be added to all top-level etcd-operator objects (resources under templates/etcd-operator)
- annotations: {}
-
- # -- Security context to be added to cilium-etcd-operator pods
- podSecurityContext: {}
-
- # -- Annotations to be added to cilium-etcd-operator pods
- podAnnotations: {}
-
- # -- Labels to be added to cilium-etcd-operator pods
- podLabels: {}
-
- # PodDisruptionBudget settings
- podDisruptionBudget:
- # -- enable PodDisruptionBudget
- # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
- enabled: false
- # -- Minimum number/percentage of pods that should remain scheduled.
- # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
- minAvailable: null
- # -- Maximum number/percentage of pods that may be made unavailable
- maxUnavailable: 1
-
- # -- cilium-etcd-operator resource limits & requests
- # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
- resources: {}
- # limits:
- # cpu: 4000m
- # memory: 4Gi
- # requests:
- # cpu: 100m
- # memory: 512Mi
-
- # -- Security context to be added to cilium-etcd-operator pods
- securityContext: {}
- # runAsUser: 0
-
- # -- cilium-etcd-operator update strategy
- updateStrategy:
- type: RollingUpdate
- rollingUpdate:
- maxSurge: 1
- maxUnavailable: 1
-
- # -- If etcd is behind a k8s service set this option to true so that Cilium
- # does the service translation automatically without requiring a DNS to be
- # running.
- k8sService: false
-
- # -- Cluster domain for cilium-etcd-operator.
- clusterDomain: cluster.local
-
- # -- List of etcd endpoints (not needed when using managed=true).
+ # -- List of etcd endpoints
endpoints:
- https://CHANGE-ME:2379
-
- # -- Enable use of TLS/SSL for connectivity to etcd. (auto-enabled if
- # managed=true)
+ # -- Enable use of TLS/SSL for connectivity to etcd.
ssl: false
-
operator:
# -- Enable the cilium-operator component (required).
enabled: true
-
# -- Roll out cilium-operator pods automatically when configmap is updated.
rollOutPods: false
-
# -- cilium-operator image.
image:
+ # @schema
+ # type: [null, string]
+ # @schema
override: ~
- repository: "quay.io/cilium/operator"
- tag: "v1.15.3"
+ repository: "us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/operator"
+ tag: "v1.17.1"
# operator-generic-digest
- genericDigest: ""
+      genericDigest: ""
# operator-azure-digest
- azureDigest: ""
+      azureDigest: ""
# operator-aws-digest
- awsDigest: ""
+      awsDigest: ""
# operator-alibabacloud-digest
- alibabacloudDigest: ""
+      alibabacloudDigest: ""
useDigest: false
pullPolicy: "IfNotPresent"
suffix: ""
-
# -- Number of replicas to run for the cilium-operator deployment
replicas: 2
-
# -- The priority class to use for cilium-operator
priorityClassName: ""
-
# -- DNS policy for Cilium operator pods.
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
dnsPolicy: ""
-
# -- cilium-operator update strategy
updateStrategy:
type: RollingUpdate
rollingUpdate:
+ # @schema
+ # type: [integer, string]
+ # @schema
maxSurge: 25%
+ # @schema
+ # type: [integer, string]
+ # @schema
maxUnavailable: 50%
-
# -- Affinity for cilium-operator
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- - topologyKey: kubernetes.io/hostname
- labelSelector:
- matchLabels:
- io.cilium/app: operator
-
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ io.cilium/app: operator
# -- Pod topology spread constraints for cilium-operator
topologySpreadConstraints: []
- # - maxSkew: 1
- # topologyKey: topology.kubernetes.io/zone
- # whenUnsatisfiable: DoNotSchedule
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
# -- Node labels for cilium-operator pod assignment
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
nodeSelector:
kubernetes.io/os: linux
-
# -- Node tolerations for cilium-operator scheduling to nodes with taints
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations:
- - operator: Exists
- # - key: "key"
- # operator: "Equal|Exists"
- # value: "value"
- # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
-
- # -- Additional cilium-operator container arguments.
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+ # -- Additional cilium-operator container arguments.
extraArgs: []
-
# -- Additional cilium-operator environment variables.
extraEnv: []
-
# -- Additional cilium-operator hostPath mounts.
extraHostPathMounts: []
- # - name: host-mnt-data
- # mountPath: /host/mnt/data
- # hostPath: /mnt/data
- # hostPathType: Directory
- # readOnly: true
- # mountPropagation: HostToContainer
+ # - name: host-mnt-data
+ # mountPath: /host/mnt/data
+ # hostPath: /mnt/data
+ # hostPathType: Directory
+ # readOnly: true
+ # mountPropagation: HostToContainer
# -- Additional cilium-operator volumes.
extraVolumes: []
-
# -- Additional cilium-operator volumeMounts.
extraVolumeMounts: []
-
# -- Annotations to be added to all top-level cilium-operator objects (resources under templates/cilium-operator)
annotations: {}
-
+ # -- HostNetwork setting
+ hostNetwork: true
# -- Security context to be added to cilium-operator pods
podSecurityContext: {}
-
# -- Annotations to be added to cilium-operator pods
podAnnotations: {}
-
# -- Labels to be added to cilium-operator pods
podLabels: {}
-
# PodDisruptionBudget settings
podDisruptionBudget:
# -- enable PodDisruptionBudget
# ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
enabled: false
+ # @schema
+ # type: [null, integer, string]
+ # @schema
# -- Minimum number/percentage of pods that should remain scheduled.
# When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
minAvailable: null
+ # @schema
+ # type: [null, integer, string]
+ # @schema
# -- Maximum number/percentage of pods that may be made unavailable
maxUnavailable: 1
-
# -- cilium-operator resource limits & requests
# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
- # limits:
- # cpu: 1000m
- # memory: 1Gi
- # requests:
- # cpu: 100m
- # memory: 128Mi
+ # limits:
+ # cpu: 1000m
+ # memory: 1Gi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
# -- Security context to be added to cilium-operator pods
securityContext: {}
- # runAsUser: 0
+ # runAsUser: 0
# -- Interval for endpoint garbage collection.
endpointGCInterval: "5m0s"
-
# -- Interval for cilium node garbage collection.
nodeGCInterval: "5m0s"
-
- # -- Skip CNP node status clean up at operator startup.
- skipCNPStatusStartupClean: false
-
# -- Interval for identity garbage collection.
identityGCInterval: "15m0s"
-
# -- Timeout for identity heartbeats.
identityHeartbeatTimeout: "30m0s"
-
pprof:
# -- Enable pprof for cilium-operator
enabled: false
@@ -2655,10 +2848,10 @@ charts:
address: localhost
# -- Configure pprof listen port for cilium-operator
port: 6061
-
# -- Enable prometheus metrics for cilium-operator on the configured port at
# /metrics
prometheus:
+ metricsService: false
enabled: true
port: 9963
serviceMonitor:
@@ -2673,104 +2866,105 @@ charts:
jobLabel: ""
# -- Interval for scrape metrics.
interval: "10s"
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Relabeling configs for the ServiceMonitor cilium-operator
relabelings: ~
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Metrics relabeling configs for the ServiceMonitor cilium-operator
metricRelabelings: ~
-
# -- Grafana dashboards for cilium-operator
# grafana can import dashboards based on the label and value
# ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
dashboards:
enabled: false
label: grafana_dashboard
+ # @schema
+ # type: [null, string]
+ # @schema
namespace: ~
labelValue: "1"
annotations: {}
-
# -- Skip CRDs creation for cilium-operator
skipCRDCreation: false
-
# -- Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium
# pod running.
removeNodeTaints: true
-
+ # @schema
+ # type: [null, boolean]
+ # @schema
# -- Taint nodes where Cilium is scheduled but not running. This prevents pods
# from being scheduled to nodes where Cilium is not the default CNI provider.
# @default -- same as removeNodeTaints
setNodeTaints: ~
-
# -- Set Node condition NetworkUnavailable to 'false' with the reason
# 'CiliumIsUp' for nodes that have a healthy Cilium pod.
setNodeNetworkStatus: true
-
unmanagedPodWatcher:
# -- Restart any pod that are not managed by Cilium.
restart: true
# -- Interval, in seconds, to check if there are any pods that are not
# managed by Cilium.
intervalSeconds: 15
-
nodeinit:
# -- Enable the node initialization DaemonSet
enabled: false
-
# -- node-init image.
image:
+ # @schema
+ # type: [null, string]
+ # @schema
override: ~
- repository: "quay.io/cilium/startup-script"
- tag: "62093c5c233ea914bfa26a10ba41f8780d9b737f"
+ repository: "us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/startup-script"
+ tag: "c54c7edeab7fde4da68e59acd319ab24af242c3f"
+ digest: ""
+ useDigest: false
pullPolicy: "IfNotPresent"
-
# -- The priority class to use for the nodeinit pod.
priorityClassName: ""
-
# -- node-init update strategy
updateStrategy:
type: RollingUpdate
-
# -- Additional nodeinit environment variables.
extraEnv: []
-
# -- Additional nodeinit volumes.
extraVolumes: []
-
# -- Additional nodeinit volumeMounts.
extraVolumeMounts: []
-
# -- Affinity for cilium-nodeinit
affinity: {}
-
# -- Node labels for nodeinit pod assignment
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
nodeSelector:
kubernetes.io/os: linux
-
# -- Node tolerations for nodeinit scheduling to nodes with taints
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations:
- - operator: Exists
- # - key: "key"
- # operator: "Equal|Exists"
- # value: "value"
- # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
-
- # -- Annotations to be added to all top-level nodeinit objects (resources under templates/cilium-nodeinit)
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+ # -- Annotations to be added to all top-level nodeinit objects (resources under templates/cilium-nodeinit)
annotations: {}
-
# -- Annotations to be added to node-init pods.
podAnnotations: {}
-
# -- Labels to be added to node-init pods.
podLabels: {}
-
+ # -- Security Context for cilium-node-init pods.
+ podSecurityContext:
+ # -- AppArmorProfile options for the `cilium-node-init` and init containers
+ appArmorProfile:
+ type: "Unconfined"
# -- nodeinit resource limits & requests
# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources:
requests:
cpu: 100m
memory: 100Mi
-
# -- Security context to be added to nodeinit pods.
securityContext:
privileged: false
@@ -2789,11 +2983,9 @@ charts:
- SYS_ADMIN
- SYS_CHROOT
- SYS_PTRACE
-
# -- bootstrapFile is the location of the file where the bootstrap timestamp is
# written by the node-init DaemonSet
bootstrapFile: "/tmp/cilium-bootstrap.d/cilium-bootstrap-time"
-
# -- startup offers way to customize startup nodeinit script (pre and post position)
startup:
preScript: ""
@@ -2802,129 +2994,112 @@ charts:
prestop:
preScript: ""
postScript: ""
-
preflight:
# -- Enable Cilium pre-flight resources (required for upgrade)
enabled: false
-
# -- Cilium pre-flight image.
image:
+ # @schema
+ # type: [null, string]
+ # @schema
override: ~
- repository: "quay.io/cilium/cilium"
- tag: "v1.15.3"
+ repository: "us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/cilium"
+ tag: "v1.17.1"
# cilium-digest
digest: ""
useDigest: false
pullPolicy: "IfNotPresent"
-
# -- The priority class to use for the preflight pod.
priorityClassName: ""
-
# -- preflight update strategy
updateStrategy:
type: RollingUpdate
-
# -- Additional preflight environment variables.
extraEnv: []
-
# -- Additional preflight volumes.
extraVolumes: []
-
# -- Additional preflight volumeMounts.
extraVolumeMounts: []
-
# -- Affinity for cilium-preflight
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- - topologyKey: kubernetes.io/hostname
- labelSelector:
- matchLabels:
- k8s-app: cilium
-
+ - topologyKey: kubernetes.io/hostname
+ labelSelector:
+ matchLabels:
+ k8s-app: cilium
# -- Node labels for preflight pod assignment
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
nodeSelector:
kubernetes.io/os: linux
-
# -- Node tolerations for preflight scheduling to nodes with taints
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations:
- - key: node.kubernetes.io/not-ready
- effect: NoSchedule
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- - key: node-role.kubernetes.io/control-plane
- effect: NoSchedule
- - key: node.cloudprovider.kubernetes.io/uninitialized
- effect: NoSchedule
- value: "true"
- - key: CriticalAddonsOnly
- operator: "Exists"
- # - key: "key"
- # operator: "Equal|Exists"
- # value: "value"
- # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
-
- # -- Annotations to be added to all top-level preflight objects (resources under templates/cilium-preflight)
+ - operator: Exists
+ # - key: "key"
+ # operator: "Equal|Exists"
+ # value: "value"
+ # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+ # -- Annotations to be added to all top-level preflight objects (resources under templates/cilium-preflight)
annotations: {}
-
# -- Security context to be added to preflight pods.
podSecurityContext: {}
-
# -- Annotations to be added to preflight pods
podAnnotations: {}
-
# -- Labels to be added to the preflight pod.
podLabels: {}
-
# PodDisruptionBudget settings
podDisruptionBudget:
# -- enable PodDisruptionBudget
# ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
enabled: false
+ # @schema
+ # type: [null, integer, string]
+ # @schema
# -- Minimum number/percentage of pods that should remain scheduled.
# When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
minAvailable: null
+ # @schema
+ # type: [null, integer, string]
+ # @schema
# -- Maximum number/percentage of pods that may be made unavailable
maxUnavailable: 1
-
# -- preflight resource limits & requests
# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
- # limits:
- # cpu: 4000m
- # memory: 4Gi
- # requests:
- # cpu: 100m
- # memory: 512Mi
+ # limits:
+ # cpu: 4000m
+ # memory: 4Gi
+ # requests:
+ # cpu: 100m
+ # memory: 512Mi
+ readinessProbe:
+ # -- For how long kubelet should wait before performing the first probe
+ initialDelaySeconds: 5
+ # -- interval between checks of the readiness probe
+ periodSeconds: 5
# -- Security context to be added to preflight pods
securityContext: {}
- # runAsUser: 0
+ # runAsUser: 0
# -- Path to write the `--tofqdns-pre-cache` file to.
tofqdnsPreCache: ""
-
# -- Configure termination grace period for preflight Deployment and DaemonSet.
terminationGracePeriodSeconds: 1
-
# -- By default we should always validate the installed CNPs before upgrading
# Cilium. This will make sure the user will have the policies deployed in the
# cluster with the right schema.
validateCNPs: true
-
# -- Explicitly enable or disable priority class.
# .Capabilities.KubeVersion is unsettable in `helm template` calls,
# it depends on k8s libraries version that Helm was compiled against.
# This option allows to explicitly disable setting the priority class, which
# is useful for rendering charts for gke clusters in advance.
enableCriticalPriorityClass: true
-
# disableEnvoyVersionCheck removes the check for Envoy, which can be useful
# on AArch64 as the images do not currently ship a version of Envoy.
#disableEnvoyVersionCheck: false
-
clustermesh:
# -- Deploy clustermesh-apiserver for clustermesh
useAPIServer: false
@@ -2934,10 +3109,13 @@ charts:
# maximum allocatable cluster-local identities.
# Supported values are 255 and 511.
maxConnectedClusters: 255
-
+ # -- Enable the synchronization of Kubernetes EndpointSlices corresponding to
+ # the remote endpoints of appropriately-annotated global services through ClusterMesh
+ enableEndpointSliceSynchronization: false
+ # -- Enable Multi-Cluster Services API support
+ enableMCSAPISupport: false
# -- Annotations to be added to all top-level clustermesh objects (resources under templates/clustermesh-apiserver and templates/clustermesh-config)
annotations: {}
-
# -- Clustermesh explicit configuration.
config:
# -- Enable the Clustermesh explicit configuration.
@@ -2967,18 +3145,23 @@ charts:
# cert: ""
# key: ""
# caCert: ""
-
apiserver:
# -- Clustermesh API server image.
image:
+ # @schema
+ # type: [null, string]
+ # @schema
override: ~
- repository: "quay.io/cilium/clustermesh-apiserver"
- tag: "v1.15.3"
+ repository: "us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/clustermesh-apiserver"
+ tag: "v1.17.1"
# clustermesh-apiserver-digest
digest: ""
useDigest: false
pullPolicy: "IfNotPresent"
-
+ # -- TCP port for the clustermesh-apiserver health API.
+ healthPort: 9880
+ # -- Configuration for the clustermesh-apiserver readiness probe.
+ readinessProbe: {}
etcd:
# The etcd binary is included in the clustermesh API server image, so the same image from above is reused.
# Independent override isn't supported, because clustermesh-apiserver is tested against the etcd version it is
@@ -2994,11 +3177,13 @@ charts:
# memory: 256Mi
# -- Security context to be added to clustermesh-apiserver etcd containers
- securityContext: {}
-
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
# -- lifecycle setting for the etcd container
lifecycle: {}
-
init:
# -- Specifies the resources for etcd init container in the apiserver
resources: {}
@@ -3011,43 +3196,48 @@ charts:
# -- Additional arguments to `clustermesh-apiserver etcdinit`.
extraArgs: []
-
# -- Additional environment variables to `clustermesh-apiserver etcdinit`.
extraEnv: []
-
+ # @schema
+ # enum: [Disk, Memory]
+ # @schema
+ # -- Specifies whether etcd data is stored in a temporary volume backed by
+ # the node's default medium, such as disk, SSD or network storage (Disk), or
+ # RAM (Memory). The Memory option enables improved etcd read and write
+ # performance at the cost of additional memory usage, which counts against
+ # the memory limits of the container.
+ storageMedium: Disk
kvstoremesh:
# -- Enable KVStoreMesh. KVStoreMesh caches the information retrieved
# from the remote clusters in the local etcd instance.
- enabled: false
-
+ enabled: true
+ # -- TCP port for the KVStoreMesh health API.
+ healthPort: 9881
+ # -- Configuration for the KVStoreMesh readiness probe.
+ readinessProbe: {}
# -- Additional KVStoreMesh arguments.
extraArgs: []
-
# -- Additional KVStoreMesh environment variables.
extraEnv: []
-
# -- Resource requests and limits for the KVStoreMesh container
resources: {}
- # requests:
- # cpu: 100m
- # memory: 64Mi
- # limits:
- # cpu: 1000m
- # memory: 1024M
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
# -- Additional KVStoreMesh volumeMounts.
extraVolumeMounts: []
-
# -- KVStoreMesh Security context
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
-
# -- lifecycle setting for the KVStoreMesh container
lifecycle: {}
-
service:
# -- The type of service used for apiserver access.
type: NodePort
@@ -3061,114 +3251,150 @@ charts:
# NodePort will be redirected to a local backend, regardless of whether the
# destination node belongs to the local or the remote cluster.
nodePort: 32379
- # -- Optional loadBalancer IP address to use with type LoadBalancer.
- # loadBalancerIP:
-
- # -- Annotations for the clustermesh-apiserver
- # For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal"
- # For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
+ # -- Annotations for the clustermesh-apiserver service.
+ # Example annotations to configure an internal load balancer on different cloud providers:
+ # * AKS: service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+ # * EKS: service.beta.kubernetes.io/aws-load-balancer-scheme: "internal"
+ # * GKE: networking.gke.io/load-balancer-type: "Internal"
annotations: {}
-
+ # @schema
+ # enum: [Local, Cluster]
+ # @schema
# -- The externalTrafficPolicy of service used for apiserver access.
- externalTrafficPolicy:
-
+ externalTrafficPolicy: Cluster
+ # @schema
+ # enum: [Local, Cluster]
+ # @schema
# -- The internalTrafficPolicy of service used for apiserver access.
- internalTrafficPolicy:
-
+ internalTrafficPolicy: Cluster
+ # @schema
+ # enum: [HAOnly, Always, Never]
+ # @schema
+ # -- Defines when to enable session affinity.
+ # Each replica in a clustermesh-apiserver deployment runs its own discrete
+ # etcd cluster. Remote clients connect to one of the replicas through a
+ # shared Kubernetes Service. A client reconnecting to a different backend
+ # will require a full resync to ensure data integrity. Session affinity
+ # can reduce the likelihood of this happening, but may not be supported
+ # by all cloud providers.
+ # Possible values:
+ # - "HAOnly" (default) Only enable session affinity for deployments with more than 1 replica.
+ # - "Always" Always enable session affinity.
+ # - "Never" Never enable session affinity. Useful in environments where
+ # session affinity is not supported, but may lead to slightly
+ # degraded performance due to more frequent reconnections.
+ enableSessionAffinity: "HAOnly"
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- Configure a loadBalancerClass.
+ # Allows to configure the loadBalancerClass on the clustermesh-apiserver
+ # LB service in case the Service type is set to LoadBalancer
+ # (requires Kubernetes 1.24+).
+ loadBalancerClass: ~
+ # @schema
+ # type: [null, string]
+ # @schema
+ # -- Configure a specific loadBalancerIP.
+ # Allows to configure a specific loadBalancerIP on the clustermesh-apiserver
+ # LB service in case the Service type is set to LoadBalancer.
+ loadBalancerIP: ~
+ # -- Configure loadBalancerSourceRanges.
+ # Allows to configure the source IP ranges allowed to access the
+ # clustermesh-apiserver LB service in case the Service type is set to LoadBalancer.
+ loadBalancerSourceRanges: []
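+        # Illustrative only (placeholder CIDRs):
+        # loadBalancerSourceRanges:
+        #   - "10.0.0.0/8"
+        #   - "192.168.0.0/16"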
# -- Number of replicas run for the clustermesh-apiserver deployment.
replicas: 1
-
# -- lifecycle setting for the apiserver container
lifecycle: {}
-
# -- terminationGracePeriodSeconds for the clustermesh-apiserver deployment
terminationGracePeriodSeconds: 30
-
# -- Additional clustermesh-apiserver arguments.
extraArgs: []
-
# -- Additional clustermesh-apiserver environment variables.
extraEnv: []
-
# -- Additional clustermesh-apiserver volumes.
extraVolumes: []
-
# -- Additional clustermesh-apiserver volumeMounts.
extraVolumeMounts: []
-
# -- Security context to be added to clustermesh-apiserver containers
- securityContext: {}
-
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
# -- Security context to be added to clustermesh-apiserver pods
- podSecurityContext: {}
-
+ podSecurityContext:
+ runAsNonRoot: true
+ runAsUser: 65532
+ runAsGroup: 65532
+ fsGroup: 65532
# -- Annotations to be added to clustermesh-apiserver pods
podAnnotations: {}
-
# -- Labels to be added to clustermesh-apiserver pods
podLabels: {}
-
# PodDisruptionBudget settings
podDisruptionBudget:
# -- enable PodDisruptionBudget
# ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
enabled: false
+ # @schema
+ # type: [null, integer, string]
+ # @schema
# -- Minimum number/percentage of pods that should remain scheduled.
# When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
minAvailable: null
+ # @schema
+ # type: [null, integer, string]
+ # @schema
# -- Maximum number/percentage of pods that may be made unavailable
maxUnavailable: 1
-
- # -- Resource requests and limits for the clustermesh-apiserver container of the clustermesh-apiserver deployment, such as
- # resources:
- # limits:
- # cpu: 1000m
- # memory: 1024M
- # requests:
- # cpu: 100m
- # memory: 64Mi
# -- Resource requests and limits for the clustermesh-apiserver
resources: {}
- # requests:
- # cpu: 100m
- # memory: 64Mi
- # limits:
- # cpu: 1000m
- # memory: 1024M
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+ # limits:
+ # cpu: 1000m
+ # memory: 1024M
# -- Affinity for clustermesh.apiserver
affinity:
podAntiAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - topologyKey: kubernetes.io/hostname
- labelSelector:
- matchLabels:
- k8s-app: clustermesh-apiserver
-
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchLabels:
+ k8s-app: clustermesh-apiserver
+ topologyKey: kubernetes.io/hostname
# -- Pod topology spread constraints for clustermesh-apiserver
topologySpreadConstraints: []
- # - maxSkew: 1
- # topologyKey: topology.kubernetes.io/zone
- # whenUnsatisfiable: DoNotSchedule
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
# -- Node labels for pod assignment
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
nodeSelector:
kubernetes.io/os: linux
-
# -- Node tolerations for pod assignment on nodes with taints
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations: []
-
# -- clustermesh-apiserver update strategy
updateStrategy:
type: RollingUpdate
rollingUpdate:
- maxUnavailable: 1
-
+ # @schema
+ # type: [integer, string]
+ # @schema
+ maxSurge: 1
+ # @schema
+ # type: [integer, string]
+ # @schema
+ maxUnavailable: 0
# -- The priority class to use for clustermesh-apiserver
priorityClassName: ""
-
tls:
# -- Configure the clustermesh authentication mode.
# Supported values:
@@ -3186,7 +3412,13 @@ charts:
# if provided manually. Cluster mode is meaningful only when the same
# CA is shared across all clusters part of the mesh.
authMode: legacy
-
+ # -- Allow users to provide their own certificates
+ # Users may need to provide their certificates using
+ # a mechanism that requires they provide their own secrets.
+ # This setting does not apply to any of the auto-generated
+ # mechanisms below, it only restricts the creation of secrets
+ # via the `tls-provided` templates.
+ enableSecrets: true
# -- Configure automatic TLS certificates generation.
# A Kubernetes CronJob is used the generate any
# certificates not provided by the user at installation
@@ -3250,20 +3482,17 @@ charts:
remote:
cert: ""
key: ""
-
# clustermesh-apiserver Prometheus metrics configuration
metrics:
# -- Enables exporting apiserver metrics in OpenMetrics format.
enabled: true
# -- Configure the port the apiserver metric server listens on.
port: 9962
-
kvstoremesh:
# -- Enables exporting KVStoreMesh metrics in OpenMetrics format.
enabled: true
# -- Configure the port the KVStoreMesh metric server listens on.
port: 9964
-
etcd:
# -- Enables exporting etcd metrics in OpenMetrics format.
enabled: true
@@ -3271,7 +3500,6 @@ charts:
mode: basic
# -- Configure the port the etcd metric server listens on.
port: 9963
-
serviceMonitor:
# -- Enable service monitor.
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
@@ -3286,32 +3514,46 @@ charts:
# -- Interval for scrape metrics (apiserver metrics)
interval: "10s"
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics)
relabelings: ~
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics)
metricRelabelings: ~
-
kvstoremesh:
# -- Interval for scrape metrics (KVStoreMesh metrics)
interval: "10s"
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics)
relabelings: ~
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics)
metricRelabelings: ~
-
etcd:
# -- Interval for scrape metrics (etcd metrics)
interval: "10s"
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics)
relabelings: ~
+ # @schema
+ # type: [null, array]
+ # @schema
# -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics)
metricRelabelings: ~
-
# -- Configure external workloads support
externalWorkloads:
# -- Enable support for external workloads, such as VMs (false by default).
enabled: false
-
# -- Configure cgroup related configuration
cgroup:
autoMount:
@@ -3324,33 +3566,36 @@ charts:
enabled: true
# -- Init Container Cgroup Automount resource limits & requests
resources: {}
- # limits:
- # cpu: 100m
- # memory: 128Mi
- # requests:
- # cpu: 100m
- # memory: 128Mi
- # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`)
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+ # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`)
hostRoot: /run/cilium/cgroupv2
-
+ # -- Configure sysctl override described in #20072.
+ sysctlfix:
+ # -- Enable the sysctl override. When enabled, the init container will mount the /proc of the host so that the `sysctlfix` utility can execute.
+ enabled: true
# -- Configure whether to enable auto detect of terminating state for endpoints
# in order to support graceful termination.
enableK8sTerminatingEndpoint: true
-
# -- Configure whether to unload DNS policy rules on graceful shutdown
# dnsPolicyUnloadOnShutdown: false
# -- Configure the key of the taint indicating that Cilium is not ready on the node.
# When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint on its decisions, allowing the cluster to scale up.
agentNotReadyTaintKey: "node.cilium.io/agent-not-ready"
-
dnsProxy:
+ # -- Timeout (in seconds) when closing the connection between the DNS proxy and the upstream server. If set to 0, the connection is closed immediately (with TCP RST). If set to -1, the connection is closed asynchronously in the background.
+ socketLingerTimeout: 10
# -- DNS response code for rejecting DNS requests, available options are '[nameError refused]'.
dnsRejectResponseCode: refused
# -- Allow the DNS proxy to compress responses to endpoints that are larger than 512 Bytes or the EDNS0 option, if present.
enableDnsCompression: true
# -- Maximum number of IPs to maintain per FQDN name for each endpoint.
- endpointMaxIpPerHostname: 50
+ endpointMaxIpPerHostname: 1000
# -- Time during which idle but previously active connections with expired DNS lookups are still considered alive.
idleConnectionGracePeriod: 0s
# -- Maximum number of IPs to retain for expired DNS lookups with still-active connections.
@@ -3368,12 +3613,12 @@ charts:
proxyResponseMaxDelay: 100ms
# -- DNS proxy operation mode (true/false, or unset to use version dependent defaults)
# enableTransparentMode: true
-
- # -- SCTP Configuration Values
+ # -- SCTP Configuration Values
sctp:
# -- Enable SCTP support. NOTE: Currently, SCTP support does not support rewriting ports or multihoming.
enabled: false
-
+ # -- Enable Non-Default-Deny policies
+ enableNonDefaultDenyPolicies: true
# Configuration for types of authentication for Cilium (beta)
authentication:
# -- Enable authentication processing and garbage collection.
@@ -3384,11 +3629,11 @@ charts:
queueSize: 1024
# -- Buffer size of the channel Cilium uses to receive certificate expiration events from auth handlers.
rotatedIdentitiesQueueSize: 1024
- # -- Interval for garbage collection of auth map entries.
+ # -- Interval for garbage collection of auth map entries.
gcInterval: "5m0s"
# Configuration for Cilium's service-to-service mutual authentication using TLS handshakes.
# Note that this is not full mTLS support without also enabling encryption of some form.
- # Current encryption options are Wireguard or IPSec, configured in encryption block above.
+ # Current encryption options are WireGuard or IPsec, configured in encryption block above.
mutual:
# -- Port on the agent where mutual authentication handshakes between agents will be performed
port: 4250
@@ -3411,21 +3656,29 @@ charts:
existingNamespace: false
# -- init container image of SPIRE agent and server
initImage:
+ # @schema
+ # type: [null, string]
+ # @schema
override: ~
- repository: "docker.io/library/busybox"
- tag: "1.36.1"
- digest: "sha256:223ae047b1065bd069aac01ae3ac8088b3ca4a527827e283b85112f29385fb1b"
- useDigest: true
+ repository: "us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/busybox"
+ tag: "v1.36.1"
+ digest: ""
+ useDigest: false
pullPolicy: "IfNotPresent"
# SPIRE agent configuration
agent:
+ # -- The priority class to use for the spire agent
+ priorityClassName: ""
# -- SPIRE agent image
image:
+ # @schema
+ # type: [null, string]
+ # @schema
override: ~
- repository: "ghcr.io/spiffe/spire-agent"
- tag: "1.8.5"
- digest: "sha256:99405637647968245ff9fe215f8bd2bd0ea9807be9725f8bf19fe1b21471e52b"
- useDigest: true
+ repository: "us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/spire-agent"
+ tag: "1.9.6"
+ digest: ""
+ useDigest: false
pullPolicy: "IfNotPresent"
# -- SPIRE agent service account
serviceAccount:
@@ -3435,6 +3688,8 @@ charts:
annotations: {}
# -- SPIRE agent labels
labels: {}
+ # -- container resource limits & requests
+ resources: {}
# -- SPIRE Workload Attestor kubelet verification.
skipKubeletVerification: true
# -- SPIRE agent tolerations configuration
@@ -3442,17 +3697,17 @@ charts:
# to allow the Cilium agent on this node to connect to SPIRE.
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations:
- - key: node.kubernetes.io/not-ready
- effect: NoSchedule
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- - key: node-role.kubernetes.io/control-plane
- effect: NoSchedule
- - key: node.cloudprovider.kubernetes.io/uninitialized
- effect: NoSchedule
- value: "true"
- - key: CriticalAddonsOnly
- operator: "Exists"
+ - key: node.kubernetes.io/not-ready
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: node-role.kubernetes.io/control-plane
+ effect: NoSchedule
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ effect: NoSchedule
+ value: "true"
+ - key: CriticalAddonsOnly
+ operator: "Exists"
# -- SPIRE agent affinity configuration
affinity: {}
# -- SPIRE agent nodeSelector configuration
@@ -3467,13 +3722,18 @@ charts:
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
securityContext: {}
server:
+ # -- The priority class to use for the spire server
+ priorityClassName: ""
# -- SPIRE server image
image:
+ # @schema
+ # type: [null, string]
+ # @schema
override: ~
- repository: "ghcr.io/spiffe/spire-server"
- tag: "1.8.5"
- digest: "sha256:28269265882048dcf0fed32fe47663cd98613727210b8d1a55618826f9bf5428"
- useDigest: true
+ repository: "us-docker.pkg.dev/palette-images/packs/cilium/1.17.1/spire-server"
+ tag: "1.9.6"
+ digest: ""
+ useDigest: false
pullPolicy: "IfNotPresent"
# -- SPIRE server service account
serviceAccount:
@@ -3486,6 +3746,8 @@ charts:
# -- SPIRE server labels
labels: {}
# SPIRE server service configuration
+ # -- container resource limits & requests
+ resources: {}
service:
# -- Service type for the SPIRE server service
type: ClusterIP
@@ -3509,6 +3771,9 @@ charts:
size: 1Gi
# -- Access mode of the SPIRE server data storage
accessMode: ReadWriteOnce
+ # @schema
+ # type: [null, string]
+ # @schema
# -- StorageClass of the SPIRE server data storage
storageClass: null
# -- Security context to be added to spire server pods.
@@ -3529,6 +3794,9 @@ charts:
country: "US"
organization: "SPIRE"
commonName: "Cilium SPIRE CA"
+ # @schema
+ # type: [null, string]
+ # @schema
# -- SPIRE server address used by Cilium Operator
#
# If k8s Service DNS along with port number is used (e.g. ..svc(.*): format),
@@ -3545,3 +3813,7 @@ charts:
agentSocketPath: /run/spire/sockets/agent/agent.sock
# -- SPIRE connection timeout
connectionTimeout: 30s
+ # -- Enable Internal Traffic Policy
+ enableInternalTrafficPolicy: true
+ # -- Enable LoadBalancer IP Address Management
+ enableLBIPAM: true
\ No newline at end of file
diff --git a/terraform/vmo-cluster/manifests/csi-values.yaml b/terraform/vmo-cluster/manifests/csi-values.yaml
index 9b64075..0586c4f 100644
--- a/terraform/vmo-cluster/manifests/csi-values.yaml
+++ b/terraform/vmo-cluster/manifests/csi-values.yaml
@@ -1,28 +1,47 @@
pack:
content:
- images: []
+ images:
+ - image: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/k8s-sidecar:v0.11.0
+ - image: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/cephcsi:v3.13.0
+ - image: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/csi-node-driver-registrar:v2.13.0
+ - image: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/csi-resizer:v1.13.1
+ - image: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/csi-provisioner:v5.1.0
+ - image: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/csi-snapshotter:v8.2.0
+ - image: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/csi-attacher:v4.8.0
+ - image: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/rook/ceph:v1.16.3
+ - image: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/ceph/ceph:v19.2.0
charts:
- repo: https://charts.rook.io/release
name: rook-release/rook-ceph
- version: 1.14.9
+ version: 1.16.3
- repo: https://charts.rook.io/release
name: rook-release/rook-ceph-cluster
- version: 1.14.9
+ version: 1.16.3
namespace: rook-ceph
namespaceLabels:
"rook-ceph": "pod-security.kubernetes.io/enforce=privileged,pod-security.kubernetes.io/enforce-version=v{{ .spectro.system.kubernetes.version | substr 0 4 }}"
+readinessCheck:
+ v1beta1:
+ - name: "rook-ceph"
+ namespace: "rook-ceph"
+ kind: "CephCluster"
+ group: "ceph.rook.io"
+ version: "v1"
+ expectedValue: "Ready"
+ keyToCheck: "status.phase"
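+      # Note: this readiness check makes the pack wait until the rook-ceph
+      # CephCluster resource reports status.phase "Ready".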
+
charts:
rook-ceph:
# Default values for rook-ceph-operator
image:
# -- Image
- repository: rook/ceph
+ repository: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/rook/ceph
# -- Image tag
# @default -- `master`
- tag: v1.14.9
+ tag: v1.16.3
# -- Image pull policy
pullPolicy: IfNotPresent
@@ -156,7 +175,7 @@ charts:
enableMetadata: false
# -- Set replicas for csi provisioner deployment
- provisionerReplicas: 2
+ provisionerReplicas: 1
# -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful
# in cases like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster
@@ -502,39 +521,39 @@ charts:
cephcsi:
# -- Ceph CSI image repository
- repository: quay.io/cephcsi/cephcsi
+ repository: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/cephcsi
# -- Ceph CSI image tag
- tag: v3.11.0
+ tag: v3.13.0
registrar:
# -- Kubernetes CSI registrar image repository
- repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
+ repository: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/csi-node-driver-registrar
# -- Registrar image tag
- tag: v2.10.1
+ tag: v2.13.0
provisioner:
# -- Kubernetes CSI provisioner image repository
- repository: registry.k8s.io/sig-storage/csi-provisioner
+ repository: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/csi-provisioner
# -- Provisioner image tag
- tag: v4.0.1
+ tag: v5.1.0
snapshotter:
# -- Kubernetes CSI snapshotter image repository
- repository: registry.k8s.io/sig-storage/csi-snapshotter
+ repository: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/csi-snapshotter
# -- Snapshotter image tag
- tag: v7.0.2
+ tag: v8.2.0
attacher:
# -- Kubernetes CSI Attacher image repository
- repository: registry.k8s.io/sig-storage/csi-attacher
+ repository: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/csi-attacher
# -- Attacher image tag
- tag: v4.5.1
+ tag: v4.8.0
resizer:
# -- Kubernetes CSI resizer image repository
- repository: registry.k8s.io/sig-storage/csi-resizer
+ repository: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/csi-resizer
# -- Resizer image tag
- tag: v1.10.1
+ tag: v1.13.1
# -- Image pull policy
imagePullPolicy: IfNotPresent
@@ -552,9 +571,9 @@ charts:
# -- Enable CSIAddons
enabled: false
# -- CSIAddons sidecar image repository
- repository: quay.io/csiaddons/k8s-sidecar
+ repository: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/k8s-sidecar
# -- CSIAddons sidecar image tag
- tag: v0.8.0
+ tag: v0.11.0
nfs:
# -- Enable the nfs csi driver
@@ -649,12 +668,21 @@ charts:
# cpu: 100m
# memory: 128Mi
- # -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
+ # -- Custom label to identify node hostname. If not set `kubernetes.io/hostname` will be used
+ customHostnameLabel:
+
+ # -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
hostpathRequiresPrivileged: false
+ # -- Whether to create all Rook pods to run on the host network, for example in environments where a CNI is not enabled
+ enforceHostNetwork: false
+
# -- Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
+ # -- The revision history limit for all pods created by Rook. If blank, the K8s default is 10.
+ revisionHistoryLimit:
+
# -- Blacklist certain disks according to the regex provided.
discoverDaemonUdev:
@@ -697,7 +725,7 @@ charts:
enabled: true
# -- Toolbox image, defaults to the image used by the Ceph cluster
image:
- #quay.io/ceph/ceph:v18.2.4
+ #us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/ceph/ceph:v19.2.0
# -- Toolbox tolerations
tolerations: []
@@ -725,6 +753,8 @@ charts:
# -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors.
# Monitoring requires Prometheus to be pre-installed
enabled: false
+ # -- Whether to disable the metrics reported by Ceph. If false, the prometheus mgr module and Ceph exporter are enabled
+ metricsDisabled: false
# -- Whether to create the Prometheus rules for Ceph alerts
createPrometheusRules: false
# -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
@@ -763,18 +793,18 @@ charts:
# For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
cephVersion:
# The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
- # v17 is Quincy, v18 is Reef.
+ # v18 is Reef, v19 is Squid
# RECOMMENDATION: In production, use a specific version tag instead of the general v18 flag, which pulls the latest release and could result in different
# versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
- # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v18.2.4-20240724
+ # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v19.2.0-20240927
# This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
- image: quay.io/ceph/ceph:v18.2.4
- # Whether to allow unsupported versions of Ceph. Currently `quincy`, and `reef` are supported.
- # Future versions such as `squid` (v19) would require this to be set to `true`.
+ image: us-docker.pkg.dev/palette-images/packs/rook-ceph-helm/1.16.3/ceph/ceph:v19.2.0
+ # Whether to allow unsupported versions of Ceph. Currently Reef and Squid are supported.
+ # Future versions such as Tentacle (v20) would require this to be set to `true`.
# Do not set to true in production.
allowUnsupported: false
- # The path on the host where configuration files will be persisted. Must be specified.
+ # The path on the host where configuration files will be persisted. Must be specified. If there are multiple clusters, the directory must be unique for each cluster.
# Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
# In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
dataDirHostPath: /var/lib/rook
@@ -812,8 +842,6 @@ charts:
# When higher availability of the mgr is needed, increase the count to 2.
# In that case, one mgr will be active and one in standby. When Ceph updates which
# mgr is active, Rook will update the mgr services to match the active mgr.
- count: 1
- allowMultiplePerNode: true
modules:
# List of modules to optionally enable or disable.
# Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR.
@@ -821,10 +849,12 @@ charts:
# enabled: true
# enable the ceph dashboard for viewing cluster status
+ count: 1
+ allowMultiplePerNode: true
dashboard:
enabled: true
# serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
- urlPrefix: /ceph-dashboard
+ # urlPrefix: /ceph-dashboard
# serve the dashboard at the given port.
# port: 8443
# Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
@@ -843,7 +873,7 @@ charts:
encryption:
enabled: false
# Whether to compress the data in transit across the wire. The default is false.
- # Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
+ # The kernel requirements above for encryption also apply to compression.
compression:
enabled: false
# Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
@@ -871,7 +901,7 @@ charts:
# enable the crash collector for ceph daemon crash collection
crashCollector:
- disable: true
+ disable: false
# Uncomment daysToRetain to prune ceph crash entries older than the
# specified number of days.
# daysToRetain: 30
@@ -1021,7 +1051,7 @@ charts:
memory: "50Mi"
# The option to automatically remove OSDs that are out and are safe to destroy.
- removeOSDsIfOutAndSafeToRemove: true
+ removeOSDsIfOutAndSafeToRemove: false
# priority classes to apply to ceph resources
priorityClassNames:
@@ -1092,20 +1122,24 @@ charts:
ingress:
# -- Enable an ingress for the ceph-dashboard
- dashboard:
- annotations:
- cert-manager.io/issuer: selfsigned-issuer
- nginx.ingress.kubernetes.io/backend-protocol: HTTPS
- nginx.ingress.kubernetes.io/server-snippet: |
- proxy_ssl_verify off;
- host:
- name: ceph-vmo-lab.maas-eng.sc
- path: "/ceph-dashboard/"
- tls:
- - hosts:
- - ceph-vmo-lab.maas-eng.sc
- secretName: ceph-dashboard-tls
- ingressClassName: nginx
+ dashboard: {}
+ # annotations:
+ # external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
+ # nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
+ # If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
+ # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+ # nginx.ingress.kubernetes.io/server-snippet: |
+ # proxy_ssl_verify off;
+ # host:
+ # name: dashboard.example.com
+ # path: "/ceph-dashboard(/|$)(.*)"
+ # tls:
+ # - hosts:
+ # - dashboard.example.com
+ # secretName: testsecret-tls
+ ## Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time
+ ## to set the ingress class
+ # ingressClassName: nginx
# -- A list of CephBlockPool configurations to deploy
# @default -- See [below](#ceph-block-pools)
@@ -1115,15 +1149,14 @@ charts:
spec:
failureDomain: host
replicated:
- size: ${worker_nodes}
+ size: 1
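+          # Note: a replica size of 1 lets the pool run on a single worker node,
+          # but provides no data redundancy.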
# Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
# For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
# enableRBDStats: true
storageClass:
enabled: true
name: ceph-block
- annotations:
- storageclass.kubevirt.io/is-default-virt-class: "true"
+ annotations: {}
labels: {}
isDefault: false
reclaimPolicy: Delete
@@ -1181,11 +1214,11 @@ charts:
spec:
metadataPool:
replicated:
- size: ${worker_nodes}
+ size: 1
dataPools:
- failureDomain: host
replicated:
- size: ${worker_nodes}
+ size: 1
# Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
name: data0
metadataServer:
@@ -1250,63 +1283,120 @@ charts:
# -- A list of CephObjectStore configurations to deploy
# @default -- See [below](#ceph-object-stores)
- cephObjectStores: []
- ## cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it
- ## For erasure coded a replicated metadata pool is required.
- ## https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
- #cephECBlockPools:
- # - name: ec-pool
- # spec:
- # metadataPool:
- # replicated:
- # size: 2
- # dataPool:
- # failureDomain: osd
- # erasureCoded:
- # dataChunks: 2
- # codingChunks: 1
- # deviceClass: hdd
- #
- # parameters:
- # # clusterID is the namespace where the rook cluster is running
- # # If you change this namespace, also change the namespace below where the secret namespaces are defined
- # clusterID: rook-ceph # namespace:cluster
- # # (optional) mapOptions is a comma-separated list of map options.
- # # For krbd options refer
- # # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
- # # For nbd options refer
- # # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
- # # mapOptions: lock_on_read,queue_depth=1024
- #
- # # (optional) unmapOptions is a comma-separated list of unmap options.
- # # For krbd options refer
- # # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
- # # For nbd options refer
- # # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
- # # unmapOptions: force
- #
- # # RBD image format. Defaults to "2".
- # imageFormat: "2"
- #
- # # RBD image features, equivalent to OR'd bitfield value: 63
- # # Available for imageFormat: "2". Older releases of CSI RBD
- # # support only the `layering` feature. The Linux kernel (KRBD) supports the
- # # full feature complement as of 5.4
- # # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
- # imageFeatures: layering
- #
- # storageClass:
- # provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name
- # enabled: true
- # name: rook-ceph-block
- # isDefault: false
- # annotations: { }
- # labels: { }
- # allowVolumeExpansion: true
- # reclaimPolicy: Delete
-
- # -- CSI driver name prefix for cephfs, rbd and nfs.
- # @default -- `namespace name where rook-ceph operator is deployed`
+ cephObjectStores:
+ - name: ceph-objectstore
+ # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
+ spec:
+ metadataPool:
+ failureDomain: host
+ replicated:
+ size: 1
+ dataPool:
+ failureDomain: host
+ erasureCoded:
+ dataChunks: 2
+ codingChunks: 1
+ parameters:
+ bulk: "true"
+ preservePoolsOnDelete: true
+ gateway:
+ port: 80
+ resources:
+ limits:
+ memory: "2Gi"
+ requests:
+ cpu: "1000m"
+ memory: "1Gi"
+ # securePort: 443
+ # sslCertificateRef:
+ instances: 1
+ priorityClassName: system-cluster-critical
+ # opsLogSidecar:
+ # resources:
+ # limits:
+ # memory: "100Mi"
+ # requests:
+ # cpu: "100m"
+ # memory: "40Mi"
+ storageClass:
+ enabled: true
+ name: ceph-bucket
+ reclaimPolicy: Delete
+ volumeBindingMode: "Immediate"
+ annotations: {}
+ labels: {}
+ # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
+ parameters:
+ # note: objectStoreNamespace and objectStoreName are configured by the chart
+ region: us-east-1
+ ingress:
+ # Enable an ingress for the ceph-objectstore
+ enabled: false
+ # annotations: {}
+ # host:
+ # name: objectstore.example.com
+ # path: /
+ # tls:
+ # - hosts:
+ # - objectstore.example.com
+ # secretName: ceph-objectstore-tls
+ # ingressClassName: nginx
+ ## cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it
+ ## For erasure coded a replicated metadata pool is required.
+ ## https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
+ #cephECBlockPools:
+ # - name: ec-pool
+ # spec:
+ # metadataPool:
+ # replicated:
+ # size: 2
+ # dataPool:
+ # failureDomain: osd
+ # erasureCoded:
+ # dataChunks: 2
+ # codingChunks: 1
+ # deviceClass: hdd
+ #
+ # parameters:
+ # # clusterID is the namespace where the rook cluster is running
+ # # If you change this namespace, also change the namespace below where the secret namespaces are defined
+ # clusterID: rook-ceph # namespace:cluster
+ # # (optional) mapOptions is a comma-separated list of map options.
+ # # For krbd options refer
+ # # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
+ # # For nbd options refer
+ # # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
+ # # mapOptions: lock_on_read,queue_depth=1024
+ #
+ # # (optional) unmapOptions is a comma-separated list of unmap options.
+ # # For krbd options refer
+ # # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
+ # # For nbd options refer
+ # # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
+ # # unmapOptions: force
+ #
+ # # RBD image format. Defaults to "2".
+ # imageFormat: "2"
+ #
+ # # RBD image features, equivalent to OR'd bitfield value: 63
+ # # Available for imageFormat: "2". Older releases of CSI RBD
+ # # support only the `layering` feature. The Linux kernel (KRBD) supports the
+ # # full feature complement as of 5.4
+ # # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
+ # imageFeatures: layering
+ #
+ # storageClass:
+ # provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name
+ # enabled: true
+ # name: rook-ceph-block
+ # isDefault: false
+ # annotations: { }
+ # labels: { }
+ # allowVolumeExpansion: true
+ # reclaimPolicy: Delete
+
+ # -- CSI driver name prefix for cephfs, rbd and nfs.
+ # @default -- `namespace name where rook-ceph operator is deployed`
csiDriverNamePrefix:
configOverride: |
[global]
@@ -1314,4 +1404,4 @@ charts:
mon_warn_on_pool_no_redundancy = false
bdev_flock_retry = 20
bluefs_buffered_io = false
- mon_data_avail_warn = 10
+ mon_data_avail_warn = 10
\ No newline at end of file
diff --git a/terraform/vmo-cluster/manifests/k8s-values.yaml b/terraform/vmo-cluster/manifests/k8s-values.yaml
index 6a2c879..acd9fbc 100644
--- a/terraform/vmo-cluster/manifests/k8s-values.yaml
+++ b/terraform/vmo-cluster/manifests/k8s-values.yaml
@@ -11,10 +11,10 @@ pack:
- image: registry.k8s.io/pause:3.8
#CIDR Range for Pods in cluster
# Note : This must not overlap with any of the host or service network
- podCIDR: ${pod-cidr} #"192.168.0.0/16"
+ podCIDR: "100.64.0.0/18"
#CIDR notation IP range from which to assign service cluster IPs
# Note : This must not overlap with any IP ranges assigned to nodes for pods.
- serviceClusterIpRange: ${clusterServicesCIDR} #"10.96.0.0/12"
+ serviceClusterIpRange: "100.64.64.0/18"
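+  # Note: the pod and service CIDRs above are non-overlapping /18 ranges; they
+  # must also not overlap with the host (node) network.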
palette:
config:
dashboard:
@@ -121,4 +121,4 @@ kubeadmconfig:
#oidc-issuer-url: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-issuer-url }}"
#oidc-client-id: "{{ .spectro.pack.kubernetes.kubeadmconfig.apiServer.extraArgs.oidc-client-id }}"
#oidc-client-secret: 1gsranjjmdgahm10j8r6m47ejokm9kafvcbhi3d48jlc3rfpprhv
- #oidc-extra-scope: profile,email
+ #oidc-extra-scope: profile,email
\ No newline at end of file
diff --git a/terraform/vmo-cluster/manifests/metallb-values.yaml b/terraform/vmo-cluster/manifests/metallb-values.yaml
index 78d00af..30af484 100644
--- a/terraform/vmo-cluster/manifests/metallb-values.yaml
+++ b/terraform/vmo-cluster/manifests/metallb-values.yaml
@@ -19,7 +19,7 @@ charts:
first-pool:
spec:
addresses:
- - ${metallb-ip-pool} ###.###.###.### # Add your static IP addresses for MetalLB
+ - 10.11.130.129-10.11.130.131
# - 192.168.100.50-192.168.100.60
avoidBuggyIPs: true
autoAssign: true
@@ -380,4 +380,4 @@ charts:
# mode.
enabled: false
external: false
- namespace: ""
+ namespace: ""
\ No newline at end of file
diff --git a/terraform/vmo-cluster/manifests/ubuntu-values.yaml b/terraform/vmo-cluster/manifests/ubuntu-values.yaml
index e7aeb8f..427d5dc 100644
--- a/terraform/vmo-cluster/manifests/ubuntu-values.yaml
+++ b/terraform/vmo-cluster/manifests/ubuntu-values.yaml
@@ -5,12 +5,13 @@ kubeadmconfig:
- apt update
- apt install -y grepcidr
- |
- NETWORKS=${maas-host-cidr} #"###.###.###.###/###"
+ # Enter as a CIDR '10.11.130.0/24'
+ NETWORKS=${node-network}
IPS=$(hostname -I)
for IP in $IPS
do
- echo "$IP" | grepcidr "$NETWORKS" >/dev/null && echo " --node-ip=$IP" >> /etc/default/kubelet
- if [ $? == 0 ]; then break; fi
+ echo "$IP" | grepcidr "$NETWORKS" >/dev/null && echo " --node-ip=$IP" >> /etc/default/kubelet
+ if [ $? == 0 ]; then break; fi
done
# Increase audit_backlog_limit
- sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="audit_backlog_limit=256"/g' /etc/default/grub
diff --git a/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml b/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
index 7cc94dc..6244567 100644
--- a/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
+++ b/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
@@ -6,16 +6,14 @@ spec:
description: Ubuntu 22.04
displayName: Ubuntu 22.04
icon: https://s3.amazonaws.com/manifests.spectrocloud.com/logos/ubuntu.png
- running: false
dataVolumeTemplates:
- metadata:
name: ubuntu-2204
spec:
source:
- pvc:
+ pvc:
name: template-ubuntu-2204
namespace: vmo-golden-images
- #storage: (errors in VMO GUI)
pvc:
accessModes:
- ReadWriteMany
@@ -39,12 +37,12 @@ spec:
threads: 1
devices:
disks:
- - disk:
- bus: virtio
- name: datavolume-os
- - disk:
- bus: virtio
- name: cloudinitdisk
+ - disk:
+ bus: virtio
+ name: datavolume-os
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
interfaces:
- masquerade: {}
name: default
@@ -58,37 +56,37 @@ spec:
requests:
memory: 2Gi
networks:
- - name: default
- pod: {}
+ - name: default
+ pod: {}
volumes:
- - dataVolume:
- name: ubuntu-2204
- name: datavolume-os
- - cloudInitNoCloud:
- userData: |
- #cloud-config
- ssh_pwauth: True
- chpasswd: { expire: False }
- password: spectro
- disable_root: false
- runcmd:
- - apt-get update
- - apt-get install -y qemu-guest-agent
- - systemctl start qemu-guest-agent
- - |
- apt-get -y install ca-certificates curl
- install -m 0755 -d /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
- chmod a+r /etc/apt/keyrings/docker.asc
- echo \
- "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
- $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
- tee /etc/apt/sources.list.d/docker.list > /dev/null
- apt-get update
- apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- groupadd docker
- gpasswd -a ubuntu docker
- name: cloudinitdisk
+ - dataVolume:
+ name: ubuntu-2204
+ name: datavolume-os
+ - cloudInitNoCloud:
+ userData: |
+ #cloud-config
+ ssh_pwauth: True
+ chpasswd: { expire: False }
+ password: spectro
+ disable_root: false
+ runcmd:
+ - apt-get update
+ - apt-get install -y qemu-guest-agent
+ - systemctl start qemu-guest-agent
+ - |
+ apt-get -y install ca-certificates curl
+ install -m 0755 -d /etc/apt/keyrings
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+ chmod a+r /etc/apt/keyrings/docker.asc
+ echo \
+ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+ $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+ tee /etc/apt/sources.list.d/docker.list > /dev/null
+ apt-get update
+ apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+ groupadd docker
+ gpasswd -a ubuntu docker
+ name: cloudinitdisk
---
apiVersion: cdi.kubevirt.io/v1beta1
kind: DataVolume
@@ -116,11 +114,10 @@ kind: StorageProfile
metadata:
name: ceph-filesystem
spec:
- claimPropertySets:
- - accessModes:
- - ReadWriteMany
- volumeMode:
- Filesystem
+ claimPropertySets:
+ - accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
cloneStrategy: csi-clone
---
apiVersion: cdi.kubevirt.io/v1beta1
@@ -128,9 +125,8 @@ kind: StorageProfile
metadata:
name: ceph-block
spec:
- claimPropertySets:
- - accessModes:
- - ReadWriteMany
- volumeMode:
- Block
- cloneStrategy: csi-clone
+ claimPropertySets:
+ - accessModes:
+ - ReadWriteMany
+ volumeMode: Block
+ cloneStrategy: csi-clone
\ No newline at end of file
diff --git a/terraform/vmo-cluster/manifests/vmo-values.yaml b/terraform/vmo-cluster/manifests/vmo-values.yaml
index 6be493e..cd4752f 100644
--- a/terraform/vmo-cluster/manifests/vmo-values.yaml
+++ b/terraform/vmo-cluster/manifests/vmo-values.yaml
@@ -81,13 +81,13 @@ charts:
tag: "latest"
env:
# Which bridge interface to control
- bridgeIF: ${vmo-network-interface} #"br0"
+ bridgeIF: "br0"
# Beginning of VLAN range to enable
- allowedVlans: ${vm-vlans} # "1"
+ allowedVlans: "1"
# Set to "true" to enable VLANs on the br0 interface for the host to use itself
allowVlansOnSelf: "true"
# Beginning of VLAN range to enable for use by the node itself
- allowedVlansOnSelf: ${host-vlans} #"1"
+ allowedVlansOnSelf: "1"
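+      # Note: the bridge interface and VLAN values above are environment-specific
+      # and should match the bridge configuration on the MAAS hosts.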
snapshot-controller:
enabled: true
replicas: 1
diff --git a/terraform/vmo-cluster/provider.tf b/terraform/vmo-cluster/provider.tf
index 8294ea0..8ed924f 100644
--- a/terraform/vmo-cluster/provider.tf
+++ b/terraform/vmo-cluster/provider.tf
@@ -4,7 +4,7 @@
terraform {
required_providers {
spectrocloud = {
- version = ">= 0.22.2"
+ version = "0.23.6"
source = "spectrocloud/spectrocloud"
}
diff --git a/terraform/vmo-cluster/terraform.tfvars b/terraform/vmo-cluster/terraform.tfvars
index 95faf86..c23f191 100644
--- a/terraform/vmo-cluster/terraform.tfvars
+++ b/terraform/vmo-cluster/terraform.tfvars
@@ -4,78 +4,68 @@
#####################
# Palette Settings
#####################
-palette-project = "Default" # The name of your project in Palette.
+palette-project = "Default" # The name of your project in Palette.
############################
# MAAS Deployment Settings
############################
-deploy-maas = true # Set to true to deploy to a new VMO cluster to MAAS.
-deploy-maas-vm = false # Set to true to create a VM on MAAS VMO cluster once deployed.
-
-pcg-name = "maas-pcg" # Provide the name of the PCG that will be used to deploy the Palette cluster.
-maas-domain = "maas.sc" # Provide the MAAS domain that will be used to deploy the Palette cluster.
-
-maas-worker-nodes = 1 # Provide the number of worker nodes that will be used for the Palette cluster.
-maas-worker-resource-pool = "bm-generic" # Provide a resource pool for the worker nodes.
-maas-worker-azs = ["default"] # Provide a set of availability zones for the worker nodes.
-maas-worker-node-tags = ["docs"] # Provide a set of node tags for the worker nodes.
-
-maas-control-plane-nodes = 1 # Provide the number of control plane nodes that will be used for the Palette cluster.
-maas-control-plane-resource-pool = "Palette-Sustaining" # Provide a resource pool for the control plane nodes.
-maas-control-plane-azs = ["az1"] # Provide a set of availability zones for the control plane nodes.
-maas-control-plane-node-tags = ["docs-cp"] # Provide a set of node tags for the control plane nodes.
-
-ctl-node-min-cpu = "6" # Minimum number of CPU cores required for control plane nodes
-ctl-node-min-memory-mb = "16384" # Minimum amount of RAM (memory) required for control plane nodes
-wrk-node-min-cpu = "8" # Minimum number of CPU cores required for worker nodes
-wrk-node-min-memory-mb = "16384" # Minimum amount of RAM (memory) required for worker nodes
-
+deploy-maas = true # Set to true to deploy a new VMO cluster to MAAS.
+deploy-maas-vm = true # Set to true to create a VM on the MAAS VMO cluster once it is deployed.
+pcg-name = "maas-pcg" # Provide the name of the PCG that will be used to deploy the Palette cluster.
+maas-domain = "maas.sc" # Provide the MAAS domain that will be used to deploy the Palette cluster.
+maas-control-plane-nodes = 1 # Provide the number of control plane nodes that will be used for the Palette cluster.
+maas-control-plane-resource-pool = "Palette-Sustaining" # Provide a resource pool for the control plane nodes.
+maas-control-plane-azs = ["az1"] # Provide a set of availability zones for the control plane nodes.
+maas-control-plane-node-tags = ["docs-cp"] # Provide a set of node tags for the control plane nodes.
+ctl-node-min-cpu = 6 # Minimum number of CPU cores required for control plane nodes
+ctl-node-min-memory-mb = 8096 # Minimum amount of RAM (memory) required for control plane nodes
+maas-worker-nodes = 1 # Provide the number of worker nodes that will be used for the Palette cluster.
+maas-worker-resource-pool = "vmo-validation" # Provide a resource pool for the worker nodes.
+maas-worker-azs = ["az1"] # Provide a set of availability zones for the worker nodes.
+maas-worker-node-tags = ["docs"] # Provide a set of node tags for the worker nodes.
+wrk-node-min-cpu = 8 # Minimum number of CPU cores required for worker nodes
+wrk-node-min-memory-mb = 16384 # Minimum amount of RAM (memory) required for worker nodes
#####################
# cluster_profiles.tf
#####################
-vmo_cluster_name = "vmo-cluster-maas"
-clusterProfileType = "Full" # Infrastructure, Full, or Add-on
-clusterProfileVersion = 1.0.0 # Version number for the cluster profile in Palette
-
-
-####################
-# ubuntu-values.tf
-#####################
-maas-host-cidr = "10.11.110.130/24"
+vmo-cluster-name = "vmo-cluster-maas"
+cluster-profile-type = "Full" # Infrastructure, Full, or Add-on
+cluster-profile-version = "1.0.0" # Version number for the cluster profile in Palette
#####################
# vmo-values.tf
-#####################
-vmo-network-interface = "br0"
-vm-vlans = ["1"]
-host-vlans = ["1"]
-
-
+####################
+vmo-network-interface = ["br0"]
+vm-vlans = 1
+host-vlans = 1
###########################
# manifests/k8s-values.yaml
###########################
-pod-CIDR = "100.64.0.0/16" # Set the subnet that your pods will run on
-clusterServicesCIDR = "100.64.64.0/16"
-
+pod-CIDR = ["100.64.0.0/18"] # Set the subnet that your pods will run on
+cluster-services-CIDR = ["100.64.64.0/18"]
###############################
# manifests/metallb-values.yaml
###############################
-metallb-ip-pool = "10.11.130.129-10.11.130.131" # IP addresses to be assigned for use by MetalLB
+metallb-ip-pool = ["10.11.130.129-10.11.130.131"] # IP addresses to be assigned for use by MetalLB
+###############################
+# manifests/ubuntu-values.yaml
+###############################
+node-network = "10.11.130.129-10.11.130.131" # The network the Ubuntu nodes will use, as a CIDR or IP range.
#####################
# virtual_machines.tf
#####################
-vm-deploy-namespace = "default" # Namespace where your VM will be deployed.
-vm-deploy-name = "vmo-vm" # The name of your VM
-vm-labels = ["my-vmo-vm"] # Labels that will be applied to your VM. For this tutorial, use a single label.
-vm-storage-Gi = "50Gi" # Size of the disk (PVC) that your VM will have.
-vm-cpu-cores = 2 # Number of CPU cores your VM will have.
-vm-cpu-sockets = 1 # Number of physical CPU sockets the CPU cores should be spread over.
-vm-cpu-threads = 2 # Number of CPU threads to use for the VM CPU
-vm-memory-Gi = "4Gi" # Amount of RAM (memory) your VM will have
+vm-deploy-namespace = "virtual-machines" # Namespace where your VM will be deployed.
+vm-deploy-name = "vmo-vm" # The name of your VM
+vm-labels = ["my-vmo-vm"] # Labels that will be applied to your VM.
+vm-storage-Gi = "50Gi" # Size of the disk (PVC) that your VM will have.
+vm-cpu-cores = 2 # Number of CPU cores your VM will have.
+vm-cpu-sockets = 1 # Number of physical CPU sockets the CPU cores should be spread over.
+vm-cpu-threads = 2 # Number of CPU threads to use for the VM CPU
+vm-memory-Gi = "4Gi" # Amount of RAM (memory) your VM will have
\ No newline at end of file
diff --git a/terraform/vmo-cluster/virtual_machines.tf b/terraform/vmo-cluster/virtual_machines.tf
index 8e9272a..8ac1996 100644
--- a/terraform/vmo-cluster/virtual_machines.tf
+++ b/terraform/vmo-cluster/virtual_machines.tf
@@ -18,7 +18,7 @@ resource "spectrocloud_virtual_machine" "virtual-machine" {
}
labels = {
- labels = var.vm-labels
+ #var.vm-labels
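+    # Note: Kubernetes labels are string key/value pairs, so the vm-labels
+    # set(string) variable cannot be assigned here directly; only the
+    # kubevirt.io/vm label below is applied.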
# "tf" = "spectrocloud-tutorials"
"kubevirt.io/vm" = "ubuntu-tutorial-vm"
}
From 8ff7d8f85a17586057633459cae4f0535ae9fa3e Mon Sep 17 00:00:00 2001
From: kenheaslip-sc
Date: Wed, 11 Jun 2025 11:05:13 -0400
Subject: [PATCH 06/11] PR-Review
---
terraform/vmo-cluster/README.md | 83 ++++++++++++-------
terraform/vmo-cluster/cluster_profiles.tf | 40 ++++-----
terraform/vmo-cluster/inputs.tf | 18 +++-
.../manifests/vmo-extras-manifest.yaml | 27 +++++-
terraform/vmo-cluster/terraform.tfvars | 73 ++++++++--------
.../maas-cluster-replace-values.tftest.hcl | 14 +++-
.../tests/maas-cluster-vm.tftest.hcl | 8 ++
.../virtual-machine-missing-values.tftest.hcl | 33 ++++++++
...virtual-machines-replace-values.tftest.hcl | 35 ++++++++
terraform/vmo-cluster/virtual_machines.tf | 18 ++--
10 files changed, 252 insertions(+), 97 deletions(-)
create mode 100644 terraform/vmo-cluster/tests/virtual-machine-missing-values.tftest.hcl
create mode 100644 terraform/vmo-cluster/tests/virtual-machines-replace-values.tftest.hcl
diff --git a/terraform/vmo-cluster/README.md b/terraform/vmo-cluster/README.md
index f62ee05..df72fa0 100644
--- a/terraform/vmo-cluster/README.md
+++ b/terraform/vmo-cluster/README.md
@@ -19,16 +19,14 @@ To get started, open the **terraform.tfvars** file. Toggle the provider variable
|------|---------|
| [terraform](#requirement\_terraform) | >= 1.9 |
| [local](#requirement\_local) | 2.4.1 |
-| [spectrocloud](#requirement\_spectrocloud) | >= 0.22.2 |
+| [spectrocloud](#requirement\_spectrocloud) | 0.23.6 |
| [tls](#requirement\_tls) | 4.0.4 |
## Providers
| Name | Version |
|------|---------|
-| [local](#provider\_local) | 2.4.1 |
-| [spectrocloud](#provider\_spectrocloud) | 0.22.2 |
-| [tls](#provider\_tls) | 4.0.4 |
+| [spectrocloud](#provider\_spectrocloud) | 0.23.6 |
## Modules
@@ -38,37 +36,62 @@ No modules.
| Name | Type |
|------|------|
-| [spectrocloud_cluster_profile.maas-vmo-profile](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_profile) | resource |
-| [spectrocloud_cluster_maas.maas-cluster](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/cluster_maas) | resource |
-| [spectrocloud_virtual_machine.virtual-machine](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/resources/virtual_machine) | resource |
-| [spectrocloud_cloudaccount_maas.account](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/cloudaccount_maas) | data source |
-| [spectrocloud_pack.maas_vmo](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
-| [spectrocloud_pack.maas_cni](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
-| [spectrocloud_pack.maas_csi](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
-| [spectrocloud_pack.maas_k8s](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
-| [spectrocloud_pack.maas_ubuntu](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/pack) | data source |
-| [spectrocloud_cluster.maas_vmo_cluster](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/cluster) | data source |
-| [spectrocloud_registry.public_registry](https://registry.terraform.io/providers/spectrocloud/spectrocloud/latest/docs/data-sources/registry) | data source |
+| [spectrocloud_cluster_maas.maas-cluster](https://registry.terraform.io/providers/spectrocloud/spectrocloud/0.23.6/docs/resources/cluster_maas) | resource |
+| [spectrocloud_cluster_profile.maas-vmo-profile](https://registry.terraform.io/providers/spectrocloud/spectrocloud/0.23.6/docs/resources/cluster_profile) | resource |
+| [spectrocloud_virtual_machine.virtual-machine](https://registry.terraform.io/providers/spectrocloud/spectrocloud/0.23.6/docs/resources/virtual_machine) | resource |
+| [spectrocloud_cloudaccount_maas.account](https://registry.terraform.io/providers/spectrocloud/spectrocloud/0.23.6/docs/data-sources/cloudaccount_maas) | data source |
+| [spectrocloud_cluster.maas_vmo_cluster](https://registry.terraform.io/providers/spectrocloud/spectrocloud/0.23.6/docs/data-sources/cluster) | data source |
+| [spectrocloud_pack.maas_cni](https://registry.terraform.io/providers/spectrocloud/spectrocloud/0.23.6/docs/data-sources/pack) | data source |
+| [spectrocloud_pack.maas_csi](https://registry.terraform.io/providers/spectrocloud/spectrocloud/0.23.6/docs/data-sources/pack) | data source |
+| [spectrocloud_pack.maas_k8s](https://registry.terraform.io/providers/spectrocloud/spectrocloud/0.23.6/docs/data-sources/pack) | data source |
+| [spectrocloud_pack.maas_metallb](https://registry.terraform.io/providers/spectrocloud/spectrocloud/0.23.6/docs/data-sources/pack) | data source |
+| [spectrocloud_pack.maas_ubuntu](https://registry.terraform.io/providers/spectrocloud/spectrocloud/0.23.6/docs/data-sources/pack) | data source |
+| [spectrocloud_pack.maas_vmo](https://registry.terraform.io/providers/spectrocloud/spectrocloud/0.23.6/docs/data-sources/pack) | data source |
+| [spectrocloud_registry.public_registry](https://registry.terraform.io/providers/spectrocloud/spectrocloud/0.23.6/docs/data-sources/registry) | data source |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
-| [deploy-maas](#input\_deploy-maas) | A flag for enabling a cluster deployment on MAAS. | `bool` | n/a | yes |
-| [deploy-maas-vm](#input\_deploy-maas-vm) | A flag for enabling a VM creation on a MAAS cluster. | `bool` | n/a | yes |
-| [pcg-name](#input\_pcg-name) | The name of the PCG that will be used to deploy the cluster. | `string` | n/a | yes |
-| [maas-domain](#input\_maas-domain) | The MaaS domain that will be used to deploy the cluster. | `string` | n/a | yes |
-| [maas-worker-nodes](#input\_maas-worker-nodes) | The number of worker nodes that will be used to deploy the cluster. | `number` | 1 | yes |
-| [maas-control-plane-nodes](#input\_maas-control-plane-nodes) | The number of control plane nodes that will be used to deploy the cluster. | `number` | 1 | yes |
-| [maas-worker-resource-pool](#input\_maas-worker-resource-pool) | The resource pool to deploy the worker nodes to. | `string` | n/a | yes |
-| [maas-control-plane-resource-pool](#input\_maas-control-plane-resource-pool) | The resource pool to deploy the control plane nodes to. | `string` | n/a | yes |
-| [maas-worker-azs](#input\_maas-worker-azs) | The set of availability zones to deploy the worker nodes to. | `set(string)` | n/a | yes |
-| [maas-control-plane-azs](#input\_maas-control-plane-azs) | The set of availability zones to deploy the control plane nodes to. | `set(string)` | n/a | yes |
-| [maas-worker-node-tags](#input\_maas-worker-node-tags) | The set of tag values that you want to apply to all nodes in the node worker pool. | `set(string)` | n/a | yes |
-| [maas-control-plane-node-tags](#input\_maas-control-plane-node-tags) | The set of tag values that you want to apply to all nodes in the node control plane pool. | `set(string)` | n/a | yes |
-| [tags](#input\_tags) | The default tags to apply to Palette resources. | `list(string)` | [
"spectro-cloud-education",
"spectrocloud:tutorials",
"terraform_managed:true",
"tutorial:vmo-cluster-deployment"
]
| no |
+| [cluster-profile-type](#input\_cluster-profile-type) | The type of the cluster profile. | `string` | n/a | yes |
+| [cluster-profile-version](#input\_cluster-profile-version) | The version number for the cluster profile in Palette. | `string` | n/a | yes |
+| [cluster-services-CIDR](#input\_cluster-services-CIDR) | CIDR notation subnets for cluster services ex. 192.168.1.0/24. | `set(string)` | n/a | yes |
+| [ctl-node-min-cpu](#input\_ctl-node-min-cpu) | Minimum number of CPU cores allocated to the Control Plane node. | `number` | n/a | yes |
+| [ctl-node-min-memory-mb](#input\_ctl-node-min-memory-mb) | Minimum amount of RAM allocated to the Control Plane node. | `number` | n/a | yes |
+| [deploy-maas](#input\_deploy-maas) | A flag for enabling a deployment on MAAS. | `bool` | n/a | yes |
+| [deploy-maas-vm](#input\_deploy-maas-vm) | A flag for enabling a VM creation on the MAAS cluster. | `bool` | n/a | yes |
+| [host-vlans](#input\_host-vlans) | Node allowed VLANs. | `number` | `1` | no |
+| [maas-control-plane-azs](#input\_maas-control-plane-azs) | Set of AZs for the MAAS control plane nodes. | `set(string)` | n/a | yes |
+| [maas-control-plane-node-tags](#input\_maas-control-plane-node-tags) | Set of node tags for the MAAS control plane nodes. | `set(string)` | n/a | yes |
+| [maas-control-plane-nodes](#input\_maas-control-plane-nodes) | Number of MAAS control plane nodes. | `number` | `1` | no |
+| [maas-control-plane-resource-pool](#input\_maas-control-plane-resource-pool) | Resource pool for the MAAS control plane nodes. | `string` | n/a | yes |
+| [maas-domain](#input\_maas-domain) | MAAS domain | `string` | n/a | yes |
+| [maas-worker-azs](#input\_maas-worker-azs) | Set of AZs for the MAAS worker nodes. | `set(string)` | n/a | yes |
+| [maas-worker-node-tags](#input\_maas-worker-node-tags) | Set of node tags for the MAAS worker nodes. | `set(string)` | n/a | yes |
+| [maas-worker-nodes](#input\_maas-worker-nodes) | Number of MAAS worker nodes. | `number` | `1` | no |
+| [maas-worker-resource-pool](#input\_maas-worker-resource-pool) | Resource pool for the MAAS worker nodes. | `string` | n/a | yes |
+| [metallb-ip-pool](#input\_metallb-ip-pool) | CIDR notation subnets or IP range ex. 192.168.1.0/24 or 192.168.1.0-192.168.1.255 | `set(string)` | n/a | yes |
+| [node-network](#input\_node-network) | The subnet the Ubuntu nodes will use. | `string` | n/a | yes |
+| [palette-project](#input\_palette-project) | The name of your project in Palette. | `string` | n/a | yes |
+| [palette-user-id](#input\_palette-user-id) | Your Palette user ID. | `string` | n/a | yes |
+| [pcg-name](#input\_pcg-name) | The name of the PCG that will be used to deploy the cluster. | `string` | n/a | yes |
+| [pod-CIDR](#input\_pod-CIDR) | CIDR notation subnets for the pod network ex. 192.168.1.0/24. | `set(string)` | n/a | yes |
+| [tags](#input\_tags) | The default tags to apply to Palette resources. | `list(string)` | ["spectro-cloud-education", "spectrocloud:tutorials", "terraform_managed:true", "tutorial:vmo-cluster-deployment"] | no |
+| [vm-cpu-cores](#input\_vm-cpu-cores) | Number of CPU cores to allocate to your VM. | `number` | `1` | no |
+| [vm-cpu-sockets](#input\_vm-cpu-sockets) | Number of CPU sockets to allocate to your VM. | `number` | `1` | no |
+| [vm-cpu-threads](#input\_vm-cpu-threads) | Number of CPU threads to allocate to your VM. | `number` | `1` | no |
+| [vm-deploy-name](#input\_vm-deploy-name) | The name of your VM. | `string` | n/a | yes |
+| [vm-deploy-namespace](#input\_vm-deploy-namespace) | The namespace where your VMs will be deployed. | `string` | n/a | yes |
+| [vm-labels](#input\_vm-labels) | Labels that will be applied to your VM. | `set(string)` | n/a | yes |
+| [vm-memory-Gi](#input\_vm-memory-Gi) | The amount of memory (RAM) to allocate to your VM in Gi. | `string` | n/a | yes |
+| [vm-storage-Gi](#input\_vm-storage-Gi) | The amount of storage to provision for your VM in Gi. | `string` | n/a | yes |
+| [vm-vlans](#input\_vm-vlans) | VM allowed VLANs. | `number` | `1` | no |
+| [vmo-cluster-name](#input\_vmo-cluster-name) | The name of the cluster. | `string` | n/a | yes |
+| [vmo-network-interface](#input\_vmo-network-interface) | The network interface VMO will use for VM traffic. | `set(string)` | n/a | yes |
+| [wrk-node-min-cpu](#input\_wrk-node-min-cpu) | Minimum number of CPU cores allocated to the worker node. | `number` | n/a | yes |
+| [wrk-node-min-memory-mb](#input\_wrk-node-min-memory-mb) | Minimum amount of RAM allocated to the worker node. | `number` | n/a | yes |
## Outputs
-No outputs.
-
+No outputs.
+
\ No newline at end of file
diff --git a/terraform/vmo-cluster/cluster_profiles.tf b/terraform/vmo-cluster/cluster_profiles.tf
index b923767..c82f549 100644
--- a/terraform/vmo-cluster/cluster_profiles.tf
+++ b/terraform/vmo-cluster/cluster_profiles.tf
@@ -13,24 +13,24 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
version = var.cluster-profile-version
pack {
- name = data.spectrocloud_pack.maas_ubuntu.name
- tag = data.spectrocloud_pack.maas_ubuntu.version
- uid = data.spectrocloud_pack.maas_ubuntu.id
+ name = data.spectrocloud_pack.maas_ubuntu.name
+ tag = data.spectrocloud_pack.maas_ubuntu.version
+ uid = data.spectrocloud_pack.maas_ubuntu.id
values = templatefile("manifests/ubuntu-values.yaml", {
node-network = var.node-network
})
- type = "spectro"
+ type = "spectro"
}
pack {
- name = data.spectrocloud_pack.maas_k8s.name
- tag = data.spectrocloud_pack.maas_k8s.version
- uid = data.spectrocloud_pack.maas_k8s.id
+ name = data.spectrocloud_pack.maas_k8s.name
+ tag = data.spectrocloud_pack.maas_k8s.version
+ uid = data.spectrocloud_pack.maas_k8s.id
values = templatefile("manifests/k8s-values.yaml", {
- pod-CIDR = var.pod-CIDR,
+ pod-CIDR = var.pod-CIDR,
clusterServicesCIDR = var.cluster-services-CIDR
- type = "spectro"
- })
+ type = "spectro"
+ })
}
pack {
@@ -60,17 +60,17 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
})
type = "spectro"
}
-
+
pack {
- name = data.spectrocloud_pack.maas_vmo.name
- tag = data.spectrocloud_pack.maas_vmo.version
- uid = data.spectrocloud_pack.maas_vmo.id
+ name = data.spectrocloud_pack.maas_vmo.name
+ tag = data.spectrocloud_pack.maas_vmo.version
+ uid = data.spectrocloud_pack.maas_vmo.id
values = templatefile("manifests/vmo-values.yaml", {
vmo-network-interface = var.vmo-network-interface,
- vm-vlans = var.vm-vlans,
- host-vlans = var.host-vlans
+ vm-vlans = var.vm-vlans,
+ host-vlans = var.host-vlans
})
- type = "spectro"
+ type = "spectro"
}
pack {
@@ -79,8 +79,10 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
tag = "1.0.0"
values = file("manifests/vmo-extras-values.yaml")
manifest {
- name = "vmo-extras"
- content = file("manifests/vmo-extras-manifest.yaml")
+ name = "vmo-extras"
+ content = templatefile("manifests/vmo-extras-manifest.yaml", {
+ palette-user-id = var.palette-user-id
+ })
}
}
diff --git a/terraform/vmo-cluster/inputs.tf b/terraform/vmo-cluster/inputs.tf
index 7701bf2..348efe7 100644
--- a/terraform/vmo-cluster/inputs.tf
+++ b/terraform/vmo-cluster/inputs.tf
@@ -43,8 +43,8 @@ variable "cluster-profile-type" {
description = "The name of the PCG that will be used to deploy the cluster."
validation {
- condition = var.deploy-maas ? var.cluster-profile-type != "REPLACE ME" && lower(var.cluster-profile-type) == "full" || lower(var.cluster-profile-type) == "infrastructure" || lower(var.cluster-profile-type) == "add-on" : true
- error_message = "Cluster profile type must be 'full', 'infrastructure', 'add-on', or 'app'."
+ condition = var.deploy-maas ? var.cluster-profile-type != "REPLACE ME" && lower(var.cluster-profile-type) == "cluster" || lower(var.cluster-profile-type) == "infra" || lower(var.cluster-profile-type) == "add-on" || lower(var.cluster-profile-type) == "system" : true
+ error_message = "Cluster profile type must be 'cluster', 'infra', 'add-on', or 'system'."
}
}
@@ -342,7 +342,7 @@ variable "vm-storage-Gi" {
description = "The amount of storage to provision for your VM in Gi."
validation {
- condition = var.deploy-maas ? var.vm-storage-Gi != "REPLACE ME" && length(var.vm-storage-Gi) != 0 && endswith((var.vm-storage-Gi), "Gi") : true
+ condition = var.deploy-maas ? var.vm-storage-Gi != "REPLACE ME" && length(var.vm-storage-Gi) != 0 && endswith((var.vm-storage-Gi), "Gi") : true
error_message = "Provide a valid amount of storage for your VM. You must include 'Gi' at the end of your numerical value. Example: '50Gi'."
}
}
@@ -385,7 +385,17 @@ variable "vm-memory-Gi" {
description = "The amount of storage to provision for your VM in Gi."
validation {
- condition = var.deploy-maas ? var.vm-memory-Gi != "REPLACE ME" && length(var.vm-memory-Gi) != 0 && endswith((var.vm-memory-Gi), "Gi") : true
+ condition = var.deploy-maas ? var.vm-memory-Gi != "REPLACE ME" && length(var.vm-memory-Gi) != 0 && endswith((var.vm-memory-Gi), "Gi") : true
error_message = "Provide a valid amount of memory to allocate your VM. You must include 'Gi' at the end of your numerical value. Example: '4Gi'."
}
+}
+
+variable "palette-user-id" {
+ type = string
+  description = "The ID of your Palette user."
+
+ validation {
+ condition = var.deploy-maas ? var.palette-user-id != "REPLACE ME" && length(var.palette-user-id) != 0 : true
+ error_message = "Provide a valid Palette user ID."
+ }
}
\ No newline at end of file
diff --git a/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml b/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
index 6244567..4f14d9e 100644
--- a/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
+++ b/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
@@ -129,4 +129,29 @@ spec:
- accessModes:
- ReadWriteMany
volumeMode: Block
- cloneStrategy: csi-clone
\ No newline at end of file
+ cloneStrategy: csi-clone
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: virtual-machine-orchestrator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: virtual-machine-orchestrator-admin
+subjects:
+- kind: ServiceAccount
+ name: virtual-machine-orchestrator
+ namespace: vm-dashboard
+#---
+# apiVersion: rbac.authorization.k8s.io/v1
+# kind: RoleBinding
+# metadata:
+# name: cluster-admin
+# subjects:
+# - kind: User
+# name: ${palette-user-id}
+# apiGroup: rbac.authorization.k8s.io
+# roleRef:
+# kind: ClusterRole
+# name: cluster-admin
\ No newline at end of file
diff --git a/terraform/vmo-cluster/terraform.tfvars b/terraform/vmo-cluster/terraform.tfvars
index c23f191..0eee3c6 100644
--- a/terraform/vmo-cluster/terraform.tfvars
+++ b/terraform/vmo-cluster/terraform.tfvars
@@ -4,68 +4,73 @@
#####################
# Palette Settings
#####################
-palette-project = "Default" # The name of your project in Palette.
+palette-project = "Default" # The name of your project in Palette.
############################
# MAAS Deployment Settings
############################
-deploy-maas = true # Set to true to deploy to a new VMO cluster to MAAS.
-deploy-maas-vm = true # Set to true to create a VM on MAAS VMO cluster once deployed.
-pcg-name = "maas-pcg" # Provide the name of the PCG that will be used to deploy the Palette cluster.
-maas-domain = "maas.sc" # Provide the MAAS domain that will be used to deploy the Palette cluster.
-maas-control-plane-nodes = 1 # Provide the number of control plane nodes that will be used for the Palette cluster.
-maas-control-plane-resource-pool = "Palette-Sustaining" # Provide a resource pool for the control plane nodes.
-maas-control-plane-azs = ["az1"] # Provide a set of availability zones for the control plane nodes.
-maas-control-plane-node-tags = ["docs-cp"] # Provide a set of node tags for the control plane nodes.
-ctl-node-min-cpu = 6 # Minimum number of CPU cores required for control plane nodes
-ctl-node-min-memory-mb = 8096 # Minimum amount of RAM (memory) required for control plane nodes
-maas-worker-nodes = 1 # Provide the number of worker nodes that will be used for the Palette cluster.
-maas-worker-resource-pool = "vmo-validation" # Provide a resource pool for the worker nodes.
-maas-worker-azs = ["az1"] # Provide a set of availability zones for the worker nodes.
-maas-worker-node-tags = ["docs"] # Provide a set of node tags for the worker nodes.
-wrk-node-min-cpu = 8 # Minimum number of CPU cores required for worker nodes
-wrk-node-min-memory-mb = 16384 # Minimum amount of RAM (memory) required for worker nodes
+deploy-maas = true # Set to true to deploy a new VMO cluster to MAAS.
+deploy-maas-vm = false # Set to true to create a VM on the MAAS VMO cluster once it is deployed.
+pcg-name = "maas-pcg" # Provide the name of the PCG that will be used to deploy the Palette cluster.
+maas-domain = "maas.sc" # Provide the MAAS domain that will be used to deploy the Palette cluster.
+maas-control-plane-nodes = 1 # Provide the number of control plane nodes that will be used for the Palette cluster.
+maas-control-plane-resource-pool = "docs" # Provide a resource pool for the control plane nodes.
+maas-control-plane-azs = ["az1"] # Provide a set of availability zones for the control plane nodes.
+maas-control-plane-node-tags = ["docs-cp"] # Provide a set of node tags for the control plane nodes.
+ctl-node-min-cpu = 6 # Minimum number of CPU cores required for control plane nodes
+ctl-node-min-memory-mb = 8096 # Minimum amount of RAM (memory) required for control plane nodes
+maas-worker-nodes = 1 # Provide the number of worker nodes that will be used for the Palette cluster.
+maas-worker-resource-pool = "docs" # Provide a resource pool for the worker nodes.
+maas-worker-azs = ["az1"] # Provide a set of availability zones for the worker nodes.
+maas-worker-node-tags = ["docs"] # Provide a set of node tags for the worker nodes.
+wrk-node-min-cpu = 8 # Minimum number of CPU cores required for worker nodes
+wrk-node-min-memory-mb = 16384 # Minimum amount of RAM (memory) required for worker nodes
#####################
# cluster_profiles.tf
#####################
vmo-cluster-name = "vmo-cluster-maas"
-cluster-profile-type = "Full" # Infrastructure, Full, or Add-on
-cluster-profile-version = "1.0.0 " # Version number for the cluster profile in Palette
+cluster-profile-type = "cluster" # infra, cluster, add-on, or system
+cluster-profile-version = "1.0.0" # Version number for the cluster profile in Palette
#####################
# vmo-values.tf
####################
-vmo-network-interface = ["br0"]
-vm-vlans = 1
-host-vlans = 1
+vmo-network-interface = ["br0"]
+vm-vlans = 1
+host-vlans = 1
###########################
# manifests/k8s-values.yaml
###########################
-pod-CIDR = ["100.64.0.0/18"] # Set the subnet that your pods will run on
-cluster-services-CIDR = ["100.64.64.0/18"]
+pod-CIDR = ["100.64.0.0/18"] # Set the subnet that your pods will run on
+cluster-services-CIDR = ["100.64.64.0/18"]
###############################
# manifests/metallb-values.yaml
###############################
-metallb-ip-pool = ["10.11.130.129-10.11.130.131"] # IP addresses to be assigned for use by MetalLB
+metallb-ip-pool = ["10.11.130.129-10.11.130.131"] # IP addresses to be assigned for use by MetalLB
###############################
# manifests/ubuntu-values.yaml
###############################
-node-network = "10.11.130.129-10.11.130.131" # IP addresses to be assigned for use by MetalLB
+node-network = "10.11.130.129-10.11.130.131" # The subnet the Ubuntu nodes will use.
#####################
# virtual_machines.tf
#####################
-vm-deploy-namespace = "virtual-machines" # Namespace where your VM will be deployed.
-vm-deploy-name = "vmo-vm" # The name of your VM
-vm-labels = ["my-vmo-vm"] # Labels that will be applied to your VM.
-vm-storage-Gi = "50Gi" # Size of the disk (PVC) that your VM will have.
-vm-cpu-cores = 2 # Number of CPU cores your VM will have.
-vm-cpu-sockets = 1 # Number of physical CPU sockets the CPU cores should be spread over.
-vm-cpu-threads = 2 # Number of CPU threads to use for the VM CPU
-vm-memory-Gi = "4Gi" # Amount of RAM (memory) your VM will have
\ No newline at end of file
+vm-deploy-namespace = "virtual-machines" # Namespace where your VM will be deployed.
+vm-deploy-name = "vmo-vm" # The name of your VM
+vm-labels = ["my-vmo-vm"] # Labels that will be applied to your VM.
+vm-storage-Gi = "50Gi" # Size of the disk (PVC) that your VM will have.
+vm-cpu-cores = 2 # Number of CPU cores your VM will have.
+vm-cpu-sockets = 1 # Number of physical CPU sockets the CPU cores should be spread over.
+vm-cpu-threads = 2 # Number of CPU threads to use for the VM CPU
+vm-memory-Gi = "4Gi" # Amount of RAM (memory) your VM will have
+
+#####################
+# vmo-extras-manifest.yaml
+#####################
+palette-user-id = "kenneth.heaslip@spectrocloud.com"
diff --git a/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl
index c89a7b9..6477b41 100644
--- a/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl
+++ b/terraform/vmo-cluster/tests/maas-cluster-replace-values.tftest.hcl
@@ -15,6 +15,13 @@ variables {
maas-control-plane-resource-pool = "REPLACE ME"
maas-control-plane-azs = ["REPLACE ME"]
maas-control-plane-node-tags = ["REPLACE ME"]
+ vmo-network-interface = ["REPLACE ME"]
+ vm-vlans = 1
+ host-vlans = 1
+ pod-CIDR = ["REPLACE ME"]
+ cluster-services-CIDR = ["REPLACE ME"]
+ metallb-ip-pool = ["REPLACE ME"]
+ node-network = "REPLACE ME"
}
mock_provider "spectrocloud" {
@@ -32,6 +39,11 @@ run "verify_maas" {
var.maas-worker-node-tags,
var.maas-control-plane-resource-pool,
var.maas-control-plane-azs,
- var.maas-control-plane-node-tags
+ var.maas-control-plane-node-tags,
+ var.vmo-network-interface,
+ var.pod-CIDR,
+ var.cluster-services-CIDR,
+ var.metallb-ip-pool,
+ var.node-network
]
}
diff --git a/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl b/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl
index 60f9ae4..ee6ab91 100644
--- a/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl
+++ b/terraform/vmo-cluster/tests/maas-cluster-vm.tftest.hcl
@@ -15,6 +15,14 @@ variables {
maas-control-plane-resource-pool = "test-cp-pool"
maas-control-plane-azs = ["test-cp-az"]
maas-control-plane-node-tags = ["test-cp-tags"]
+ vm-deploy-namespace = "virtual-machines"
+ vm-deploy-name = "test-vm"
+ vm-labels = ["my-vm"]
+ vm-storage-Gi = "50Gi"
+ vm-cpu-cores = 2
+ vm-cpu-sockets = 1
+ vm-cpu-threads = 2
+ vm-memory-Gi = "4Gi"
}
mock_provider "spectrocloud" {
diff --git a/terraform/vmo-cluster/tests/virtual-machine-missing-values.tftest.hcl b/terraform/vmo-cluster/tests/virtual-machine-missing-values.tftest.hcl
new file mode 100644
index 0000000..bd64f34
--- /dev/null
+++ b/terraform/vmo-cluster/tests/virtual-machine-missing-values.tftest.hcl
@@ -0,0 +1,33 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+# Test case 4 - Verify VM namespace, name, labels, storage, and memory values cannot be empty.
+
+variables {
+ deploy-maas = true
+ deploy-maas-vm = false
+ vm-deploy-namespace = ""
+ vm-deploy-name = ""
+ vm-labels = []
+ vm-storage-Gi = ""
+ vm-cpu-cores = 1
+ vm-cpu-sockets = 1
+ vm-cpu-threads = 2
+ vm-memory-Gi = ""
+
+}
+
+mock_provider "spectrocloud" {
+}
+
+run "verify_maas" {
+
+ command = plan
+
+ expect_failures = [
+ var.vm-deploy-namespace,
+ var.vm-deploy-name,
+ var.vm-labels,
+ var.vm-storage-Gi,
+ var.vm-memory-Gi,
+ ]
+}
diff --git a/terraform/vmo-cluster/tests/virtual-machines-replace-values.tftest.hcl b/terraform/vmo-cluster/tests/virtual-machines-replace-values.tftest.hcl
new file mode 100644
index 0000000..e43a404
--- /dev/null
+++ b/terraform/vmo-cluster/tests/virtual-machines-replace-values.tftest.hcl
@@ -0,0 +1,35 @@
+# Copyright (c) Spectro Cloud
+# SPDX-License-Identifier: Apache-2.0
+# Test case 5 - Verify VM namespace, name, labels, storage, and memory values cannot have REPLACE ME values.
+
+variables {
+
+  vm-deploy-namespace = "REPLACE ME"
+  vm-deploy-name      = "REPLACE ME"
+  vm-labels           = ["REPLACE ME"]
+  vm-storage-Gi       = "REPLACE ME"
+  vm-cpu-cores        = 2
+  vm-cpu-sockets      = 1
+  vm-cpu-threads      = 2
+  vm-memory-Gi        = "REPLACE ME"
+
+
+
+
+}
+
+mock_provider "spectrocloud" {
+}
+
+run "verify_maas" {
+
+ command = plan
+
+ expect_failures = [
+ var.vm-deploy-namespace,
+ var.vm-deploy-name,
+ var.vm-labels,
+ var.vm-storage-Gi,
+ var.vm-memory-Gi,
+ ]
+}
diff --git a/terraform/vmo-cluster/virtual_machines.tf b/terraform/vmo-cluster/virtual_machines.tf
index 8ac1996..d8c62af 100644
--- a/terraform/vmo-cluster/virtual_machines.tf
+++ b/terraform/vmo-cluster/virtual_machines.tf
@@ -9,9 +9,10 @@ resource "spectrocloud_virtual_machine" "virtual-machine" {
cluster_uid = data.spectrocloud_cluster.maas_vmo_cluster[0].id
cluster_context = data.spectrocloud_cluster.maas_vmo_cluster[0].context
- run_on_launch = true
- namespace = var.vm-deploy-namespace
- name = var.vm-deploy-name
+ #run_on_launch = true
+ run_strategy = "Halted"
+ namespace = var.vm-deploy-namespace
+ name = var.vm-deploy-name
timeouts {
create = "60m"
@@ -20,12 +21,13 @@ resource "spectrocloud_virtual_machine" "virtual-machine" {
labels = {
#var.vm-labels
# "tf" = "spectrocloud-tutorials"
- "kubevirt.io/vm" = "ubuntu-tutorial-vm"
+ "kubevirt.io/vm" = "ubuntu-2204"
}
data_volume_templates {
metadata {
- name = "ubuntu-tutorial-vm"
+ name = "ubuntu-2204"
+ namespace = var.vm-deploy-namespace
}
spec {
source {
@@ -48,10 +50,10 @@ resource "spectrocloud_virtual_machine" "virtual-machine" {
}
volume {
- name = "ubuntu-tutorial-vm"
+ name = "ubuntu-2204"
volume_source {
data_volume {
- name = "ubuntu-tutorial-vm"
+ name = "ubuntu-2204"
}
}
}
@@ -66,7 +68,7 @@ resource "spectrocloud_virtual_machine" "virtual-machine" {
}
disk {
- name = "ubuntu-tutorial-vm"
+ name = "ubuntu-2204"
disk_device {
disk {
bus = "virtio"
From 50b11bbf6d4f82da5082de566bcb528bb5fc6574 Mon Sep 17 00:00:00 2001
From: kenheaslip-sc
Date: Mon, 16 Jun 2025 13:47:58 -0400
Subject: [PATCH 07/11] Draft-RC1
---
terraform/vmo-cluster/cluster_profiles.tf | 7 +-
terraform/vmo-cluster/inputs.tf | 10 -
.../vmo-cluster/manifests/ubuntu-values.yaml | 6 +-
.../manifests/vmo-extras-manifest copy.yaml | 132 ++
.../manifests/vmo-extras-manifest.yaml | 27 +-
.../vmo-cluster/manifests/vmo-values.yaml | 1313 ++++++++---------
terraform/vmo-cluster/terraform.tfvars | 8 +-
...virtual-machines-replace-values.tftest.hcl | 5 -
.../vmo-cluster/virtual-machines/cloud-init | 16 +-
terraform/vmo-cluster/virtual_machines.tf | 4 +-
terraform/vmo-cluster/vm-test.yaml | 87 ++
11 files changed, 895 insertions(+), 720 deletions(-)
create mode 100644 terraform/vmo-cluster/manifests/vmo-extras-manifest copy.yaml
create mode 100644 terraform/vmo-cluster/vm-test.yaml
diff --git a/terraform/vmo-cluster/cluster_profiles.tf b/terraform/vmo-cluster/cluster_profiles.tf
index c82f549..9becb9b 100644
--- a/terraform/vmo-cluster/cluster_profiles.tf
+++ b/terraform/vmo-cluster/cluster_profiles.tf
@@ -1,4 +1,3 @@
-
##########################
# MAAS VMO Cluster Profile
##########################
@@ -29,8 +28,8 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
values = templatefile("manifests/k8s-values.yaml", {
pod-CIDR = var.pod-CIDR,
clusterServicesCIDR = var.cluster-services-CIDR
- type = "spectro"
})
+ type = "spectro"
}
pack {
@@ -80,9 +79,7 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
values = file("manifests/vmo-extras-values.yaml")
manifest {
name = "vmo-extras"
- content = templatefile("manifests/vmo-extras-manifest.yaml", {
- palette-user-id = var.palette-user-id
- })
+ content = file("manifests/vmo-extras-manifest.yaml")
}
}
diff --git a/terraform/vmo-cluster/inputs.tf b/terraform/vmo-cluster/inputs.tf
index 348efe7..b7ee3eb 100644
--- a/terraform/vmo-cluster/inputs.tf
+++ b/terraform/vmo-cluster/inputs.tf
@@ -388,14 +388,4 @@ variable "vm-memory-Gi" {
condition = var.deploy-maas ? var.vm-memory-Gi != "REPLACE ME" && length(var.vm-memory-Gi) != 0 && endswith((var.vm-memory-Gi), "Gi") : true
error_message = "Provide a valid amount of memory to allocate your VM. You must include 'Gi' at the end of your numerical value. Example: '4Gi'."
}
-}
-
-variable "palette-user-id" {
- type = string
-  description = "The ID of your Palette user."
-
- validation {
- condition = var.deploy-maas ? var.palette-user-id != "REPLACE ME" && length(var.palette-user-id) != 0 : true
- error_message = "Provide a valid Palette user ID."
- }
}
\ No newline at end of file
diff --git a/terraform/vmo-cluster/manifests/ubuntu-values.yaml b/terraform/vmo-cluster/manifests/ubuntu-values.yaml
index 427d5dc..26d80be 100644
--- a/terraform/vmo-cluster/manifests/ubuntu-values.yaml
+++ b/terraform/vmo-cluster/manifests/ubuntu-values.yaml
@@ -6,12 +6,12 @@ kubeadmconfig:
- apt install -y grepcidr
- |
# Enter as a CIDR '10.11.130.0/24'
- NETWORKS=${node-network}
+ NETWORKS="10.11.130.0/24"
IPS=$(hostname -I)
for IP in $IPS
do
- echo "$IP" | grepcidr "$NETWORKS" >/dev/null && echo " --node-ip=$IP" >> /etc/default/kubelet
- if [ $? == 0 ]; then break; fi
+ echo "$IP" | grepcidr "$NETWORKS" >/dev/null && echo " --node-ip=$IP" >> /etc/default/kubelet
+ if [ $? == 0 ]; then break; fi
done
# Increase audit_backlog_limit
- sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="audit_backlog_limit=256"/g' /etc/default/grub
diff --git a/terraform/vmo-cluster/manifests/vmo-extras-manifest copy.yaml b/terraform/vmo-cluster/manifests/vmo-extras-manifest copy.yaml
new file mode 100644
index 0000000..6244567
--- /dev/null
+++ b/terraform/vmo-cluster/manifests/vmo-extras-manifest copy.yaml
@@ -0,0 +1,132 @@
+apiVersion: spectrocloud.com/v1
+kind: VmTemplate
+metadata:
+ name: ubuntu-2204
+spec:
+ description: Ubuntu 22.04
+ displayName: Ubuntu 22.04
+ icon: https://s3.amazonaws.com/manifests.spectrocloud.com/logos/ubuntu.png
+ dataVolumeTemplates:
+ - metadata:
+ name: ubuntu-2204
+ spec:
+ source:
+ pvc:
+ name: template-ubuntu-2204
+ namespace: vmo-golden-images
+ pvc:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 50Gi
+ volumeMode: Block
+ storageClassName: ceph-block
+ template:
+ metadata:
+ annotations:
+ descheduler.alpha.kubernetes.io/evict: "true"
+ labels:
+ kubevirt.io/size: small
+ kubevirt.io/domain: hellouni
+ spec:
+ domain:
+ cpu:
+ cores: 2
+ sockets: 1
+ threads: 1
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: datavolume-os
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ interfaces:
+ - masquerade: {}
+ name: default
+ model: virtio
+ #macAddress: '00:5e:ab:cd:ef:01'
+ machine:
+ type: q35
+ resources:
+ limits:
+ memory: 2Gi
+ requests:
+ memory: 2Gi
+ networks:
+ - name: default
+ pod: {}
+ volumes:
+ - dataVolume:
+ name: ubuntu-2204
+ name: datavolume-os
+ - cloudInitNoCloud:
+ userData: |
+ #cloud-config
+ ssh_pwauth: True
+ chpasswd: { expire: False }
+ password: spectro
+ disable_root: false
+ runcmd:
+ - apt-get update
+ - apt-get install -y qemu-guest-agent
+ - systemctl start qemu-guest-agent
+ - |
+ apt-get -y install ca-certificates curl
+ install -m 0755 -d /etc/apt/keyrings
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+ chmod a+r /etc/apt/keyrings/docker.asc
+ echo \
+ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+ $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+ tee /etc/apt/sources.list.d/docker.list > /dev/null
+ apt-get update
+ apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+ groupadd docker
+ gpasswd -a ubuntu docker
+ name: cloudinitdisk
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: DataVolume
+metadata:
+ name: "template-ubuntu-2204"
+ namespace: "vmo-golden-images"
+ annotations:
+ cdi.kubevirt.io/storage.deleteAfterCompletion: "false"
+ cdi.kubevirt.io/storage.bind.immediate.requested: ""
+spec:
+ storage:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 50Gi
+ volumeMode: Block
+ storageClassName: ceph-block
+ source:
+ registry:
+ url: "docker://gcr.io/spectro-images-public/release/vm-dashboard/os/ubuntu-container-disk:22.04"
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: StorageProfile
+metadata:
+ name: ceph-filesystem
+spec:
+ claimPropertySets:
+ - accessModes:
+ - ReadWriteMany
+ volumeMode: Filesystem
+ cloneStrategy: csi-clone
+---
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: StorageProfile
+metadata:
+ name: ceph-block
+spec:
+ claimPropertySets:
+ - accessModes:
+ - ReadWriteMany
+ volumeMode: Block
+ cloneStrategy: csi-clone
\ No newline at end of file
diff --git a/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml b/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
index 4f14d9e..6244567 100644
--- a/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
+++ b/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
@@ -129,29 +129,4 @@ spec:
- accessModes:
- ReadWriteMany
volumeMode: Block
- cloneStrategy: csi-clone
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: virtual-machine-orchestrator
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: virtual-machine-orchestrator-admin
-subjects:
-- kind: ServiceAccount
- name: virtual-machine-orchestrator
- namespace: vm-dashboard
-#---
-# apiVersion: rbac.authorization.k8s.io/v1
-# kind: RoleBinding
-# metadata:
-# name: cluster-admin
-# subjects:
-# - kind: User
-# name: ${palette-user-id}
-# apiGroup: rbac.authorization.k8s.io
-# roleRef:
-# kind: ClusterRole
-# name: cluster-admin
\ No newline at end of file
+ cloneStrategy: csi-clone
\ No newline at end of file
diff --git a/terraform/vmo-cluster/manifests/vmo-values.yaml b/terraform/vmo-cluster/manifests/vmo-values.yaml
index cd4752f..8e66e80 100644
--- a/terraform/vmo-cluster/manifests/vmo-values.yaml
+++ b/terraform/vmo-cluster/manifests/vmo-values.yaml
@@ -1,668 +1,667 @@
pack:
- content:
- images:
- - image: us-docker.pkg.dev/palette-images/palette/spectro-vm-dashboard:4.6.3
- - image: us-docker.pkg.dev/palette-images/third-party/kubevirt-ui:v19
- - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-operator:v1.4.0
- - image: registry.k8s.io/sig-storage/snapshot-validation-webhook:v8.1.0
- - image: registry.k8s.io/sig-storage/snapshot-controller:v8.1.0
- - image: registry.k8s.io/descheduler/descheduler:v0.32.0
- - image: ghcr.io/k8snetworkplumbingwg/multus-cni:v4.1.4-thick
- - image: ghcr.io/k8snetworkplumbingwg/multus-dynamic-networks-controller:latest-amd64
- - image: quay.io/kubevirt/cdi-operator:v1.61.0
- - image: quay.io/kubevirt/cdi-uploadproxy:v1.61.0
- - image: quay.io/kubevirt/cdi-controller:v1.61.0
- - image: quay.io/kubevirt/cdi-apiserver:v1.61.0
- - image: quay.io/kubevirt/cdi-importer:v1.61.0
- - image: quay.io/kubevirt/cdi-uploadserver:v1.61.0
- - image: quay.io/kubevirt/cdi-cloner:v1.61.0
- - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-handler:v1.4.0
- - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-launcher:v1.4.0
- - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-exportproxy:v1.4.0
- - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-exportserver:v1.4.0
- - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-controller:v1.4.0
- - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-api:v1.4.0
- - image: us-docker.pkg.dev/palette-images/palette/virtual-machine-orchestrator/os/ubuntu-container-disk:22.04
- - image: us-docker.pkg.dev/palette-images/palette/virtual-machine-orchestrator/os/fedora-container-disk:37
- - image: us-docker.pkg.dev/palette-images/palette/virtual-machine-orchestrator/vlan-filtering/ubuntu:latest
- - image: us-docker.pkg.dev/palette-images/palette/spectro-cleanup:1.0.3
- - image: us-docker.pkg.dev/palette-images/palette/spectro-kubectl:v1.31.5-vmo
- namespace: vm-dashboard
- palette:
- config:
- dashboard:
- access: private
- spectrocloud.com/install-priority: "10"
+ content:
+ images:
+ - image: us-docker.pkg.dev/palette-images/palette/spectro-vm-dashboard:4.6.3
+ - image: us-docker.pkg.dev/palette-images/third-party/kubevirt-ui:v19
+ - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-operator:v1.4.0
+ - image: registry.k8s.io/sig-storage/snapshot-validation-webhook:v8.1.0
+ - image: registry.k8s.io/sig-storage/snapshot-controller:v8.1.0
+ - image: registry.k8s.io/descheduler/descheduler:v0.32.0
+ - image: ghcr.io/k8snetworkplumbingwg/multus-cni:v4.1.4-thick
+ - image: ghcr.io/k8snetworkplumbingwg/multus-dynamic-networks-controller:latest-amd64
+ - image: quay.io/kubevirt/cdi-operator:v1.61.0
+ - image: quay.io/kubevirt/cdi-uploadproxy:v1.61.0
+ - image: quay.io/kubevirt/cdi-controller:v1.61.0
+ - image: quay.io/kubevirt/cdi-apiserver:v1.61.0
+ - image: quay.io/kubevirt/cdi-importer:v1.61.0
+ - image: quay.io/kubevirt/cdi-uploadserver:v1.61.0
+ - image: quay.io/kubevirt/cdi-cloner:v1.61.0
+ - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-handler:v1.4.0
+ - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-launcher:v1.4.0
+ - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-exportproxy:v1.4.0
+ - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-exportserver:v1.4.0
+ - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-controller:v1.4.0
+ - image: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-api:v1.4.0
+ - image: us-docker.pkg.dev/palette-images/palette/virtual-machine-orchestrator/os/ubuntu-container-disk:22.04
+ - image: us-docker.pkg.dev/palette-images/palette/virtual-machine-orchestrator/os/fedora-container-disk:37
+ - image: us-docker.pkg.dev/palette-images/palette/virtual-machine-orchestrator/vlan-filtering/ubuntu:latest
+ - image: us-docker.pkg.dev/palette-images/palette/spectro-cleanup:1.0.3
+ - image: us-docker.pkg.dev/palette-images/palette/spectro-kubectl:v1.31.5-vmo
+ namespace: vm-dashboard
+ palette:
+ config:
+ dashboard:
+ access: private
+ spectrocloud.com/install-priority: "10"
charts:
- virtual-machine-orchestrator:
+ virtual-machine-orchestrator:
+ image:
+ repository: us-docker.pkg.dev/palette-images/palette/spectro-vm-dashboard
+ tag: "4.6.3"
+ service:
+ type: "ClusterIP"
+ appConfig:
+ clusterInfo:
+ consoleBaseAddress: ""
+ fullnameOverride: "virtual-machine-orchestrator"
+ serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: "virtual-machine-orchestrator"
+ sampleTemplates:
+ fedora37: false
+ ubuntu2204: false
+ ubuntu2204WithVol: false
+ ubuntu2204staticIP: false
+ fedora37staticIP: false
+ # To create additional vm templates refer to https://docs.spectrocloud.com/vm-management/create-manage-vm/create-vm-template
+ # This namespace used to store golden images.
+
+ goldenImagesNamespace: "vmo-golden-images"
+ # These namespaces are created and set up to deploy VMs into
+ vmEnabledNamespaces:
+ - "default"
+ - "virtual-machines"
+ - ns-adv
+ - ns-edge
+ - ns-product
+ - ns-packs
+ grafana:
+ namespace: monitoring
+ vlanFiltering:
+ enabled: true
+ namespace: kube-system
+ image:
+ repository: us-docker.pkg.dev/palette-images/palette/virtual-machine-orchestrator/vlan-filtering/ubuntu
+ pullPolicy: IfNotPresent
+ tag: "latest"
+ env:
+ # Which bridge interface to control
+ bridgeIF: "br0"
+ # Beginning of VLAN range to enable
+ allowedVlans: "1"
+ # Set to "true" to enable VLANs on the br0 interface for the host to use itself
+ allowVlansOnSelf: "true"
+ # Beginning of VLAN range to enable for use by the node itself
+ allowedVlansOnSelf: "1"
+ snapshot-controller:
+ enabled: true
+ replicas: 1
+ # controller image and policies
+ image:
+ repository: registry.k8s.io/sig-storage/snapshot-controller
+ pullPolicy: IfNotPresent
+ tag: "v8.1.0"
+ # A list/array of extra args to use
+ # when running the controller. Default args include log verbose level
+ # and leader election
+ extraArgs: []
+ # snapshot webhook config
+ webhook:
+ # all below values take effect only if webhook is enabled
+ enabled: true
+ # webhook controller image and policies
image:
- repository: us-docker.pkg.dev/palette-images/palette/spectro-vm-dashboard
- tag: "4.6.3"
+ # change the image if you wish to use your own custom validation server image
+ repository: registry.k8s.io/sig-storage/snapshot-validation-webhook
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: "v8.1.0"
+ validatingWebhook:
+ failurePolicy: Fail
+ timeoutSeconds: 2
+ # Validating webhook is exposed on an HTTPS endpoint, and so
+ # TLS certificate is required. This Helm chart relies on
+ # cert-manager.io for managing TLS certificates.
+ tls:
+ # If not empty, this issuer is used to sign the certificate.
+ # If none is provided, a new, self-signing issuer is created.
+ issuerRef: {}
+ # name:
+ # kind:
+ # group: cert-manager.io
+
+ # Certificate duration. The generated certificate is automatically
+ # renewed 1/3 of `certDuration` before its expiry.
+ # Value must be in units accepted by Go time.ParseDuration.
+ # See https://golang.org/pkg/time/#ParseDuration for allowed formats.
+ # Minimum accepted duration is `1h`.
+ # This option may be ignored/overridden by some issuer types.
+ certDuration: 8760h
service:
- type: "ClusterIP"
- appConfig:
- clusterInfo:
- consoleBaseAddress: ""
- fullnameOverride: "virtual-machine-orchestrator"
+ type: ClusterIP
+ port: 443
serviceAccount:
- # Specifies whether a service account should be created
- create: true
- # Annotations to add to the service account
- annotations: {}
- # The name of the service account to use.
- # If not set and create is true, a name is generated using the fullname template
- name: "virtual-machine-orchestrator"
- sampleTemplates:
- fedora37: false
- ubuntu2204: false
- ubuntu2204WithVol: false
- ubuntu2204staticIP: false
- fedora37staticIP: false
- # To create additional vm templates refer to https://docs.spectrocloud.com/vm-management/create-manage-vm/create-vm-template
- # This namespace used to store golden images.
+ # Specifies whether a service account should be created.
+ create: true
+ # Annotations to add to the service account.
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template.
+ name: ""
+ # Log verbosity level.
+ # See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md
+ # for description of individual verbosity levels.
+ logVerbosityLevel: 2
+ podAnnotations: {}
+ resources: {}
+ nodeSelector: {}
+ tolerations: []
+ affinity: {}
+ nameOverride: ""
+ fullnameOverride: ""
+ imagePullSecrets: []
+ nameOverride: ""
+ fullnameOverride: ""
+ resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
- goldenImagesNamespace: "vmo-golden-images"
- # These namespaces are created and set up to deploy VMs into
- vmEnabledNamespaces:
- - "default"
- - "virtual-machines"
- - ns-adv
- - ns-edge
- - ns-product
- - ns-packs
- grafana:
- namespace: monitoring
- vlanFiltering:
- enabled: true
- namespace: kube-system
- image:
- repository: us-docker.pkg.dev/palette-images/palette/virtual-machine-orchestrator/vlan-filtering/ubuntu
- pullPolicy: IfNotPresent
- tag: "latest"
- env:
- # Which bridge interface to control
- bridgeIF: "br0"
- # Beginning of VLAN range to enable
- allowedVlans: "1"
- # Set to "true" to enable VLANs on the br0 interface for the host to use itself
- allowVlansOnSelf: "true"
- # Beginning of VLAN range to enable for use by the node itself
- allowedVlansOnSelf: "1"
- snapshot-controller:
- enabled: true
- replicas: 1
- # controller image and policies
- image:
- repository: registry.k8s.io/sig-storage/snapshot-controller
- pullPolicy: IfNotPresent
- tag: "v8.1.0"
- # A list/array of extra args to use
- # when running the controller. Default args include log verbose level
- # and leader election
- extraArgs: []
- # snapshot webhook config
- webhook:
- # all below values take effect only if webhook is enabled
- enabled: true
- # webhook controller image and policies
- image:
- # change the image if you wish to use your own custom validation server image
- repository: registry.k8s.io/sig-storage/snapshot-validation-webhook
- pullPolicy: IfNotPresent
- # Overrides the image tag whose default is the chart appVersion.
- tag: "v8.1.0"
- validatingWebhook:
- failurePolicy: Fail
- timeoutSeconds: 2
- # Validating webhook is exposed on an HTTPS endpoint, and so
- # TLS certificate is required. This Helm chart relies on
- # cert-manager.io for managing TLS certificates.
- tls:
- # If not empty, this issuer is used to sign the certificate.
- # If none is provided, a new, self-signing issuer is created.
- issuerRef: {}
- # name:
- # kind:
- # group: cert-manager.io
-
- # Certificate duration. The generated certificate is automatically
- # renewed 1/3 of `certDuration` before its expiry.
- # Value must be in units accepted by Go time.ParseDuration.
- # See https://golang.org/pkg/time/#ParseDuration for allowed formats.
- # Minimum accepted duration is `1h`.
- # This option may be ignored/overridden by some issuer types.
- certDuration: 8760h
- service:
- # when running in cluster webhook service is recommended to be of type ClusterIP
- type: ClusterIP
- port: 443
- serviceAccount:
- # Specifies whether a service account should be created.
- create: true
- # Annotations to add to the service account.
- annotations: {}
- # The name of the service account to use.
- # If not set and create is true, a name is generated using the fullname template.
- name: ""
- # Log verbosity level.
- # See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md
- # for description of individual verbosity levels.
- logVerbosityLevel: 2
- podAnnotations: {}
- resources: {}
- nodeSelector: {}
- tolerations: []
- affinity: {}
- nameOverride: ""
- fullnameOverride: ""
- imagePullSecrets: []
- nameOverride: ""
- fullnameOverride: ""
- resources: {}
- # We usually recommend not to specify default resources and to leave this as a conscious
- # choice for the user. This also increases chances charts run on environments with little
- # resources, such as Minikube. If you do want to specify resources, uncomment the following
- # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- # limits:
- # cpu: 100m
- # memory: 128Mi
- # requests:
- # cpu: 100m
- # memory: 128Mi
-
- nodeSelector: {}
- tolerations: []
- affinity: {}
- # create a default volume snapshot class
- volumeSnapshotClass:
- create: true
- name: "ceph-block-snapshot-class"
- driver: "rook-ceph.rbd.csi.ceph.com"
- # deletionPolicy determines whether a VolumeSnapshotContent created through
- # the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
- # Supported values are "Retain" and "Delete".
- deletionPolicy: "Delete"
- # params is a key-value map with storage driver specific parameters for creating snapshots.
- params:
- clusterID: rook-ceph
- csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret
- csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
- # key-value pair of extra labels to apply to the volumesnapshotclass
- extraLabels:
- velero.io/csi-volumesnapshot-class: "true"
- # time for sleep hook in seconds
- hooksleepTime: 12
- kubevirt:
- enabled: true
- # defaults to kubevirt
- namespace: kubevirt
- namespaceLabels:
- pod-security.kubernetes.io/enforce: privileged
- pod-security.kubernetes.io/enforce-version: v{{ .spectro.system.kubernetes.version | substr 0 4 }}
- replicas: 1
- service:
- type: LoadBalancer
- port: 443
- targetPort: 8443
- image:
- repository: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-operator
- pullPolicy: IfNotPresent
- # Overrides the image tag whose default is the chart appVersion.
- tag: "v1.4.0"
- ## The Kubevirt CR that gets created
- kubevirtResource:
- name: kubevirt
- useEmulation: false
- # below gates are required for virtual machine orchestrator pack, users can append additional gates
- additionalFeatureGates:
- - LiveMigration
- - HotplugVolumes
- - Snapshot
- - VMExport
- - ExpandDisks
- - HotplugNICs
- - VMLiveUpdateFeatures
- - VMPersistentState
- - VolumesUpdateStrategy
- - VolumeMigration
- - CPUManager
- - Sidecar
- #- VMPersistentState
- # for additional feature gates refer to https://docs.spectrocloud.com/vm-management#featuregates
- config:
- evictionStrategy: "LiveMigrate"
- # additionalConfig lets you define any configuration other than developerConfiguration and evictionStrategy
- additionalConfig:
- vmStateStorageClass: "ceph-filesystem"
- #vmStateStorageClass: "" #fileSystem-based storageclass for persistent TPM
- migrations:
- allowAutoConverge: true
- completionTimeoutPerGiB: 800
- # additionalDevConfig lets you define dev config other than emulation and feature gate
- additionalDevConfig: {}
- # vmRolloutStrategy lets you define how changes to a VM object propagate to its VMI objects
- vmRolloutStrategy: LiveUpdate
- certificateRotateStrategy: {}
- customizeComponents:
- # flags:
- # api:
- # v:
- # "5"
- # port:
- # "8443"
- imagePullPolicy: IfNotPresent
- infra: {}
- # The name of the Prometheus service account that needs read-access to KubeVirt endpoints
- monitorAccount: "prometheus-operator-prometheus"
- # The namespace Prometheus is deployed in
- monitorNamespace: "monitoring"
- # The namespace the service monitor is deployed to. Either specify this or the monitorNamespace
- serviceMonitorNamespace: "monitoring"
- workloads: {}
- workloadsUpdateStrategy:
- workloadUpdateMethods:
- - LiveMigrate
- # uninstallStrategy to use, options are RemoveWorkloads, BlockUninstallIfWorkloadsExist
- uninstallStrategy: ""
- ingress:
- enabled: false
- ingressClassName: nginx
- annotations:
- cert-manager.io/issuer: kubevirt-selfsigned-issuer
- nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
- labels: {}
- hosts:
- - host: virt-exportproxy.maas.sc
- paths:
- - path: /
- pathType: ImplementationSpecific
- # tls:
- # - secretName: chart-example-tls
- # hosts:
- # - virt-exportproxy.maas.sc
- cdi:
- enabled: true
- namespaceLabels:
- pod-security.kubernetes.io/enforce: privileged
- pod-security.kubernetes.io/enforce-version: v{{ .spectro.system.kubernetes.version | substr 0 4 }}
- replicas: 1
- image:
- repository: quay.io/kubevirt/cdi-operator
- pullPolicy: IfNotPresent
- # Overrides the image tag whose default is the chart appVersion.
- tag: "v1.61.0"
- # set enabled to true and add private registry details to bring up VMs in airgap environment
- privateRegistry:
- enabled: false
- registryIP: #Ex: 10.10.225.20
- registryBasePath:
- #Ex: specto-images
- serviceAccount:
- # Specifies whether a service account should be created
- create: true
- # Annotations to add to the service account
- annotations: {}
- # The name of the service account to use.
- # If not set and create is true, a name is generated using the fullname template
- name: ""
- service:
- type: LoadBalancer
- port: 443
- targetPort: 8443
- ingress:
- enabled: false
- className: "nginx"
- annotations:
- cert-manager.io/issuer: cdi-selfsigned-issuer
- nginx.ingress.kubernetes.io/proxy-body-size: "0"
- nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
- nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
- nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
- nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
- hosts:
- - host: cdi-uploadproxy.maas.sc
- paths:
- - path: /
- pathType: ImplementationSpecific
- tls: []
- # - secretName: chart-example-tls
- # hosts:
- # - cdi-uploadproxy.maas.sc
- resources: {}
- # We usually recommend not to specify default resources and to leave this as a conscious
- # choice for the user. This also increases chances charts run on environments with little
- # resources, such as Minikube. If you do want to specify resources, uncomment the following
- # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- # limits:
- # cpu: 100m
- # memory: 128Mi
- # requests:
- # cpu: 100m
- # memory: 128Mi
+ nodeSelector: {}
+ tolerations: []
+ affinity: {}
+ # create a default volume snapshot class
+ volumeSnapshotClass:
+ create: true
+ name: "ceph-block-snapshot-class"
+ driver: "rook-ceph.rbd.csi.ceph.com"
+ # deletionPolicy determines whether a VolumeSnapshotContent created through
+ # the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
+ # Supported values are "Retain" and "Delete".
+ deletionPolicy: "Delete"
+ # params is a key-value map with storage driver specific parameters for creating snapshots.
+ params:
+ clusterID: rook-ceph
+ csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret
+ csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
+ # key-value pair of extra labels to apply to the volumesnapshotclass
+ extraLabels:
+ velero.io/csi-volumesnapshot-class: "true"
+ # time for sleep hook in seconds
+ hooksleepTime: 12
+ kubevirt:
+ enabled: true
+ # defaults to kubevirt
+ namespace: kubevirt
+ namespaceLabels:
+ pod-security.kubernetes.io/enforce: privileged
+ pod-security.kubernetes.io/enforce-version: v{{ .spectro.system.kubernetes.version | substr 0 4 }}
+ replicas: 1
+ service:
+ type: LoadBalancer
+ port: 443
+ targetPort: 8443
+ image:
+ repository: us-docker.pkg.dev/palette-images/palette/kubevirt/virt-operator
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: "v1.4.0"
+ ## The Kubevirt CR that gets created
+ kubevirtResource:
+ name: kubevirt
+ useEmulation: false
+ # below gates are required for virtual machine orchestrator pack, users can append additional gates
+ additionalFeatureGates:
+ - LiveMigration
+ - HotplugVolumes
+ - Snapshot
+ - VMExport
+ - ExpandDisks
+ - HotplugNICs
+ - VMLiveUpdateFeatures
+ - VMPersistentState
+ - VolumesUpdateStrategy
+ - VolumeMigration
+ - CPUManager
+ - Sidecar
+ #- VMPersistentState
+ # for additional feature gates refer to https://docs.spectrocloud.com/vm-management#featuregates
+ config:
+ evictionStrategy: "LiveMigrate"
+ # additionalConfig lets you define any configuration other than developerConfiguration and evictionStrategy
+ additionalConfig:
+ vmStateStorageClass: "ceph-filesystem"
+ #vmStateStorageClass: "" #fileSystem-based storageclass for persistent TPM
+ migrations:
+ allowAutoConverge: true
+ completionTimeoutPerGiB: 800
+ # additionalDevConfig lets you define dev config other than emulation and feature gate
+ additionalDevConfig: {}
+ # vmRolloutStrategy lets you define how changes to a VM object propagate to its VMI objects
+ vmRolloutStrategy: LiveUpdate
+ certificateRotateStrategy: {}
+ customizeComponents:
+ # flags:
+ # api:
+ # v:
+ # "5"
+ # port:
+ # "8443"
+ imagePullPolicy: IfNotPresent
+ infra: {}
+ # The name of the Prometheus service account that needs read-access to KubeVirt endpoints
+ monitorAccount: "prometheus-operator-prometheus"
+ # The namespace Prometheus is deployed in
+ monitorNamespace: "monitoring"
+ # The namespace the service monitor is deployed to. Either specify this or the monitorNamespace
+ serviceMonitorNamespace: "monitoring"
+ workloads: {}
+ workloadsUpdateStrategy:
+ workloadUpdateMethods:
+ - LiveMigrate
+ # uninstallStrategy to use, options are RemoveWorkloads, BlockUninstallIfWorkloadsExist
+ uninstallStrategy: ""
+ ingress:
+ enabled: false
+ ingressClassName: nginx
+ annotations:
+ cert-manager.io/issuer: kubevirt-selfsigned-issuer
+ nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+ labels: {}
+ hosts:
+ - host: virt-exportproxy.maas.sc
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ # tls:
+ # - secretName: chart-example-tls
+ # hosts:
+ # - virt-exportproxy.maas.sc
+ cdi:
+ enabled: true
+ namespaceLabels:
+ pod-security.kubernetes.io/enforce: privileged
+ pod-security.kubernetes.io/enforce-version: v{{ .spectro.system.kubernetes.version | substr 0 4 }}
+ replicas: 1
+ image:
+ repository: quay.io/kubevirt/cdi-operator
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: "v1.61.0"
+ # set enabled to true and add private registry details to bring up VMs in airgap environment
+ privateRegistry:
+ enabled: false
+ registryIP: #Ex: 10.10.225.20
+ registryBasePath:
+ #Ex: specto-images
+ serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: ""
+ service:
+ type: LoadBalancer
+ port: 443
+ targetPort: 8443
+ ingress:
+ enabled: false
+ className: "nginx"
+ annotations:
+ cert-manager.io/issuer: cdi-selfsigned-issuer
+ nginx.ingress.kubernetes.io/proxy-body-size: "0"
+ nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
+ nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
+ nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
+ nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+ hosts:
+ - host: cdi-uploadproxy.maas.sc
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - cdi-uploadproxy.maas.sc
+ resources: {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
- ## The CDI CR that gets created
- cdiResource:
- additionalFeatureGates: # - FeatureName
- additionalConfig:
- filesystemOverhead:
- global: "0.08"
- storageClass:
- spectro-storage-class: "0.08"
- podResourceRequirements:
- requests:
- cpu: 250m
- memory: 1G
- limits:
- cpu: 1
- memory: 8G
- insecureRegistries: [] # List of insecure registries to allow in the CDI importer, preffered in air-gapped environments
- importProxy:
- #HTTPProxy: "http://username:password@your-proxy-server:3128"
- #HTTPSProxy: "http://username:password@your-proxy-server:3128"
- #noProxy: "127.0.0.1,localhost,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.company.local"
- #TrustedCAProxy: configmap-name # optional: the ConfigMap name of an user-provided trusted certificate authority (CA) bundle to be added to the importer pod CA bundle
- additionalSpec:
- infra:
- nodeSelector:
- kubernetes.io/os: linux
- tolerations:
- - key: CriticalAddonsOnly
- operator: Exists
- workload:
- nodeSelector:
- kubernetes.io/os: linux
- imagePullPolicy: IfNotPresent
- multus:
- enabled: true
- image:
- repository: ghcr.io/k8snetworkplumbingwg/multus-cni
- pullPolicy: IfNotPresent
- # Overrides the image tag whose default is the chart appVersion.
- tag: "v4.1.4-thick"
- networkController:
- criSocket:
- enableK3SHostPath: false # true for K3S and RKE2, false for PXK-E
- paletteAgentMode: false # true for running Palette Agent Mode clusters with PXK-E
- # criSocketHostPathOverride: /run/containerd/containerd.sock
- imagePullSecrets: []
- podAnnotations: {}
- resources:
- # We usually recommend not to specify default resources and to leave this as a conscious
- # choice for the user. This also increases chances charts run on environments with little
- # resources, such as Minikube. If you do want to specify resources, uncomment the following
- # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- limits:
- cpu: 100m
- memory: 1Gi
- requests:
- cpu: 100m
- memory: 50Mi
- nodeSelector: {}
- affinity: {}
- dpdkCompatibility: false
- cleanup:
- image: us-docker.pkg.dev/palette-images/palette/spectro-cleanup
- tag: "1.0.3"
- networkAttachDef:
- create: false
- # a json string to apply
- config: ""
- # a sample config
- # '{
- # "cniVersion": "0.3.0",
- # "type": "macvlan",
- # "master": "ens5",
- # "mode": "bridge",
- # "ipam": {
- # "type": "host-local",
- # "subnet": "192.168.1.0/24",
- # "rangeStart": "192.168.1.200",
- # "rangeEnd": "192.168.1.216",
- # "routes": [
- # { "dst": "0.0.0.0/0" }
- # ],
- # "gateway": "192.168.1.1"
- # }
- # }'
- descheduler:
- enabled: true
- namespace: "kube-system"
- # CronJob or Deployment
- kind: CronJob
- image:
- repository: registry.k8s.io/descheduler/descheduler
- # Overrides the image tag whose default is the chart version
- tag: "v0.32.0"
- pullPolicy: IfNotPresent
- imagePullSecrets: # - name: container-registry-secret
- resources:
- requests:
- cpu: 500m
- memory: 256Mi
- limits:
- cpu: 500m
- memory: 256Mi
- ports:
- - containerPort: 10258
- protocol: TCP
- securityContext:
- allowPrivilegeEscalation: false
- capabilities:
- drop:
- - ALL
- privileged: false
- readOnlyRootFilesystem: true
- runAsNonRoot: true
- runAsUser: 1000
- # podSecurityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
- podSecurityContext: {} # fsGroup: 1000
- nameOverride: ""
- fullnameOverride: "descheduler"
- # -- Override the deployment namespace; defaults to .Release.Namespace
- namespaceOverride: ""
- # labels that'll be applied to all resources
- commonLabels: {}
- cronJobApiVersion: "batch/v1"
- schedule: "*/15 * * * *"
- suspend: false
- # startingDeadlineSeconds: 200
- # successfulJobsHistoryLimit: 3
- # failedJobsHistoryLimit: 1
- # ttlSecondsAfterFinished 600
- # timeZone: Etc/UTC
+ ## The CDI CR that gets created
+ cdiResource:
+ additionalFeatureGates: # - FeatureName
+ additionalConfig:
+ filesystemOverhead:
+ global: "0.08"
+ storageClass:
+ spectro-storage-class: "0.08"
+ podResourceRequirements:
+ requests:
+ cpu: 250m
+ memory: 1G
+ limits:
+ cpu: 1
+ memory: 8G
+ insecureRegistries: [] # List of insecure registries to allow in the CDI importer, preffered in air-gapped environments
+ importProxy:
+ #HTTPProxy: "http://username:password@your-proxy-server:3128"
+ #HTTPSProxy: "http://username:password@your-proxy-server:3128"
+ #noProxy: "127.0.0.1,localhost,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.company.local"
+ #TrustedCAProxy: configmap-name # optional: the ConfigMap name of an user-provided trusted certificate authority (CA) bundle to be added to the importer pod CA bundle
+ additionalSpec:
+ infra:
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+ workload:
+ nodeSelector:
+ kubernetes.io/os: linux
+ imagePullPolicy: IfNotPresent
+ multus:
+ enabled: true
+ image:
+ repository: ghcr.io/k8snetworkplumbingwg/multus-cni
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: "v4.1.4-thick"
+ networkController:
+ criSocket:
+ enableK3SHostPath: false # true for K3S and RKE2, false for PXK-E
+ paletteAgentMode: false # true for running Palette Agent Mode clusters with PXK-E
+ # criSocketHostPathOverride: /run/containerd/containerd.sock
+ imagePullSecrets: []
+ podAnnotations: {}
+ resources:
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ limits:
+ cpu: 100m
+ memory: 1Gi
+ requests:
+ cpu: 100m
+ memory: 50Mi
+ nodeSelector: {}
+ affinity: {}
+ dpdkCompatibility: false
+ cleanup:
+ image: us-docker.pkg.dev/palette-images/palette/spectro-cleanup
+ tag: "1.0.3"
+ networkAttachDef:
+ create: false
+ # a json string to apply
+ config: ""
+ # a sample config
+ # '{
+ # "cniVersion": "0.3.0",
+ # "type": "macvlan",
+ # "master": "ens5",
+ # "mode": "bridge",
+ # "ipam": {
+ # "type": "host-local",
+ # "subnet": "192.168.1.0/24",
+ # "rangeStart": "192.168.1.200",
+ # "rangeEnd": "192.168.1.216",
+ # "routes": [
+ # { "dst": "0.0.0.0/0" }
+ # ],
+ # "gateway": "192.168.1.1"
+ # }
+ # }'
+ descheduler:
+ enabled: true
+ namespace: "kube-system"
+ # CronJob or Deployment
+ kind: CronJob
+ image:
+ repository: registry.k8s.io/descheduler/descheduler
+ # Overrides the image tag whose default is the chart version
+ tag: "v0.32.0"
+ pullPolicy: IfNotPresent
+ imagePullSecrets: # - name: container-registry-secret
+ resources:
+ requests:
+ cpu: 500m
+ memory: 256Mi
+ limits:
+ cpu: 500m
+ memory: 256Mi
+ ports:
+ - containerPort: 10258
+ protocol: TCP
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ privileged: false
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ # podSecurityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
+ podSecurityContext: {} # fsGroup: 1000
+ nameOverride: ""
+ fullnameOverride: "descheduler"
+ # -- Override the deployment namespace; defaults to .Release.Namespace
+ namespaceOverride: ""
+ # labels that'll be applied to all resources
+ commonLabels: {}
+ cronJobApiVersion: "batch/v1"
+ schedule: "*/15 * * * *"
+ suspend: false
+ # startingDeadlineSeconds: 200
+ # successfulJobsHistoryLimit: 3
+ # failedJobsHistoryLimit: 1
+ # ttlSecondsAfterFinished 600
+ # timeZone: Etc/UTC
- # Required when running as a Deployment
- deschedulingInterval: 15m
- # Specifies the replica count for Deployment
- # Set leaderElection if you want to use more than 1 replica
- # Set affinity.podAntiAffinity rule if you want to schedule onto a node
- # only if that node is in the same zone as at least one already-running descheduler
- replicas: 1
- # Specifies whether Leader Election resources should be created
- # Required when running as a Deployment
- # NOTE: Leader election can't be activated if DryRun enabled
- leaderElection: {}
- # enabled: true
- # leaseDuration: 15s
- # renewDeadline: 10s
- # retryPeriod: 2s
- # resourceLock: "leases"
- # resourceName: "descheduler"
- # resourceNamespace: "kube-system"
+ # Required when running as a Deployment
+ deschedulingInterval: 15m
+ # Specifies the replica count for Deployment
+ # Set leaderElection if you want to use more than 1 replica
+ # Set affinity.podAntiAffinity rule if you want to schedule onto a node
+ # only if that node is in the same zone as at least one already-running descheduler
+ replicas: 1
+ # Specifies whether Leader Election resources should be created
+ # Required when running as a Deployment
+ # NOTE: Leader election can't be activated if DryRun enabled
+ leaderElection: {}
+ # enabled: true
+ # leaseDuration: 15s
+ # renewDeadline: 10s
+ # retryPeriod: 2s
+ # resourceLock: "leases"
+ # resourceName: "descheduler"
+ # resourceNamespace: "kube-system"
- command:
- - "/bin/descheduler"
- cmdOptions:
- v: 3
- # Recommended to use the latest Policy API version supported by the Descheduler app version
- deschedulerPolicyAPIVersion: "descheduler/v1alpha2"
- # deschedulerPolicy contains the policies the descheduler executes.
- # To use policies stored in an existing configMap use:
- # NOTE: The name of the cm should comply to {{ template "descheduler.fullname" . }}
- # deschedulerPolicy: {}
- deschedulerPolicy:
- nodeSelector: kubevirt.io/schedulable=true
- maxNoOfPodsToEvictPerNode: 10
- # maxNoOfPodsToEvictPerNamespace: 10
- metricsCollector:
- enabled: true
- # ignorePvcPods: true
- # evictLocalStoragePods: true
- # evictDaemonSetPods: true
- # tracing:
- # collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
- # transportCert: ""
- # serviceName: ""
- # serviceNamespace: ""
- # sampleRate: 1.0
- # fallbackToNoOpProviderOnError: true
- profiles:
- - name: default
- pluginConfig:
- - name: DefaultEvictor
- args:
- ignorePvcPods: true
- evictLocalStoragePods: true
- nodeFit: true
- ignorePodsWithoutPDB: true
- - name: RemoveDuplicates
- - name: RemovePodsHavingTooManyRestarts
- args:
- podRestartThreshold: 100
- includingInitContainers: true
- - name: RemovePodsViolatingNodeAffinity
- args:
- nodeAffinityType:
- - requiredDuringSchedulingIgnoredDuringExecution
- - name: RemovePodsViolatingNodeTaints
- args:
- excludedTaints:
- - node.kubernetes.io/unschedulable
- - name: RemovePodsViolatingInterPodAntiAffinity
- - name: RemovePodsViolatingTopologySpreadConstraint
- - name: LowNodeUtilization
- args:
- thresholds:
- cpu: 20
- memory: 25
- pods: 100
- targetThresholds:
- cpu: 60
- memory: 75
- pods: 100
- metricsUtilization:
- metricsServer: true
- evictableNamespaces:
- exclude:
- - "cert-manager"
- - "kube-system"
- - "palette-system"
- - "metallb-system"
- - "cluster-{{ .spectro.system.cluster.uid }}"
- - "kubevirt"
- - "monitoring"
- - "nginx"
- - "vm-dashboard"
- plugins:
- balance:
- enabled:
- - RemoveDuplicates
- - RemovePodsViolatingTopologySpreadConstraint
- - LowNodeUtilization
- deschedule:
- enabled:
- - RemovePodsHavingTooManyRestarts
- - RemovePodsViolatingNodeTaints
- - RemovePodsViolatingNodeAffinity
- - RemovePodsViolatingInterPodAntiAffinity
- priorityClassName: system-cluster-critical
- nodeSelector: {}
- # foo: bar
+ command:
+ - "/bin/descheduler"
+ cmdOptions:
+ v: 3
+ # Recommended to use the latest Policy API version supported by the Descheduler app version
+ deschedulerPolicyAPIVersion: "descheduler/v1alpha2"
+ # deschedulerPolicy contains the policies the descheduler executes.
+ # To use policies stored in an existing configMap use:
+ # NOTE: The name of the cm should comply to {{ template "descheduler.fullname" . }}
+ # deschedulerPolicy: {}
+ deschedulerPolicy:
+ nodeSelector: kubevirt.io/schedulable=true
+ maxNoOfPodsToEvictPerNode: 10
+ # maxNoOfPodsToEvictPerNamespace: 10
+ metricsCollector:
+ enabled: true
+ # ignorePvcPods: true
+ # evictLocalStoragePods: true
+ # evictDaemonSetPods: true
+ # tracing:
+ # collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
+ # transportCert: ""
+ # serviceName: ""
+ # serviceNamespace: ""
+ # sampleRate: 1.0
+ # fallbackToNoOpProviderOnError: true
+ profiles:
+ - name: default
+ pluginConfig:
+ - name: DefaultEvictor
+ args:
+ ignorePvcPods: true
+ evictLocalStoragePods: true
+ nodeFit: true
+ ignorePodsWithoutPDB: true
+ - name: RemoveDuplicates
+ - name: RemovePodsHavingTooManyRestarts
+ args:
+ podRestartThreshold: 100
+ includingInitContainers: true
+ - name: RemovePodsViolatingNodeAffinity
+ args:
+ nodeAffinityType:
+ - requiredDuringSchedulingIgnoredDuringExecution
+ - name: RemovePodsViolatingNodeTaints
+ args:
+ excludedTaints:
+ - node.kubernetes.io/unschedulable
+ - name: RemovePodsViolatingInterPodAntiAffinity
+ - name: RemovePodsViolatingTopologySpreadConstraint
+ - name: LowNodeUtilization
+ args:
+ thresholds:
+ cpu: 20
+ memory: 25
+ pods: 100
+ targetThresholds:
+ cpu: 60
+ memory: 75
+ pods: 100
+ metricsUtilization:
+ metricsServer: true
+ evictableNamespaces:
+ exclude:
+ - "cert-manager"
+ - "kube-system"
+ - "palette-system"
+ - "metallb-system"
+ - "cluster-{{ .spectro.system.cluster.uid }}"
+ - "kubevirt"
+ - "monitoring"
+ - "nginx"
+ - "vm-dashboard"
+ plugins:
+ balance:
+ enabled:
+ - RemoveDuplicates
+ - RemovePodsViolatingTopologySpreadConstraint
+ - LowNodeUtilization
+ deschedule:
+ enabled:
+ - RemovePodsHavingTooManyRestarts
+ - RemovePodsViolatingNodeTaints
+ - RemovePodsViolatingNodeAffinity
+ - RemovePodsViolatingInterPodAntiAffinity
+ priorityClassName: system-cluster-critical
+ nodeSelector: {}
+ # foo: bar
- affinity: {}
- # nodeAffinity:
- # requiredDuringSchedulingIgnoredDuringExecution:
- # nodeSelectorTerms:
- # - matchExpressions:
- # - key: kubernetes.io/e2e-az-name
- # operator: In
- # values:
- # - e2e-az1
- # - e2e-az2
- # podAntiAffinity:
- # requiredDuringSchedulingIgnoredDuringExecution:
- # - labelSelector:
- # matchExpressions:
- # - key: app.kubernetes.io/name
- # operator: In
- # values:
- # - descheduler
- # topologyKey: "kubernetes.io/hostname"
- topologySpreadConstraints: []
- # - maxSkew: 1
- # topologyKey: kubernetes.io/hostname
- # whenUnsatisfiable: DoNotSchedule
- # labelSelector:
- # matchLabels:
- # app.kubernetes.io/name: descheduler
- tolerations: []
- # - key: 'management'
- # operator: 'Equal'
- # value: 'tool'
- # effect: 'NoSchedule'
+ affinity: {}
+ # nodeAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # nodeSelectorTerms:
+ # - matchExpressions:
+ # - key: kubernetes.io/e2e-az-name
+ # operator: In
+ # values:
+ # - e2e-az1
+ # - e2e-az2
+ # podAntiAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # - labelSelector:
+ # matchExpressions:
+ # - key: app.kubernetes.io/name
+ # operator: In
+ # values:
+ # - descheduler
+ # topologyKey: "kubernetes.io/hostname"
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: kubernetes.io/hostname
+ # whenUnsatisfiable: DoNotSchedule
+ # labelSelector:
+ # matchLabels:
+ # app.kubernetes.io/name: descheduler
+ tolerations: []
+ # - key: 'management'
+ # operator: 'Equal'
+ # value: 'tool'
+ # effect: 'NoSchedule'
- rbac:
- # Specifies whether RBAC resources should be created
- create: true
- serviceAccount:
- # Specifies whether a ServiceAccount should be created
- create: true
- # The name of the ServiceAccount to use.
- # If not set and create is true, a name is generated using the fullname template
- name: # Specifies custom annotations for the serviceAccount
- annotations: {}
- podAnnotations: {}
- podLabels:
- spectrocloud.com/connection: proxy
- dnsConfig: {}
- livenessProbe:
- failureThreshold: 3
- httpGet:
- path: /healthz
- port: 10258
- scheme: HTTPS
- initialDelaySeconds: 3
- periodSeconds: 10
- service:
- enabled: false
- # @param service.ipFamilyPolicy [string], support SingleStack, PreferDualStack and RequireDualStack
- #
- ipFamilyPolicy: ""
- # @param service.ipFamilies [array] List of IP families (e.g. IPv4, IPv6) assigned to the service.
- # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
- # E.g.
- # ipFamilies:
- # - IPv6
- # - IPv4
- ipFamilies: []
- serviceMonitor:
- enabled: false
- # The namespace where Prometheus expects to find service monitors.
- # namespace: ""
- # Add custom labels to the ServiceMonitor resource
- additionalLabels: {} # prometheus: kube-prometheus-stack
- interval: ""
- # honorLabels: true
- insecureSkipVerify: true
- serverName: null
- metricRelabelings: []
- # - action: keep
- # regex: 'descheduler_(build_info|pods_evicted)'
- # sourceLabels: [__name__]
- relabelings: []
- # - sourceLabels: [__meta_kubernetes_pod_node_name]
- # separator: ;
- # regex: ^(.*)$
- # targetLabel: nodename
- # replacement: $1
- # action: replace
\ No newline at end of file
+ rbac:
+ # Specifies whether RBAC resources should be created
+ create: true
+ serviceAccount:
+ # Specifies whether a ServiceAccount should be created
+ create: true
+ # The name of the ServiceAccount to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+ # Specifies custom annotations for the serviceAccount
+ annotations: {}
+ podAnnotations: {}
+ podLabels:
+ spectrocloud.com/connection: proxy
+ dnsConfig: {}
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /healthz
+ port: 10258
+ scheme: HTTPS
+ initialDelaySeconds: 3
+ periodSeconds: 10
+ service:
+ enabled: false
+ # @param service.ipFamilyPolicy [string], support SingleStack, PreferDualStack and RequireDualStack
+ #
+ ipFamilyPolicy: ""
+ # @param service.ipFamilies [array] List of IP families (e.g. IPv4, IPv6) assigned to the service.
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
+ # E.g.
+ # ipFamilies:
+ # - IPv6
+ # - IPv4
+ ipFamilies: []
+ serviceMonitor:
+ enabled: false
+ # The namespace where Prometheus expects to find service monitors.
+ # namespace: ""
+ # Add custom labels to the ServiceMonitor resource
+ additionalLabels: {} # prometheus: kube-prometheus-stack
+ interval: ""
+ # honorLabels: true
+ insecureSkipVerify: true
+ serverName: null
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'descheduler_(build_info|pods_evicted)'
+ # sourceLabels: [__name__]
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
\ No newline at end of file
diff --git a/terraform/vmo-cluster/terraform.tfvars b/terraform/vmo-cluster/terraform.tfvars
index 0eee3c6..885a4d4 100644
--- a/terraform/vmo-cluster/terraform.tfvars
+++ b/terraform/vmo-cluster/terraform.tfvars
@@ -11,7 +11,7 @@ palette-project = "Default" # The name of your project in Palette.
############################
deploy-maas = true # Set to true to deploy to a new VMO cluster to MAAS.
-deploy-maas-vm = false # Set to true to create a VM on MAAS VMO cluster once deployed.
+deploy-maas-vm = true # Set to true to create a VM on MAAS VMO cluster once deployed.
pcg-name = "maas-pcg" # Provide the name of the PCG that will be used to deploy the Palette cluster.
maas-domain = "maas.sc" # Provide the MAAS domain that will be used to deploy the Palette cluster.
maas-control-plane-nodes = 1 # Provide the number of control plane nodes that will be used for the Palette cluster.
@@ -62,8 +62,8 @@ node-network = "10.11.130.129-10.11.130.131" # IP addresses to be assigned for u
# virtual_machines.tf
#####################
vm-deploy-namespace = "virtual-machines" # Namespace where your VM will be deployed.
-vm-deploy-name = "vmo-vm" # The name of your VM
-vm-labels = ["my-vmo-vm"] # Labels that will be applied to your VM.
+vm-deploy-name = "tf-new-template" # The name of your VM
+vm-labels = ["tf-new-template"] # Labels that will be applied to your VM.
vm-storage-Gi = "50Gi" # Size of the disk (PVC) that your VM will have.
vm-cpu-cores = 2 # Number of CPU cores your VM will have.
vm-cpu-sockets = 1 # Number of physical CPU sockets the CPU cores should be spread over.
@@ -73,4 +73,4 @@ vm-memory-Gi = "4Gi" # Amount of RAM (memory) your VM will h
#####################
# vmo-extras-manifest.yaml
#####################
-palette-user-id = "kenneth.heaslip@spectrocloud.com"
+palette-user-id = "kenneth.heaslip@spectrocloud.com"
diff --git a/terraform/vmo-cluster/tests/virtual-machines-replace-values.tftest.hcl b/terraform/vmo-cluster/tests/virtual-machines-replace-values.tftest.hcl
index e43a404..dda2366 100644
--- a/terraform/vmo-cluster/tests/virtual-machines-replace-values.tftest.hcl
+++ b/terraform/vmo-cluster/tests/virtual-machines-replace-values.tftest.hcl
@@ -26,10 +26,5 @@ run "verify_maas" {
command = plan
expect_failures = [
- var.vm-deploy-namespace,
- var.vm-deploy-name,
- var.vm-labels,
- var.vm-storage-Gi,
- var.vm-memory-Gi,
]
}
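In a `.tftest.hcl` file, `expect_failures` lists the variable validations that the plan is expected to trip for the supplied inputs; trimming the list, as above, means those checks must now pass. A minimal sketch of the pattern, reusing variables defined in this module:

```hcl
# Sketch of a Terraform test run block exercising a variable validation.
variables {
  deploy-maas = true
  pcg-name    = "REPLACE ME" # left unreplaced on purpose so the validation fires
}

run "verify_maas" {
  command = plan

  # The plan should fail only on the pcg-name validation block.
  expect_failures = [
    var.pcg-name,
  ]
}
```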
diff --git a/terraform/vmo-cluster/virtual-machines/cloud-init b/terraform/vmo-cluster/virtual-machines/cloud-init
index 935c9a1..ae54349 100644
--- a/terraform/vmo-cluster/virtual-machines/cloud-init
+++ b/terraform/vmo-cluster/virtual-machines/cloud-init
@@ -4,19 +4,19 @@ chpasswd: { expire: False }
password: spectro
disable_root: false
runcmd:
- - apt-get update
- - apt-get install -y qemu-guest-agent
- - systemctl start qemu-guest-agent
- - |
+- apt-get update
+- apt-get install -y qemu-guest-agent
+- systemctl start qemu-guest-agent
+- |
apt-get -y install ca-certificates curl
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
chmod a+r /etc/apt/keyrings/docker.asc
echo \
- "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
- $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
- tee /etc/apt/sources.list.d/docker.list > /dev/null
+ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+ $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+ tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update
apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
groupadd docker
- gpasswd -a ubuntu docker
+ gpasswd -a ubuntu docker
\ No newline at end of file
diff --git a/terraform/vmo-cluster/virtual_machines.tf b/terraform/vmo-cluster/virtual_machines.tf
index d8c62af..cc45d51 100644
--- a/terraform/vmo-cluster/virtual_machines.tf
+++ b/terraform/vmo-cluster/virtual_machines.tf
@@ -50,7 +50,7 @@ resource "spectrocloud_virtual_machine" "virtual-machine" {
}
volume {
- name = "ubuntu-2204"
+ name = "ubuntu-2204-vm"
volume_source {
data_volume {
name = "ubuntu-2204"
@@ -68,7 +68,7 @@ resource "spectrocloud_virtual_machine" "virtual-machine" {
}
disk {
- name = "ubuntu-2204"
+ name = "ubuntu-2204-vm"
disk_device {
disk {
bus = "virtio"
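The rename above separates the VM's attached volume and disk names (`ubuntu-2204-vm`) from the source data volume (`ubuntu-2204`) it clones. A trimmed sketch of the affected blocks after the change, limited to the fields visible in this hunk:

```hcl
# Inside the spectrocloud_virtual_machine resource.
volume {
  name = "ubuntu-2204-vm" # volume attached to the VM
  volume_source {
    data_volume {
      name = "ubuntu-2204" # source data volume keeps its original name
    }
  }
}

disk {
  name = "ubuntu-2204-vm" # matches the volume name above so the disk binds to it
  disk_device {
    disk {
      bus = "virtio"
    }
  }
}
```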
diff --git a/terraform/vmo-cluster/vm-test.yaml b/terraform/vmo-cluster/vm-test.yaml
new file mode 100644
index 0000000..5a34da2
--- /dev/null
+++ b/terraform/vmo-cluster/vm-test.yaml
@@ -0,0 +1,87 @@
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+ labels:
+ kubevirt.io/vm: tf-base-template-always-2
+ name: tf-base-template-always-2
+ namespace: virtual-machines
+spec:
+ dataVolumeTemplates:
+ - metadata:
+ name: tf-base-template-always-2-ubuntu
+ spec:
+ pvc:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: 50Gi
+ storageClassName: ceph-block
+ volumeMode: Block
+ source:
+ pvc:
+ name: template-ubuntu-2204
+ namespace: vmo-golden-images
+ template:
+ metadata:
+ annotations:
+ descheduler.alpha.kubernetes.io/evict: 'true'
+ labels:
+ kubevirt.io/domain: hellouni
+ kubevirt.io/size: small
+ spec:
+ domain:
+ cpu:
+ cores: 2
+ sockets: 1
+ threads: 1
+ devices:
+ disks:
+ - disk:
+ bus: virtio
+ name: datavolume-os
+ - disk:
+ bus: virtio
+ name: cloudinitdisk
+ interfaces:
+ - masquerade: {}
+ model: virtio
+ name: default
+ macAddress: 02:D1:47:31:59:20
+ machine:
+ type: q35
+ memory:
+ guest: 2Gi
+ networks:
+ - name: default
+ pod: {}
+ volumes:
+ - dataVolume:
+ name: tf-base-template-always-2-ubuntu
+ name: datavolume-os
+ - cloudInitNoCloud:
+ userData: |
+ #cloud-config
+ ssh_pwauth: True
+ chpasswd: { expire: False }
+ password: spectro
+ disable_root: false
+ runcmd:
+ - apt-get update
+ - apt-get install -y qemu-guest-agent
+ - systemctl start qemu-guest-agent
+ - |
+ apt-get -y install ca-certificates curl
+ install -m 0755 -d /etc/apt/keyrings
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+ chmod a+r /etc/apt/keyrings/docker.asc
+ echo \
+ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+ $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+ tee /etc/apt/sources.list.d/docker.list > /dev/null
+ apt-get update
+ apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+ groupadd docker
+ gpasswd -a ubuntu docker
+ name: cloudinitdisk
+ runStrategy: Always
\ No newline at end of file
From defb64d68f31f00406f6aae6ecc68b69b2f57097 Mon Sep 17 00:00:00 2001
From: kenheaslip-sc
Date: Mon, 16 Jun 2025 13:49:00 -0400
Subject: [PATCH 08/11] RC2
---
terraform/vmo-cluster/terraform.tfvars | 4 ----
1 file changed, 4 deletions(-)
diff --git a/terraform/vmo-cluster/terraform.tfvars b/terraform/vmo-cluster/terraform.tfvars
index 885a4d4..49d6978 100644
--- a/terraform/vmo-cluster/terraform.tfvars
+++ b/terraform/vmo-cluster/terraform.tfvars
@@ -70,7 +70,3 @@ vm-cpu-sockets = 1 # Number of physical CPU sockets the CP
vm-cpu-threads = 2 # Number of CPU threads to use for the VM CPU
vm-memory-Gi = "4Gi" # Amount of RAM (memory) your VM will have
-#####################
-# vmo-extras-manifest.yaml
-#####################
-palette-user-id = "kenneth.heaslip@spectrocloud.com"
From cd5867fca6831bb72064345acf00738aaa0919ab Mon Sep 17 00:00:00 2001
From: kenheaslip-sc
Date: Mon, 16 Jun 2025 14:06:51 -0400
Subject: [PATCH 09/11] RC3
---
.../manifests/vmo-extras-manifest copy.yaml | 132 ------------------
terraform/vmo-cluster/terraform.tfvars | 30 ++--
terraform/vmo-cluster/vm-test.yaml | 87 ------------
3 files changed, 15 insertions(+), 234 deletions(-)
delete mode 100644 terraform/vmo-cluster/manifests/vmo-extras-manifest copy.yaml
delete mode 100644 terraform/vmo-cluster/vm-test.yaml
diff --git a/terraform/vmo-cluster/manifests/vmo-extras-manifest copy.yaml b/terraform/vmo-cluster/manifests/vmo-extras-manifest copy.yaml
deleted file mode 100644
index 6244567..0000000
--- a/terraform/vmo-cluster/manifests/vmo-extras-manifest copy.yaml
+++ /dev/null
@@ -1,132 +0,0 @@
-apiVersion: spectrocloud.com/v1
-kind: VmTemplate
-metadata:
- name: ubuntu-2204
-spec:
- description: Ubuntu 22.04
- displayName: Ubuntu 22.04
- icon: https://s3.amazonaws.com/manifests.spectrocloud.com/logos/ubuntu.png
- dataVolumeTemplates:
- - metadata:
- name: ubuntu-2204
- spec:
- source:
- pvc:
- name: template-ubuntu-2204
- namespace: vmo-golden-images
- pvc:
- accessModes:
- - ReadWriteMany
- resources:
- requests:
- storage: 50Gi
- volumeMode: Block
- storageClassName: ceph-block
- template:
- metadata:
- annotations:
- descheduler.alpha.kubernetes.io/evict: "true"
- labels:
- kubevirt.io/size: small
- kubevirt.io/domain: hellouni
- spec:
- domain:
- cpu:
- cores: 2
- sockets: 1
- threads: 1
- devices:
- disks:
- - disk:
- bus: virtio
- name: datavolume-os
- - disk:
- bus: virtio
- name: cloudinitdisk
- interfaces:
- - masquerade: {}
- name: default
- model: virtio
- #macAddress: '00:5e:ab:cd:ef:01'
- machine:
- type: q35
- resources:
- limits:
- memory: 2Gi
- requests:
- memory: 2Gi
- networks:
- - name: default
- pod: {}
- volumes:
- - dataVolume:
- name: ubuntu-2204
- name: datavolume-os
- - cloudInitNoCloud:
- userData: |
- #cloud-config
- ssh_pwauth: True
- chpasswd: { expire: False }
- password: spectro
- disable_root: false
- runcmd:
- - apt-get update
- - apt-get install -y qemu-guest-agent
- - systemctl start qemu-guest-agent
- - |
- apt-get -y install ca-certificates curl
- install -m 0755 -d /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
- chmod a+r /etc/apt/keyrings/docker.asc
- echo \
- "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
- $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
- tee /etc/apt/sources.list.d/docker.list > /dev/null
- apt-get update
- apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- groupadd docker
- gpasswd -a ubuntu docker
- name: cloudinitdisk
----
-apiVersion: cdi.kubevirt.io/v1beta1
-kind: DataVolume
-metadata:
- name: "template-ubuntu-2204"
- namespace: "vmo-golden-images"
- annotations:
- cdi.kubevirt.io/storage.deleteAfterCompletion: "false"
- cdi.kubevirt.io/storage.bind.immediate.requested: ""
-spec:
- storage:
- accessModes:
- - ReadWriteMany
- resources:
- requests:
- storage: 50Gi
- volumeMode: Block
- storageClassName: ceph-block
- source:
- registry:
- url: "docker://gcr.io/spectro-images-public/release/vm-dashboard/os/ubuntu-container-disk:22.04"
----
-apiVersion: cdi.kubevirt.io/v1beta1
-kind: StorageProfile
-metadata:
- name: ceph-filesystem
-spec:
- claimPropertySets:
- - accessModes:
- - ReadWriteMany
- volumeMode: Filesystem
- cloneStrategy: csi-clone
----
-apiVersion: cdi.kubevirt.io/v1beta1
-kind: StorageProfile
-metadata:
- name: ceph-block
-spec:
- claimPropertySets:
- - accessModes:
- - ReadWriteMany
- volumeMode: Block
- cloneStrategy: csi-clone
\ No newline at end of file
diff --git a/terraform/vmo-cluster/terraform.tfvars b/terraform/vmo-cluster/terraform.tfvars
index 49d6978..e01a6c0 100644
--- a/terraform/vmo-cluster/terraform.tfvars
+++ b/terraform/vmo-cluster/terraform.tfvars
@@ -10,20 +10,20 @@ palette-project = "Default" # The name of your project in Palette.
# MAAS Deployment Settings
############################
-deploy-maas = true # Set to true to deploy to a new VMO cluster to MAAS.
-deploy-maas-vm = true # Set to true to create a VM on MAAS VMO cluster once deployed.
-pcg-name = "maas-pcg" # Provide the name of the PCG that will be used to deploy the Palette cluster.
-maas-domain = "maas.sc" # Provide the MAAS domain that will be used to deploy the Palette cluster.
+deploy-maas = false # Set to true to deploy to a new VMO cluster to MAAS.
+deploy-maas-vm = false # Set to true to create a VM on MAAS VMO cluster once deployed.
+pcg-name = "REPLACE ME" # Provide the name of the PCG that will be used to deploy the Palette cluster.
+maas-domain = "REPLACE ME" # Provide the MAAS domain that will be used to deploy the Palette cluster.
maas-control-plane-nodes = 1 # Provide the number of control plane nodes that will be used for the Palette cluster.
-maas-control-plane-resource-pool = "docs" # Provide a resource pool for the control plane nodes.
-maas-control-plane-azs = ["az1"] # Provide a set of availability zones for the control plane nodes.
-maas-control-plane-node-tags = ["docs-cp"] # Provide a set of node tags for the control plane nodes.
+maas-control-plane-resource-pool = "REPLACE ME" # Provide a resource pool for the control plane nodes.
+maas-control-plane-azs = ["REPLACE ME"] # Provide a set of availability zones for the control plane nodes.
+maas-control-plane-node-tags = ["REPLACE ME"] # Provide a set of node tags for the control plane nodes.
ctl-node-min-cpu = 6 # Minimum number of CPU cores required for control plane nodes
ctl-node-min-memory-mb = 8096 # Minimum amount of RAM (memory) required for control plane nodes
maas-worker-nodes = 1 # Provide the number of worker nodes that will be used for the Palette cluster.
-maas-worker-resource-pool = "docs" # Provide a resource pool for the worker nodes.
-maas-worker-azs = ["az1"] # Provide a set of availability zones for the worker nodes.
-maas-worker-node-tags = ["docs"] # Provide a set of node tags for the worker nodes.
+maas-worker-resource-pool = "REPLACE ME" # Provide a resource pool for the worker nodes.
+maas-worker-azs = ["REPLACE ME"] # Provide a set of availability zones for the worker nodes.
+maas-worker-node-tags = ["REPLACE ME"] # Provide a set of node tags for the worker nodes.
wrk-node-min-cpu = 8 # Minimum number of CPU cores required for worker nodes
wrk-node-min-memory-mb = 16384 # Minimum amount of RAM (memory) required for worker nodes
@@ -31,7 +31,7 @@ wrk-node-min-memory-mb = 16384 # Minimum amount of RAM (memory)
# cluster_profiles.tf
#####################
-vmo-cluster-name = "vmo-cluster-maas"
+vmo-cluster-name = "REPLACE ME"
cluster-profile-type = "cluster" # infra, cluster, add-on, or system
cluster-profile-version = "1.0.0" # Version number for the cluster profile in Palette
@@ -51,19 +51,19 @@ cluster-services-CIDR = ["100.64.64.0/18"]
###############################
# manifests/metallb-values.yaml
###############################
-metallb-ip-pool = ["10.11.130.129-10.11.130.131"] # IP addresses to be assigned for use by MetalLB
+metallb-ip-pool = ["REPLACE ME"] # IP addresses to be assigned for use by MetalLB
###############################
# manifests/ubuntu-values.yaml
###############################
-node-network = "10.11.130.129-10.11.130.131" # IP addresses to be assigned for use by MetalLB
+node-network = "REPLACE ME" # IP addresses to be assigned for use by MetalLB
#####################
# virtual_machines.tf
#####################
vm-deploy-namespace = "virtual-machines" # Namespace where your VM will be deployed.
-vm-deploy-name = "tf-new-template" # The name of your VM
-vm-labels = ["tf-new-template"] # Labels that will be applied to your VM.
+vm-deploy-name = "REPLACE ME" # The name of your VM
+vm-labels = ["REPLACE ME"] # Labels that will be applied to your VM.
vm-storage-Gi = "50Gi" # Size of the disk (PVC) that your VM will have.
vm-cpu-cores = 2 # Number of CPU cores your VM will have.
vm-cpu-sockets = 1 # Number of physical CPU sockets the CPU cores should be spread over.
diff --git a/terraform/vmo-cluster/vm-test.yaml b/terraform/vmo-cluster/vm-test.yaml
deleted file mode 100644
index 5a34da2..0000000
--- a/terraform/vmo-cluster/vm-test.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-apiVersion: kubevirt.io/v1
-kind: VirtualMachine
-metadata:
- labels:
- kubevirt.io/vm: tf-base-template-always-2
- name: tf-base-template-always-2
- namespace: virtual-machines
-spec:
- dataVolumeTemplates:
- - metadata:
- name: tf-base-template-always-2-ubuntu
- spec:
- pvc:
- accessModes:
- - ReadWriteMany
- resources:
- requests:
- storage: 50Gi
- storageClassName: ceph-block
- volumeMode: Block
- source:
- pvc:
- name: template-ubuntu-2204
- namespace: vmo-golden-images
- template:
- metadata:
- annotations:
- descheduler.alpha.kubernetes.io/evict: 'true'
- labels:
- kubevirt.io/domain: hellouni
- kubevirt.io/size: small
- spec:
- domain:
- cpu:
- cores: 2
- sockets: 1
- threads: 1
- devices:
- disks:
- - disk:
- bus: virtio
- name: datavolume-os
- - disk:
- bus: virtio
- name: cloudinitdisk
- interfaces:
- - masquerade: {}
- model: virtio
- name: default
- macAddress: 02:D1:47:31:59:20
- machine:
- type: q35
- memory:
- guest: 2Gi
- networks:
- - name: default
- pod: {}
- volumes:
- - dataVolume:
- name: tf-base-template-always-2-ubuntu
- name: datavolume-os
- - cloudInitNoCloud:
- userData: |
- #cloud-config
- ssh_pwauth: True
- chpasswd: { expire: False }
- password: spectro
- disable_root: false
- runcmd:
- - apt-get update
- - apt-get install -y qemu-guest-agent
- - systemctl start qemu-guest-agent
- - |
- apt-get -y install ca-certificates curl
- install -m 0755 -d /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
- chmod a+r /etc/apt/keyrings/docker.asc
- echo \
- "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
- $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
- tee /etc/apt/sources.list.d/docker.list > /dev/null
- apt-get update
- apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- groupadd docker
- gpasswd -a ubuntu docker
- name: cloudinitdisk
- runStrategy: Always
\ No newline at end of file
From 6b9fc2789afe7a3cf51023db2f408c299791530e Mon Sep 17 00:00:00 2001
From: kenheaslip-sc
Date: Tue, 17 Jun 2025 13:20:54 -0400
Subject: [PATCH 10/11] feat: vmo with terraform
---
README.md | 2 +
terraform/vmo-cluster/README.md | 41 +++--
terraform/vmo-cluster/cluster_profiles.tf | 36 ++---
terraform/vmo-cluster/clusters.tf | 10 +-
terraform/vmo-cluster/data.tf | 2 +-
terraform/vmo-cluster/inputs.tf | 150 ++++++------------
.../vmo-cluster/manifests/metallb-values.yaml | 2 +-
.../vmo-cluster/manifests/ubuntu-values.yaml | 2 +-
.../manifests/vmo-extras-manifest.yaml | 30 +++-
.../vmo-cluster/manifests/vmo-values.yaml | 6 +-
terraform/vmo-cluster/terraform.tfvars | 73 ++++-----
11 files changed, 149 insertions(+), 205 deletions(-)
diff --git a/README.md b/README.md
index bdbda40..3cf76d1 100644
--- a/README.md
+++ b/README.md
@@ -24,6 +24,8 @@ The following tutorial code is available:
- [getting-started-deployment-tf](./terraform/getting-started-deployment-tf/README.md) - Learn how to deploy and update a cluster to AWS, Azure, GCP, and VMware vSphere with Palette. [Link](https://docs.spectrocloud.com/getting-started/terraform)
+- [maas-vmo-cluster-deployment-tf](./terraform/vmo-cluster/README.md) - Learn how to deploy a VMO cluster to MAAS and deploy VMs with Palette. [Link](https://docs.spectrocloud.com/tutorials/vmo/vmo-maas)
+
## Docker
All the tutorials are available in a Docker image that you can use to get started with the tutorials.
diff --git a/terraform/vmo-cluster/README.md b/terraform/vmo-cluster/README.md
index df72fa0..6cd8857 100644
--- a/terraform/vmo-cluster/README.md
+++ b/terraform/vmo-cluster/README.md
@@ -1,6 +1,6 @@
# Deploy and Manage VMs using Palette VMO
-This folder contains the demo code for the **Deploy and Manage VMs using Palette VMO** tutorial.
+This folder contains the demo code for the [Deploy and Manage VMs using Palette VMO](https://docs.spectrocloud.com/tutorials/vmo/vmo-maas/) tutorial.
The Terraform code has two main toggle variables that you can use to deploy resources to [Canonical MAAS](https://maas.io/docs).
@@ -10,7 +10,8 @@ The Terraform code has two main toggle variables that you can use to deploy reso
| `deploy-maas-vm` | MAAS | Enable to deploy a VM to a deployed MAAS cluster. | `false` |
-To get started, open the **terraform.tfvars** file. Toggle the provider variable as specified in the table and provide values to your cloud provider variables, replacing all instances of the string `REPLACE ME`.
+To get started, open the **terraform.tfvars** file. Toggle the provider variables as specified in the table and provide values for your cloud provider variables, replacing all instances of the string `REPLACE_ME`.
+
## Requirements
@@ -53,43 +54,37 @@ No modules.
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
-| [cluster-profile-type](#input\_cluster-profile-type) | The name of the PCG that will be used to deploy the cluster. | `string` | n/a | yes |
-| [cluster-profile-version](#input\_cluster-profile-version) | The name of the PCG that will be used to deploy the cluster. | `string` | n/a | yes |
-| [cluster-services-CIDR](#input\_cluster-services-CIDR) | CIDR notation subnets for cluster services ex. 192.168.1.0/24. | `set(string)` | n/a | yes |
-| [ctl-node-min-cpu](#input\_ctl-node-min-cpu) | Minimum number of CPU cores allocated to the Control Plane node. | `number` | n/a | yes |
-| [ctl-node-min-memory-mb](#input\_ctl-node-min-memory-mb) | Minimum amount of RAM allocated to the Control Plane node. | `number` | n/a | yes |
| [deploy-maas](#input\_deploy-maas) | A flag for enabling a deployment on MAAS. | `bool` | n/a | yes |
| [deploy-maas-vm](#input\_deploy-maas-vm) | A flag for enabling a VM creation on the MAAS cluster. | `bool` | n/a | yes |
-| [host-vlans](#input\_host-vlans) | Node Allowed VLANs | `number` | `1` | no |
+| [host-vlans](#input\_host-vlans) | Node Allowed VLANs | `string` | `"1"` | no |
+| [maas-control-node-min-cpu](#input\_maas-control-node-min-cpu) | Minimum number of CPU cores allocated to the Control Plane node. | `number` | n/a | yes |
+| [maas-control-node-min-memory-mb](#input\_maas-control-node-min-memory-mb) | Minimum amount of RAM allocated to the Control Plane node. | `number` | n/a | yes |
| [maas-control-plane-azs](#input\_maas-control-plane-azs) | Set of AZs for the MAAS control plane nodes. | `set(string)` | n/a | yes |
| [maas-control-plane-node-tags](#input\_maas-control-plane-node-tags) | Set of node tags for the MAAS control plane nodes. | `set(string)` | n/a | yes |
-| [maas-control-plane-nodes](#input\_maas-control-plane-nodes) | Number of MaaS control plane nodes | `number` | `1` | no |
| [maas-control-plane-resource-pool](#input\_maas-control-plane-resource-pool) | Resource pool for the MAAS control plane nodes. | `string` | n/a | yes |
| [maas-domain](#input\_maas-domain) | MAAS domain | `string` | n/a | yes |
| [maas-worker-azs](#input\_maas-worker-azs) | Set of AZs for the MAAS worker nodes. | `set(string)` | n/a | yes |
+| [maas-worker-node-min-cpu](#input\_maas-worker-node-min-cpu) | Minimum number of CPU cores allocated to the worker node. | `number` | n/a | yes |
+| [maas-worker-node-min-memory-mb](#input\_maas-worker-node-min-memory-mb) | Minimum amount of RAM allocated to the worker node. | `number` | n/a | yes |
| [maas-worker-node-tags](#input\_maas-worker-node-tags) | Set of node tags for the MAAS worker nodes. | `set(string)` | n/a | yes |
-| [maas-worker-nodes](#input\_maas-worker-nodes) | Number of MaaS worker nodes | `number` | `1` | no |
| [maas-worker-resource-pool](#input\_maas-worker-resource-pool) | Resource pool for the MAAS worker nodes. | `string` | n/a | yes |
-| [metallb-ip-pool](#input\_metallb-ip-pool) | CIDR notation subnets or IP range ex. 192.168.1.0/24 or 192.168.1.0-192.168.1.255 | `set(string)` | n/a | yes |
+| [metallb-ip-pool](#input\_metallb-ip-pool) | CIDR notation subnets or IP range ex. 192.168.1.0/24 or 192.168.1.0-192.168.1.255 | `string` | n/a | yes |
| [node-network](#input\_node-network) | The subnet the Ubuntu nodes will use. | `string` | n/a | yes |
| [palette-project](#input\_palette-project) | The name of your project in Palette. | `string` | n/a | yes |
-| [palette-user-id](#input\_palette-user-id) | The amount of storage to provision for your VM in Gi. | `string` | n/a | yes |
+| [palette-user-id](#input\_palette-user-id) | Your Palette user ID. | `string` | n/a | yes |
| [pcg-name](#input\_pcg-name) | The name of the PCG that will be used to deploy the cluster. | `string` | n/a | yes |
-| [pod-CIDR](#input\_pod-CIDR) | CIDR notation subnets for the pd network ex. 192.168.1.0/24. | `set(string)` | n/a | yes |
| [tags](#input\_tags) | The default tags to apply to Palette resources. | `list(string)` | [
"spectro-cloud-education",
"spectrocloud:tutorials",
"terraform_managed:true",
"tutorial:vmo-cluster-deployment"
]
| no |
| [vm-cpu-cores](#input\_vm-cpu-cores) | Number of CPU cores to allocate to your VM. | `number` | `1` | no |
-| [vm-cpu-sockets](#input\_vm-cpu-sockets) | Number of CPU cores to allocate to your VM. | `number` | `1` | no |
-| [vm-cpu-threads](#input\_vm-cpu-threads) | Number of CPU cores to allocate to your VM. | `number` | `1` | no |
-| [vm-deploy-name](#input\_vm-deploy-name) | The namespace where your VMs will be deployed. | `string` | n/a | yes |
-| [vm-deploy-namespace](#input\_vm-deploy-namespace) | The namespace where your VMs will be deployed. | `string` | n/a | yes |
-| [vm-labels](#input\_vm-labels) | The namespace where your VMs will be deployed. | `set(string)` | n/a | yes |
+| [vm-cpu-sockets](#input\_vm-cpu-sockets) | Number of CPU sockets the assigned CPU cores should be spread across. | `number` | `1` | no |
+| [vm-cpu-threads](#input\_vm-cpu-threads) | Number of CPU threads your VM can use. | `number` | `1` | no |
+| [vm-deploy-name](#input\_vm-deploy-name) | The name of your VM. | `string` | `"vmo-tutorial-vm"` | no |
+| [vm-deploy-namespace](#input\_vm-deploy-namespace) | The namespace where your VMs will be deployed. | `string` | `"virtual-machines"` | no |
+| [vm-labels](#input\_vm-labels) | The labels that will be applied to your VM. | `set(string)` | [
"vmo-tutorial-vm"
]
| no |
| [vm-memory-Gi](#input\_vm-memory-Gi) | The amount of storage to provision for your VM in Gi. | `string` | n/a | yes |
| [vm-storage-Gi](#input\_vm-storage-Gi) | The amount of storage to provision for your VM in Gi. | `string` | n/a | yes |
-| [vm-vlans](#input\_vm-vlans) | VM allowed VLANs. | `number` | `1` | no |
-| [vmo-cluster-name](#input\_vmo-cluster-name) | The name of the cluster. | `string` | n/a | yes |
-| [vmo-network-interface](#input\_vmo-network-interface) | The network interface VMO will use for VM traffic. | `set(string)` | n/a | yes |
-| [wrk-node-min-cpu](#input\_wrk-node-min-cpu) | Minimum number of CPU cores allocated to the Control Plane node. | `number` | n/a | yes |
-| [wrk-node-min-memory-mb](#input\_wrk-node-min-memory-mb) | Minimum amount of RAM allocated to the Control Plane node. | `number` | n/a | yes |
+| [vm-vlans](#input\_vm-vlans) | VM allowed VLANs. | `string` | `"1"` | no |
+| [vmo-cluster-name](#input\_vmo-cluster-name) | The name of the cluster. | `string` | `"vmo-tutorial-cluster"` | no |
+| [vmo-network-interface](#input\_vmo-network-interface) | The host network interface VMO will use for VM traffic. | `string` | `"br0"` | no |
## Outputs
diff --git a/terraform/vmo-cluster/cluster_profiles.tf b/terraform/vmo-cluster/cluster_profiles.tf
index 9becb9b..775bb95 100644
--- a/terraform/vmo-cluster/cluster_profiles.tf
+++ b/terraform/vmo-cluster/cluster_profiles.tf
@@ -8,8 +8,8 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
description = "A basic cluster profile for MAAS VMO"
tags = concat(var.tags, ["env:maas"])
cloud = "maas"
- type = var.cluster-profile-type
- version = var.cluster-profile-version
+ type = "cluster"
+ version = "1.0.0"
pack {
name = data.spectrocloud_pack.maas_ubuntu.name
@@ -22,14 +22,11 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
}
pack {
- name = data.spectrocloud_pack.maas_k8s.name
- tag = data.spectrocloud_pack.maas_k8s.version
- uid = data.spectrocloud_pack.maas_k8s.id
- values = templatefile("manifests/k8s-values.yaml", {
- pod-CIDR = var.pod-CIDR,
- clusterServicesCIDR = var.cluster-services-CIDR
- })
- type = "spectro"
+ name = data.spectrocloud_pack.maas_k8s.name
+ tag = data.spectrocloud_pack.maas_k8s.version
+ uid = data.spectrocloud_pack.maas_k8s.id
+ values = file("manifests/k8s-values.yaml")
+ type = "spectro"
}
pack {
@@ -41,13 +38,11 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
}
pack {
- name = data.spectrocloud_pack.maas_csi.name
- tag = data.spectrocloud_pack.maas_csi.version
- uid = data.spectrocloud_pack.maas_csi.id
- values = templatefile("manifests/csi-values.yaml", {
- worker_nodes = var.maas-worker-nodes
- })
- type = "spectro"
+ name = data.spectrocloud_pack.maas_csi.name
+ tag = data.spectrocloud_pack.maas_csi.version
+ uid = data.spectrocloud_pack.maas_csi.id
+ values = file("manifests/csi-values.yaml")
+ type = "spectro"
}
pack {
@@ -79,8 +74,9 @@ resource "spectrocloud_cluster_profile" "maas-vmo-profile" {
values = file("manifests/vmo-extras-values.yaml")
manifest {
name = "vmo-extras"
- content = file("manifests/vmo-extras-manifest.yaml")
+ content = templatefile("manifests/vmo-extras-manifest.yaml", {
+ palette-user-id = var.palette-user-id
+ })
}
}
-
-}
+}
\ No newline at end of file
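With this change the Kubernetes and CSI packs consume their values files verbatim via `file()`, while the VMO extras manifest is rendered with `templatefile()` so that `${palette-user-id}` is substituted at plan time. A minimal sketch contrasting the two calls, using the paths shown above:

```hcl
# Static values: passed through unchanged.
values = file("manifests/k8s-values.yaml")

# Templated manifest: ${palette-user-id} in the YAML is replaced with the variable value.
content = templatefile("manifests/vmo-extras-manifest.yaml", {
  palette-user-id = var.palette-user-id
})
```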
diff --git a/terraform/vmo-cluster/clusters.tf b/terraform/vmo-cluster/clusters.tf
index cfb02e5..3341565 100644
--- a/terraform/vmo-cluster/clusters.tf
+++ b/terraform/vmo-cluster/clusters.tf
@@ -8,7 +8,7 @@
resource "spectrocloud_cluster_maas" "maas-cluster" {
count = var.deploy-maas ? 1 : 0
- name = var.vmo-cluster-name
+ name = "vmo-tutorial-cluster"
tags = concat(var.tags, ["env:maas"])
cloud_account_id = data.spectrocloud_cloudaccount_maas.account[0].id
pause_agent_upgrades = "unlock"
@@ -28,8 +28,8 @@ resource "spectrocloud_cluster_maas" "maas-cluster" {
azs = var.maas-control-plane-azs
node_tags = var.maas-control-plane-node-tags
instance_type {
- min_cpu = var.ctl-node-min-cpu
- min_memory_mb = var.ctl-node-min-memory-mb
+ min_cpu = var.maas-control-node-min-cpu
+ min_memory_mb = var.maas-control-node-min-memory-mb
}
placement {
resource_pool = var.maas-control-plane-resource-pool
@@ -42,8 +42,8 @@ resource "spectrocloud_cluster_maas" "maas-cluster" {
azs = var.maas-worker-azs
node_tags = var.maas-worker-node-tags
instance_type {
- min_cpu = var.wrk-node-min-cpu
- min_memory_mb = var.wrk-node-min-memory-mb
+ min_cpu = var.maas-worker-node-min-cpu
+ min_memory_mb = var.maas-worker-node-min-memory-mb
}
placement {
resource_pool = var.maas-worker-resource-pool
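The `deploy-maas` toggle gates the cluster resource through `count`, so anything that refers to it (or to the MAAS cloud account data source) has to index into the single instance. A trimmed sketch of the pattern used in clusters.tf:

```hcl
# Created only when the MAAS toggle is enabled.
resource "spectrocloud_cluster_maas" "maas-cluster" {
  count            = var.deploy-maas ? 1 : 0
  name             = "vmo-tutorial-cluster"
  cloud_account_id = data.spectrocloud_cloudaccount_maas.account[0].id
  # ...machine pools and other arguments as shown in the hunk above...
}
```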
diff --git a/terraform/vmo-cluster/data.tf b/terraform/vmo-cluster/data.tf
index baac57c..2b15c62 100644
--- a/terraform/vmo-cluster/data.tf
+++ b/terraform/vmo-cluster/data.tf
@@ -56,6 +56,6 @@ data "spectrocloud_pack" "maas_vmo" {
data "spectrocloud_cluster" "maas_vmo_cluster" {
count = var.deploy-maas-vm ? 1 : 0
depends_on = [spectrocloud_cluster_maas.maas-cluster]
- name = "vmo-cluster-maas"
+ name = "vmo-tutorial-cluster"
context = "project"
}
diff --git a/terraform/vmo-cluster/inputs.tf b/terraform/vmo-cluster/inputs.tf
index b7ee3eb..70805fd 100644
--- a/terraform/vmo-cluster/inputs.tf
+++ b/terraform/vmo-cluster/inputs.tf
@@ -11,9 +11,18 @@ variable "palette-project" {
validation {
condition = var.palette-project != ""
- error_message = "Provide the correct Palette project."
+ error_message = "Provide a Palette project name."
}
+}
+variable "palette-user-id" {
+ type = string
+ description = "Your Palette user ID."
+
+ validation {
+ condition = var.palette-user-id != ""
+ error_message = "Please provide a Palette user ID."
+ }
}
######################
@@ -31,73 +40,46 @@ variable "tags" {
]
}
-
-
-
-#####################
-# cluster_profiles.tf
-#####################
-
-variable "cluster-profile-type" {
- type = string
- description = "The name of the PCG that will be used to deploy the cluster."
-
- validation {
- condition = var.deploy-maas ? var.cluster-profile-type != "REPLACE ME" && lower(var.cluster-profile-type) == "cluster" || lower(var.cluster-profile-type) == "infra" || lower(var.cluster-profile-type) == "add-on" || lower(var.cluster-profile-type) == "system" : true
- error_message = "Cluster profile type must be 'cluster', 'infra', 'add-on', or 'system'."
- }
-}
-
-variable "cluster-profile-version" {
- type = string
- description = "The name of the PCG that will be used to deploy the cluster."
-
- validation {
- condition = var.deploy-maas ? var.cluster-profile-version != "REPLACE ME" && var.cluster-profile-version != "" : true
- error_message = "Cluster profile version must be set."
- }
-}
-
#########################
# clusters.tf
#########################
-variable "ctl-node-min-cpu" {
+variable "maas-control-node-min-cpu" {
type = number
description = "Minimum number of CPU cores allocated to the Control Plane node."
validation {
- condition = var.deploy-maas ? var.ctl-node-min-cpu > 0 : true
+ condition = var.deploy-maas ? var.maas-control-node-min-cpu > 0 : true
error_message = "Provide a valid number of cores for your Control Plane node."
}
}
-variable "ctl-node-min-memory-mb" {
+variable "maas-control-node-min-memory-mb" {
type = number
description = "Minimum amount of RAM allocated to the Control Plane node."
validation {
- condition = var.deploy-maas ? var.ctl-node-min-memory-mb > 0 : true
+ condition = var.deploy-maas ? var.maas-control-node-min-memory-mb > 0 : true
error_message = "Provide a valid amount of RAM (MB) for your Control Plane node."
}
}
-variable "wrk-node-min-cpu" {
+variable "maas-worker-node-min-cpu" {
type = number
- description = "Minimum number of CPU cores allocated to the Control Plane node."
+ description = "Minimum number of CPU cores allocated to the worker node."
validation {
- condition = var.deploy-maas ? var.wrk-node-min-cpu > 0 : true
- error_message = "Provide a valid number of cores for your worker node."
+ condition = var.deploy-maas ? var.maas-worker-node-min-cpu > 0 : true
+ error_message = "Provide a valid number of CPU cores for your worker node."
}
}
-variable "wrk-node-min-memory-mb" {
+variable "maas-worker-node-min-memory-mb" {
type = number
- description = "Minimum amount of RAM allocated to the Control Plane node."
+ description = "Minimum amount of RAM allocated to the worker node."
validation {
- condition = var.deploy-maas ? var.wrk-node-min-memory-mb > 0 : true
+ condition = var.deploy-maas ? var.maas-worker-node-min-memory-mb > 0 : true
error_message = "Provide a valid amount of RAM (MB) for your worker node."
}
}
@@ -105,6 +87,7 @@ variable "wrk-node-min-memory-mb" {
variable "vmo-cluster-name" {
type = string
description = "The name of the cluster."
+ default = "vmo-tutorial-cluster"
validation {
condition = var.deploy-maas ? var.vmo-cluster-name != "REPLACE ME" && var.vmo-cluster-name != "" : true
@@ -128,7 +111,7 @@ variable "pcg-name" {
validation {
condition = var.deploy-maas ? var.pcg-name != "REPLACE ME" && var.pcg-name != "" : true
- error_message = "Provide the correct MAAS PCG name."
+ error_message = "Provide a valid MAAS PCG name."
}
}
@@ -138,18 +121,7 @@ variable "maas-domain" {
validation {
condition = var.deploy-maas ? var.maas-domain != "REPLACE ME" && var.maas-domain != "" : true
- error_message = "Provide the correct MAAS domain."
- }
-}
-
-variable "maas-worker-nodes" {
- type = number
- description = "Number of MaaS worker nodes"
- default = 1
-
- validation {
- condition = var.deploy-maas ? var.maas-worker-nodes > 0 : true
- error_message = "Provide a valid number of worker nodes."
+ error_message = "Provide a valid MAAS domain."
}
}
@@ -183,24 +155,13 @@ variable "maas-worker-node-tags" {
}
}
-variable "maas-control-plane-nodes" {
- type = number
- description = "Number of MaaS control plane nodes"
- default = 1
-
- validation {
- condition = var.deploy-maas ? var.maas-control-plane-nodes > 0 : true
- error_message = "Provide a valid number of control plane nodes."
- }
-}
-
variable "maas-control-plane-resource-pool" {
type = string
description = "Resource pool for the MAAS control plane nodes."
validation {
condition = var.deploy-maas ? var.maas-control-plane-resource-pool != "REPLACE ME" && var.maas-control-plane-resource-pool != "" : true
- error_message = "Provide a valid resource pool for worker nodes."
+ error_message = "Provide a valid resource pool for MAAS control plane nodes."
}
}
@@ -210,7 +171,7 @@ variable "maas-control-plane-azs" {
validation {
condition = var.deploy-maas ? !contains(var.maas-control-plane-azs, "REPLACE ME") && length(var.maas-control-plane-azs) != 0 : true
- error_message = "Provide a valid set of AZs for control plane nodes."
+ error_message = "Provide a valid set of AZs for MAAS control plane nodes."
}
}
@@ -220,31 +181,7 @@ variable "maas-control-plane-node-tags" {
validation {
condition = var.deploy-maas ? !contains(var.maas-control-plane-node-tags, "REPLACE ME") && length(var.maas-control-plane-node-tags) != 0 : true
- error_message = "Provide a valid set of node tags for control plane nodes."
- }
-}
-
-#################
-# /manifests/k8s-values.yaml
-#################
-
-variable "pod-CIDR" {
- type = set(string)
- description = "CIDR notation subnets for the pd network ex. 192.168.1.0/24."
-
- validation {
- condition = var.deploy-maas ? !contains(var.pod-CIDR, "REPLACE ME") && length(var.pod-CIDR) != 0 : true
- error_message = "Provide a valid Subnet (CIDR Notation) for the pod network."
- }
-}
-
-variable "cluster-services-CIDR" {
- type = set(string)
- description = "CIDR notation subnets for cluster services ex. 192.168.1.0/24."
-
- validation {
- condition = var.deploy-maas ? !contains(var.cluster-services-CIDR, "REPLACE ME") && length(var.cluster-services-CIDR) != 0 : true
- error_message = "Provide a valid Subnet (CIDR Notation for cluster services."
+ error_message = "Provide a valid set of node tags for MAAS control plane nodes."
}
}
@@ -253,11 +190,11 @@ variable "cluster-services-CIDR" {
#####################
variable "metallb-ip-pool" {
- type = set(string)
+ type = string
description = "CIDR notation subnets or IP range ex. 192.168.1.0/24 or 192.168.1.0-192.168.1.255"
validation {
- condition = var.deploy-maas ? !contains(var.metallb-ip-pool, "REPLACE ME") && length(var.metallb-ip-pool) != 0 : true
+ condition = var.deploy-maas ? var.metallb-ip-pool != "REPLACE ME" && length(var.metallb-ip-pool) != 0 : true
error_message = "Provide a valid Subnet (CIDR Notation) or IP Range (192.168.1.0-192.168.1.255) for MetalLB."
}
}
@@ -267,25 +204,26 @@ variable "metallb-ip-pool" {
#################
variable "vmo-network-interface" {
- type = set(string)
- description = "The network interface VMO will use for VM traffic."
+ type = string
+ description = "The host network interface VMO will use for VM traffic."
+ default = "br0"
validation {
- condition = var.deploy-maas ? !contains(var.vmo-network-interface, "REPLACE ME") && length(var.vmo-network-interface) != 0 : true
- error_message = "Provide a valid network interface for the VMO service to use."
+ condition = var.deploy-maas ? var.vmo-network-interface != "REPLACE ME" && length(var.vmo-network-interface) != 0 : true
+ error_message = "Provide a valid host network interface for the VMO service to use."
}
}
variable "vm-vlans" {
- type = number
+ type = string
description = "VM allowed VLANs."
- default = 1
+ default = "1"
}
variable "host-vlans" {
- type = number
+ type = string
description = "Node Allowed VLANs"
- default = 1
+ default = "1"
}
#################
@@ -298,11 +236,10 @@ variable "node-network" {
validation {
condition = var.deploy-maas ? var.node-network != "REPLACE ME" && length(var.node-network) != 0 : true
- error_message = "Provide a valid network interface for the VMO service to use."
+ error_message = "Provide a valid network (CIDR notation) for the OS to use."
}
}
-
#####################
# virtual_machines.tf
#####################
@@ -310,6 +247,7 @@ variable "node-network" {
variable "vm-deploy-namespace" {
type = string
description = "The namespace where your VMs will be deployed."
+ default = "virtual-machines"
validation {
condition = var.deploy-maas ? var.vm-deploy-namespace != "REPLACE ME" && length(var.vm-deploy-namespace) != 0 : true
@@ -320,6 +258,7 @@ variable "vm-deploy-namespace" {
variable "vm-deploy-name" {
type = string
description = "The namespace where your VMs will be deployed."
+ default = "vmo-tutorial-vm"
validation {
condition = var.deploy-maas ? var.vm-deploy-name != "REPLACE ME" && length(var.vm-deploy-name) != 0 : true
@@ -329,7 +268,8 @@ variable "vm-deploy-name" {
variable "vm-labels" {
type = set(string)
- description = "The namespace where your VMs will be deployed."
+ description = "The labels that will be applied to your VM."
+ default = ["vmo-tutorial-vm"]
validation {
condition = var.deploy-maas ? var.vm-labels != "REPLACE ME" && length(var.vm-labels) != 0 : true
@@ -360,7 +300,7 @@ variable "vm-cpu-cores" {
variable "vm-cpu-sockets" {
type = number
- description = "Number of CPU cores to allocate to your VM."
+ description = "Number of CPU sockets the assigned CPU cores should be spread across."
default = 1
validation {
@@ -371,7 +311,7 @@ variable "vm-cpu-sockets" {
variable "vm-cpu-threads" {
type = number
- description = "Number of CPU cores to allocate to your VM."
+ description = "Number of CPU threads your VM can use."
default = 1
validation {
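Most validations in inputs.tf share the same conditional shape: when `deploy-maas` is `false` the condition short-circuits to `true`, so placeholder values only block a plan once a MAAS deployment is actually requested. A minimal sketch of the pattern, taken from the `maas-domain` variable above:

```hcl
variable "maas-domain" {
  type        = string
  description = "MAAS domain"

  validation {
    # Enforced only when a MAAS deployment is requested.
    condition     = var.deploy-maas ? var.maas-domain != "REPLACE ME" && var.maas-domain != "" : true
    error_message = "Provide a valid MAAS domain."
  }
}
```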
diff --git a/terraform/vmo-cluster/manifests/metallb-values.yaml b/terraform/vmo-cluster/manifests/metallb-values.yaml
index 30af484..71e91d9 100644
--- a/terraform/vmo-cluster/manifests/metallb-values.yaml
+++ b/terraform/vmo-cluster/manifests/metallb-values.yaml
@@ -19,7 +19,7 @@ charts:
first-pool:
spec:
addresses:
- - 10.11.130.129-10.11.130.131
+ - ${metallb-ip-pool}
# - 192.168.100.50-192.168.100.60
avoidBuggyIPs: true
autoAssign: true
diff --git a/terraform/vmo-cluster/manifests/ubuntu-values.yaml b/terraform/vmo-cluster/manifests/ubuntu-values.yaml
index 26d80be..9b0a9e7 100644
--- a/terraform/vmo-cluster/manifests/ubuntu-values.yaml
+++ b/terraform/vmo-cluster/manifests/ubuntu-values.yaml
@@ -6,7 +6,7 @@ kubeadmconfig:
- apt install -y grepcidr
- |
# Enter as a CIDR '10.11.130.0/24'
- NETWORKS="10.11.130.0/24"
+ NETWORKS=${node-network}
IPS=$(hostname -I)
for IP in $IPS
do
diff --git a/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml b/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
index 6244567..a8bea4e 100644
--- a/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
+++ b/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
@@ -129,4 +129,32 @@ spec:
- accessModes:
- ReadWriteMany
volumeMode: Block
- cloneStrategy: csi-clone
\ No newline at end of file
+ cloneStrategy: csi-clone
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ app.kubernetes.io/name: virtual-machine-orchestrator
+ name: virtual-machine-orchestrator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+- kind: ServiceAccount
+ name: virtual-machine-orchestrator
+ namespace: vm-dashboard
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: cluster-admin
+subjects:
+- kind: User
+ name: ${palette-user-id}
+ apiGroup: rbac.authorization.k8s.io
+roleRef:
+ kind: ClusterRole
+ name: cluster-admin
+ apiGroup: rbac.authorization.k8s.io
diff --git a/terraform/vmo-cluster/manifests/vmo-values.yaml b/terraform/vmo-cluster/manifests/vmo-values.yaml
index 8e66e80..f6c9c70 100644
--- a/terraform/vmo-cluster/manifests/vmo-values.yaml
+++ b/terraform/vmo-cluster/manifests/vmo-values.yaml
@@ -81,13 +81,13 @@ charts:
tag: "latest"
env:
# Which bridge interface to control
- bridgeIF: "br0"
+ bridgeIF: ${vmo-network-interface}
# Beginning of VLAN range to enable
- allowedVlans: "1"
+ allowedVlans: ${vm-vlans}
# Set to "true" to enable VLANs on the br0 interface for the host to use itself
allowVlansOnSelf: "true"
# Beginning of VLAN range to enable for use by the node itself
- allowedVlansOnSelf: "1"
+ allowedVlansOnSelf: ${host-vlans}
snapshot-controller:
enabled: true
replicas: 1
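The `${vmo-network-interface}`, `${vm-vlans}`, and `${host-vlans}` placeholders (like `${metallb-ip-pool}` and `${node-network}` in the two files above) imply these values files are now rendered with `templatefile()` rather than `file()`. The corresponding pack block is not part of this hunk, so the sketch below is only an assumed shape, mirroring the vmo-extras pattern shown earlier:

```hcl
# Assumed pack values in cluster_profiles.tf (not shown in this patch hunk).
values = templatefile("manifests/vmo-values.yaml", {
  vmo-network-interface = var.vmo-network-interface
  vm-vlans              = var.vm-vlans
  host-vlans            = var.host-vlans
})
```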
diff --git a/terraform/vmo-cluster/terraform.tfvars b/terraform/vmo-cluster/terraform.tfvars
index e01a6c0..f5dee74 100644
--- a/terraform/vmo-cluster/terraform.tfvars
+++ b/terraform/vmo-cluster/terraform.tfvars
@@ -4,69 +4,52 @@
#####################
# Palette Settings
#####################
-palette-project = "Default" # The name of your project in Palette.
+palette-project = "REPLACE_ME" # The name of your project in Palette.
+palette-user-id = "REPLACE_ME" # Your Palette user ID
############################
# MAAS Deployment Settings
############################
-deploy-maas = false # Set to true to deploy to a new VMO cluster to MAAS.
-deploy-maas-vm = false # Set to true to create a VM on MAAS VMO cluster once deployed.
-pcg-name = "REPLACE ME" # Provide the name of the PCG that will be used to deploy the Palette cluster.
-maas-domain = "REPLACE ME" # Provide the MAAS domain that will be used to deploy the Palette cluster.
-maas-control-plane-nodes = 1 # Provide the number of control plane nodes that will be used for the Palette cluster.
-maas-control-plane-resource-pool = "REPLACE ME" # Provide a resource pool for the control plane nodes.
-maas-control-plane-azs = ["REPLACE ME"] # Provide a set of availability zones for the control plane nodes.
-maas-control-plane-node-tags = ["REPLACE ME"] # Provide a set of node tags for the control plane nodes.
-ctl-node-min-cpu = 6 # Minimum number of CPU cores required for control plane nodes
-ctl-node-min-memory-mb = 8096 # Minimum amount of RAM (memory) required for control plane nodes
-maas-worker-nodes = 1 # Provide the number of worker nodes that will be used for the Palette cluster.
-maas-worker-resource-pool = "REPLACE ME" # Provide a resource pool for the worker nodes.
-maas-worker-azs = ["REPLACE ME"] # Provide a set of availability zones for the worker nodes.
-maas-worker-node-tags = ["REPLACE ME"] # Provide a set of node tags for the worker nodes.
-wrk-node-min-cpu = 8 # Minimum number of CPU cores required for worker nodes
-wrk-node-min-memory-mb = 16384 # Minimum amount of RAM (memory) required for worker nodes
-
-#####################
-# cluster_profiles.tf
-#####################
-
-vmo-cluster-name = "REPLACE ME"
-cluster-profile-type = "cluster" # infra, cluster, add-on, or system
-cluster-profile-version = "1.0.0" # Version number for the cluster profile in Palette
-
-#####################
-# vmo-values.tf
-####################
-vmo-network-interface = ["br0"]
-vm-vlans = 1
-host-vlans = 1
-
-###########################
-# manifests/k8s-values.yaml
-###########################
-pod-CIDR = ["100.64.0.0/18"] # Set the subnet that your pods will run on
-cluster-services-CIDR = ["100.64.64.0/18"]
+deploy-maas = true # Set to true to deploy a new VMO cluster to MAAS.
+deploy-maas-vm = true # Set to true to create a VM on the MAAS VMO cluster once deployed.
+pcg-name = "REPLACE_ME" # Provide the name of the PCG that will be used to deploy the Palette cluster.
+maas-domain = "REPLACE_ME" # Provide the MAAS domain that will be used to deploy the Palette cluster.
+maas-control-plane-resource-pool = "REPLACE_ME" # Provide a resource pool for the control plane nodes.
+maas-control-plane-azs = ["REPLACE_ME"] # Provide a set of availability zones for the control plane nodes.
+maas-control-plane-node-tags = ["REPLACE_ME"] # Provide a set of node tags for the control plane nodes.
+maas-control-node-min-cpu = REPLACE_ME # Minimum number of CPU cores required for control plane nodes
+maas-control-node-min-memory-mb = REPLACE_ME # Minimum amount of RAM (memory) required for control plane nodes
+maas-worker-resource-pool = "REPLACE_ME" # Provide a resource pool for the worker nodes.
+maas-worker-azs = ["REPLACE_ME"] # Provide a set of availability zones for the worker nodes.
+maas-worker-node-tags = ["REPLACE_ME"] # Provide a set of node tags for the worker nodes.
+maas-worker-node-min-cpu = REPLACE_ME # Minimum number of CPU cores required for worker nodes
+maas-worker-node-min-memory-mb = REPLACE_ME # Minimum amount of RAM (memory) required for worker nodes
###############################
# manifests/metallb-values.yaml
###############################
-metallb-ip-pool = ["REPLACE ME"] # IP addresses to be assigned for use by MetalLB
+metallb-ip-pool = "REPLACE_ME" # IP addresses to be assigned for use by MetalLB. Example 192.168.0.1-192.168.0.4
###############################
# manifests/ubuntu-values.yaml
###############################
-node-network = "REPLACE ME" # IP addresses to be assigned for use by MetalLB
+node-network = "REPLACE_ME" # IP addresses to be assigned for use by the Ubuntu Kubelet services.
#####################
-# virtual_machines.tf
+# virtual_machines.tf - we recommend leaving these values as is to avoid issues with other resource requirements.
#####################
vm-deploy-namespace = "virtual-machines" # Namespace where your VM will be deployed.
-vm-deploy-name = "REPLACE ME" # The name of your VM
-vm-labels = ["REPLACE ME"] # Labels that will be applied to your VM.
-vm-storage-Gi = "50Gi" # Size of the disk (PVC) that your VM will have.
+vm-storage-Gi = "50Gi" # Size of the disk (PVC) that your VM will have. Leave this value at 50Gi.
vm-cpu-cores = 2 # Number of CPU cores your VM will have.
vm-cpu-sockets = 1 # Number of physical CPU sockets the CPU cores should be spread over.
-vm-cpu-threads = 2 # Number of CPU threads to use for the VM CPU
+vm-cpu-threads = 1 # Number of CPU threads to use for the VM CPU
vm-memory-Gi = "4Gi" # Amount of RAM (memory) your VM will have
+
+#####################
+# vmo-values.tf
+####################
+vmo-network-interface = "br0" # The name of the host network interface VMO will use. Default is 'br0'.
+vm-vlans = "1" # VLANs your VMs must be able to communicate on. Default is "1" which is `untagged`.
+host-vlans = "1" # VLANs your VMO host needs to communicate on. Default is "1" which is `untagged`.
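For readers who want a concrete picture of a filled-in configuration, here is a hypothetical terraform.tfvars fragment; every value below is invented for illustration and must be replaced with details from your own MAAS and Palette environment.

```hcl
# Hypothetical example values - replace them with your own environment details.
palette-project       = "Default"
palette-user-id       = "example-user-id"
pcg-name              = "maas-pcg"
maas-domain           = "maas.example.internal"
metallb-ip-pool       = "192.168.0.10-192.168.0.20"
node-network          = "192.168.0.0/24"
vmo-network-interface = "br0"
```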
From a1b530fdb192e422117d4d8e424e23a7a26c9d75 Mon Sep 17 00:00:00 2001
From: kenheaslip-sc
Date: Thu, 19 Jun 2025 10:00:21 -0400
Subject: [PATCH 11/11] Final-Commit
---
terraform/vmo-cluster/data.tf | 4 ---
terraform/vmo-cluster/inputs.tf | 30 +++++++++++--------
.../manifests/vmo-extras-manifest.yaml | 22 +++-----------
terraform/vmo-cluster/terraform.tfvars | 22 +++++++-------
.../virtual-machine-missing-values.tftest.hcl | 2 +-
...virtual-machines-replace-values.tftest.hcl | 6 +---
terraform/vmo-cluster/virtual_machines.tf | 4 +--
7 files changed, 35 insertions(+), 55 deletions(-)
diff --git a/terraform/vmo-cluster/data.tf b/terraform/vmo-cluster/data.tf
index 2b15c62..173f02c 100644
--- a/terraform/vmo-cluster/data.tf
+++ b/terraform/vmo-cluster/data.tf
@@ -8,10 +8,6 @@ data "spectrocloud_registry" "public_registry" {
name = "Public Repo"
}
-######
-# MAAS
-######
-
data "spectrocloud_cloudaccount_maas" "account" {
count = var.deploy-maas ? 1 : 0
name = var.pcg-name
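The `count = var.deploy-maas ? 1 : 0` expression above is the toggle pattern this example relies on: when `deploy-maas` is false, the MAAS data source is simply never created, and downstream code indexes the single instance with `[0]`. A hedged sketch of a safer way to reference such a conditional object (this output does not exist in the repository, it only illustrates the pattern):

```hcl
# Illustrative sketch, not part of this repository.
output "maas_cloud_account_id" {
  # one() returns the single element when deploy-maas is true and null when the data source was skipped.
  value = one(data.spectrocloud_cloudaccount_maas.account[*].id)
}
```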
diff --git a/terraform/vmo-cluster/inputs.tf b/terraform/vmo-cluster/inputs.tf
index 70805fd..f6fbbe5 100644
--- a/terraform/vmo-cluster/inputs.tf
+++ b/terraform/vmo-cluster/inputs.tf
@@ -47,6 +47,7 @@ variable "tags" {
variable "maas-control-node-min-cpu" {
type = number
description = "Minimum number of CPU cores allocated to the Control Plane node."
+ default = 8
validation {
condition = var.deploy-maas ? var.maas-control-node-min-cpu > 0 : true
@@ -57,6 +58,7 @@ variable "maas-control-node-min-cpu" {
variable "maas-control-node-min-memory-mb" {
type = number
description = "Minimum amount of RAM allocated to the Control Plane node."
+  default     = 32768
validation {
condition = var.deploy-maas ? var.maas-control-node-min-memory-mb > 0 : true
@@ -67,6 +69,7 @@ variable "maas-control-node-min-memory-mb" {
variable "maas-worker-node-min-cpu" {
type = number
description = "Minimum number of CPU cores allocated to the worker node."
+ default = 8
validation {
condition = var.deploy-maas ? var.maas-worker-node-min-cpu > 0 : true
@@ -77,6 +80,7 @@ variable "maas-worker-node-min-cpu" {
variable "maas-worker-node-min-memory-mb" {
type = number
description = "Minimum amount of RAM allocated to the worker node."
+  default     = 32768
validation {
condition = var.deploy-maas ? var.maas-worker-node-min-memory-mb > 0 : true
@@ -117,7 +121,7 @@ variable "pcg-name" {
variable "maas-domain" {
type = string
- description = "MAAS domain"
+ description = "The name of the MAAS domain."
validation {
condition = var.deploy-maas ? var.maas-domain != "REPLACE ME" && var.maas-domain != "" : true
@@ -185,23 +189,23 @@ variable "maas-control-plane-node-tags" {
}
}
-#####################
+################################
# /manifests/metallb-values.yaml
-#####################
+################################
variable "metallb-ip-pool" {
type = string
- description = "CIDR notation subnets or IP range ex. 192.168.1.0/24 or 192.168.1.0-192.168.1.255"
+  description = "CIDR notation subnets or IP range for MetalLB. For example, 192.168.1.0/24 or 192.168.1.0-192.168.1.255."
validation {
condition = var.deploy-maas ? var.metallb-ip-pool != "REPLACE ME" && length(var.metallb-ip-pool) != 0 : true
- error_message = "Provide a valid Subnet (CIDR Notation) or IP Range (192.168.1.0-192.168.1.255) for MetalLB."
+ error_message = "Provide a valid subnet (CIDR Notation) or IP Range (192.168.1.0-192.168.1.255) for MetalLB."
}
}
-#################
+############################
# /manifests/vmo-values.yaml
-#################
+############################
variable "vmo-network-interface" {
type = string
@@ -226,9 +230,9 @@ variable "host-vlans" {
default = "1"
}
-#################
+###############################
# /manifests/ubuntu-values.yaml
-#################
+###############################
variable "node-network" {
type = string
@@ -283,14 +287,14 @@ variable "vm-storage-Gi" {
validation {
condition = var.deploy-maas ? var.vm-storage-Gi != "REPLACE ME" && length(var.vm-storage-Gi) != 0 && endswith((var.vm-storage-Gi), "Gi") : true
- error_message = "Provide a valid amount of storage for your VM. You must include 'Gi' at the end of your numerical value. Example: '50Gi'."
+ error_message = "Provide a valid amount of storage for your VM. You must include 'Gi' at the end of your numerical value. For example, '50Gi'."
}
}
variable "vm-cpu-cores" {
type = number
description = "Number of CPU cores to allocate to your VM."
- default = 1
+ default = 4
validation {
condition = var.deploy-maas ? var.vm-cpu-cores > 0 : true
@@ -323,9 +327,9 @@ variable "vm-cpu-threads" {
variable "vm-memory-Gi" {
type = string
description = "The amount of storage to provision for your VM in Gi."
-
+ default = "8Gi"
validation {
condition = var.deploy-maas ? var.vm-memory-Gi != "REPLACE ME" && length(var.vm-memory-Gi) != 0 && endswith((var.vm-memory-Gi), "Gi") : true
- error_message = "Provide a valid amount of memory to allocate your VM. You must include 'Gi' at the end of your numerical value. Example: '4Gi'."
+ error_message = "Provide a valid amount of memory to allocate your VM. You must include 'Gi' at the end of your numerical value. For example, '4Gi'."
}
}
\ No newline at end of file
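All of the MAAS-specific inputs above share the same guard: the validation condition is wrapped in `var.deploy-maas ? ... : true`, so the checks only apply when a MAAS deployment is actually requested. Spelled out on one variable, this is a condensed sketch of the pattern already used above, not new repository code:

```hcl
variable "maas-worker-node-min-cpu" {
  type        = number
  description = "Minimum number of CPU cores allocated to the worker node."
  default     = 8

  validation {
    # With deploy-maas = false the expression short-circuits to true, so no MAAS value is required.
    condition     = var.deploy-maas ? var.maas-worker-node-min-cpu > 0 : true
    error_message = "Provide a worker node CPU count greater than zero."
  }
}
```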
diff --git a/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml b/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
index a8bea4e..eb99934 100644
--- a/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
+++ b/terraform/vmo-cluster/manifests/vmo-extras-manifest.yaml
@@ -29,10 +29,11 @@ spec:
labels:
kubevirt.io/size: small
kubevirt.io/domain: hellouni
+ app: hello-universe
spec:
domain:
cpu:
- cores: 2
+ cores: 4
sockets: 1
threads: 1
devices:
@@ -52,9 +53,9 @@ spec:
type: q35
resources:
limits:
- memory: 2Gi
+ memory: 16Gi
requests:
- memory: 2Gi
+ memory: 8Gi
networks:
- name: default
pod: {}
@@ -133,21 +134,6 @@ spec:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
-metadata:
- labels:
- app.kubernetes.io/name: virtual-machine-orchestrator
- name: virtual-machine-orchestrator
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: cluster-admin
-subjects:
-- kind: ServiceAccount
- name: virtual-machine-orchestrator
- namespace: vm-dashboard
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
metadata:
name: cluster-admin
subjects:
diff --git a/terraform/vmo-cluster/terraform.tfvars b/terraform/vmo-cluster/terraform.tfvars
index f5dee74..aaefca1 100644
--- a/terraform/vmo-cluster/terraform.tfvars
+++ b/terraform/vmo-cluster/terraform.tfvars
@@ -11,25 +11,25 @@ palette-user-id = "REPLACE_ME" # Your Palette user ID
# MAAS Deployment Settings
############################
-deploy-maas = true # Set to true to deploy a new VMO cluster to MAAS.
-deploy-maas-vm = true # Set to true to create a VM on the MAAS VMO cluster once deployed.
+deploy-maas = false # Set to true to deploy a new VMO cluster to MAAS.
+deploy-maas-vm = false # Set to true to create a VM on the MAAS VMO cluster once deployed.
pcg-name = "REPLACE_ME" # Provide the name of the PCG that will be used to deploy the Palette cluster.
maas-domain = "REPLACE_ME" # Provide the MAAS domain that will be used to deploy the Palette cluster.
maas-control-plane-resource-pool = "REPLACE_ME" # Provide a resource pool for the control plane nodes.
maas-control-plane-azs = ["REPLACE_ME"] # Provide a set of availability zones for the control plane nodes.
maas-control-plane-node-tags = ["REPLACE_ME"] # Provide a set of node tags for the control plane nodes.
-maas-control-node-min-cpu = REPLACE_ME # Minimum number of CPU cores required for control plane nodes
-maas-control-node-min-memory-mb = REPLACE_ME # Minimum amount of RAM (memory) required for control plane nodes
+maas-control-node-min-cpu = 8 # Minimum number of CPU cores required for control plane nodes.
+maas-control-node-min-memory-mb = 32768 # Minimum amount of RAM (memory) required for control plane nodes.
maas-worker-resource-pool = "REPLACE_ME" # Provide a resource pool for the worker nodes.
maas-worker-azs = ["REPLACE_ME"] # Provide a set of availability zones for the worker nodes.
maas-worker-node-tags = ["REPLACE_ME"] # Provide a set of node tags for the worker nodes.
-maas-worker-node-min-cpu = REPLACE_ME # Minimum number of CPU cores required for worker nodes
-maas-worker-node-min-memory-mb = REPLACE_ME # Minimum amount of RAM (memory) required for worker nodes
+maas-worker-node-min-cpu = 8 # Minimum number of CPU cores required for worker nodes.
+maas-worker-node-min-memory-mb = 32768 # Minimum amount of RAM (memory) required for worker nodes.
###############################
# manifests/metallb-values.yaml
###############################
-metallb-ip-pool = "REPLACE_ME" # IP addresses to be assigned for use by MetalLB. Example 192.168.0.1-192.168.0.4
+metallb-ip-pool = "REPLACE_ME" # IP addresses to be assigned for use by MetalLB. For example, 192.168.0.1-192.168.0.4.
###############################
# manifests/ubuntu-values.yaml
@@ -41,10 +41,10 @@ node-network = "REPLACE_ME" # IP addresses to be assigned for use by the Ubuntu
#####################
vm-deploy-namespace = "virtual-machines" # Namespace where your VM will be deployed.
vm-storage-Gi = "50Gi" # Size of the disk (PVC) that your VM will have. Leave this value at 50Gi.
-vm-cpu-cores = 2 # Number of CPU cores your VM will have.
-vm-cpu-sockets = 1 # Number of physical CPU sockets the CPU cores should be spread over.
-vm-cpu-threads = 1 # Number of CPU threads to use for the VM CPU
-vm-memory-Gi = "4Gi" # Amount of RAM (memory) your VM will have
+vm-cpu-cores = 2 # The number of CPU cores your VM will have.
+vm-cpu-sockets = 1 # The number of physical CPU sockets the CPU cores should be spread over.
+vm-cpu-threads = 1 # The number of CPU threads to use for the VM CPU.
+vm-memory-Gi = "4Gi" # The amount of RAM (memory) your VM will have.
#####################
diff --git a/terraform/vmo-cluster/tests/virtual-machine-missing-values.tftest.hcl b/terraform/vmo-cluster/tests/virtual-machine-missing-values.tftest.hcl
index bd64f34..4e1895b 100644
--- a/terraform/vmo-cluster/tests/virtual-machine-missing-values.tftest.hcl
+++ b/terraform/vmo-cluster/tests/virtual-machine-missing-values.tftest.hcl
@@ -1,6 +1,6 @@
# Copyright (c) Spectro Cloud
# SPDX-License-Identifier: Apache-2.0
-# Test case 4 - Verify PCG name, domain, resource pools, AZs and node tags cannot be empty.
+# Test case 4 - Verify VM variables cannot be empty.
variables {
deploy-maas = true
diff --git a/terraform/vmo-cluster/tests/virtual-machines-replace-values.tftest.hcl b/terraform/vmo-cluster/tests/virtual-machines-replace-values.tftest.hcl
index dda2366..3b8b332 100644
--- a/terraform/vmo-cluster/tests/virtual-machines-replace-values.tftest.hcl
+++ b/terraform/vmo-cluster/tests/virtual-machines-replace-values.tftest.hcl
@@ -1,6 +1,6 @@
# Copyright (c) Spectro Cloud
# SPDX-License-Identifier: Apache-2.0
-# Test case 5 - Verify PCG name, domain, resource pools, AZs and node tags cannot have REPLACE ME values.
+# Test case 5 - Verify VM variables cannot have REPLACE ME values.
variables {
@@ -12,10 +12,6 @@ vm-cpu-cores = 2
vm-cpu-sockets = 1
vm-cpu-threads = 2
vm-memory-Gi = "REPLACE ME"
-
-
-
-
}
mock_provider "spectrocloud" {
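These `.tftest.hcl` files mock the `spectrocloud` provider and assert that planning fails when VM inputs are left empty or at their `REPLACE ME` placeholders. The run blocks themselves sit outside this hunk, so the following is only a hedged sketch of what such a case typically looks like with `expect_failures`:

```hcl
# Hypothetical sketch - the run blocks actually used in this test file may differ.
run "vm_values_must_be_set" {
  command = plan

  # The plan is expected to fail on the validation rule attached to vm-memory-Gi.
  expect_failures = [
    var.vm-memory-Gi,
  ]
}
```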
diff --git a/terraform/vmo-cluster/virtual_machines.tf b/terraform/vmo-cluster/virtual_machines.tf
index cc45d51..1a1f254 100644
--- a/terraform/vmo-cluster/virtual_machines.tf
+++ b/terraform/vmo-cluster/virtual_machines.tf
@@ -8,9 +8,7 @@ resource "spectrocloud_virtual_machine" "virtual-machine" {
cluster_uid = data.spectrocloud_cluster.maas_vmo_cluster[0].id
cluster_context = data.spectrocloud_cluster.maas_vmo_cluster[0].context
-
- #run_on_launch = true
- run_strategy = "Halted"
+ run_strategy = "Always"
namespace = var.vm-deploy-namespace
name = var.vm-deploy-name