From f1fe4f9cc2ba1a5de31755431ea593edfa7def45 Mon Sep 17 00:00:00 2001 From: Paul Payne Date: Tue, 24 Jun 2025 15:12:53 -0700 Subject: [PATCH] Settle on v1 setup method. Test run completed successfully from bootstrap to service setup. - Refactor dnsmasq configuration and scripts for improved variable handling and clarity - Updated dnsmasq configuration files to use direct variable references instead of data source functions for better readability. - Modified setup scripts to ensure they are run from the correct environment and directory, checking for the WC_HOME variable. - Changed paths in README and scripts to reflect the new directory structure. - Enhanced error handling in setup scripts to provide clearer guidance on required configurations. - Adjusted kernel and initramfs URLs in boot.ipxe to use the updated variable references. --- .gitignore | 1 - bin/backup | 23 + bin/helm-chart-to-kustomize | 136 + bin/wild-compile-template | 23 +- bin/wild-compile-template-dir | 98 + bin/wild-config | 18 +- bin/wild-secret | 74 + bin/wild-talos-iso | 137 + bin/wild-talos-schema | 113 + docs/SETUP.md | 6 + env.sh | 49 + load-env.sh | 178 - my-scaffold/.env.example | 2 - my-scaffold/.gitignore | 4 +- my-scaffold/.wildcloud/.gitkeep | 0 my-scaffold/config.example.yaml | 43 +- my-scaffold/docs/node-setup.md | 246 + my-scaffold/env.sh | 49 + {setup/cluster => scripts}/setup-utils.sh | 0 setup/README.md | 2 + setup/cluster-nodes/README.md | 291 +- setup/cluster-nodes/create-installer-image.sh | 53 + setup/cluster-nodes/detect-node-hardware.sh | 163 + setup/cluster-nodes/final/.gitkeep | 0 .../cluster-nodes/generate-machine-configs.sh | 115 + .../cluster-nodes/generated/controlplane.yaml | 577 ++ setup/cluster-nodes/generated/secrets.yaml | 23 + setup/cluster-nodes/generated/talosconfig | 7 + setup/cluster-nodes/generated/worker.yaml | 606 ++ setup/cluster-nodes/init-cluster.sh | 80 + setup/cluster-nodes/old/setup_node.sh | 21 - .../patch.templates/controlplane-node-1.yaml | 22 
+ .../patch.templates/controlplane-node-2.yaml | 22 + .../patch.templates/controlplane-node-3.yaml | 22 + .../cluster-nodes/patch.templates/worker.yaml | 22 + setup/cluster-nodes/patch/.gitkeep | 0 setup/cluster-nodes/patch/controlplane.yaml | 17 - setup/cluster-nodes/patch/worker.yaml | 3 - setup/cluster/README.md | 78 +- setup/cluster/cert-manager/README.md | 0 .../install.sh} | 56 +- .../cert-manager.yaml | 0 .../internal-wildcard-certificate.yaml | 19 + .../kustomize.template/kustomization.yaml | 12 + .../letsencrypt-prod-dns01.yaml | 26 + .../letsencrypt-staging-dns01.yaml | 26 + .../kustomize.template/namespace.yaml | 4 + .../wildcard-certificate.yaml | 19 + .../cert-manager/kustomize/cert-manager.yaml | 5623 +++++++++++++++++ .../internal-wildcard-certificate.yaml | 4 +- .../cert-manager/kustomize/kustomization.yaml | 12 + .../letsencrypt-prod-dns01.yaml | 6 +- .../letsencrypt-staging-dns01.yaml | 6 +- .../cert-manager/kustomize/namespace.yaml | 4 + .../{ => kustomize}/wildcard-certificate.yaml | 4 +- setup/cluster/coredns/README.md | 30 +- setup/cluster/coredns/install.sh | 37 + .../coredns-custom-config.yaml | 28 + .../coredns-lb-service.yaml | 25 + .../coredns-custom-config.yaml | 10 +- .../{ => kustomize}/coredns-lb-service.yaml | 2 +- setup/cluster/docker-registry/README.md | 0 .../docker-registry/config/example.env | 2 - setup/cluster/docker-registry/install.sh | 28 + .../docker-registry/kustomization.yaml | 40 - .../{ => kustomize.template}/deployment.yaml | 0 .../{ => kustomize.template}/ingress.yaml | 4 +- .../kustomize.template/kustomization.yaml | 14 + .../{ => kustomize.template}/namespace.yaml | 0 .../kustomize.template/pvc.yaml | 12 + .../{ => kustomize.template}/service.yaml | 0 .../docker-registry/kustomize/deployment.yaml | 36 + .../docker-registry/kustomize/ingress.yaml | 20 + .../kustomize/kustomization.yaml | 14 + .../docker-registry/kustomize/namespace.yaml | 4 + .../docker-registry/{ => kustomize}/pvc.yaml | 0 
.../docker-registry/kustomize/service.yaml | 13 + setup/cluster/externaldns/install.sh | 42 + .../externaldns-cloudflare.yaml | 39 + .../externaldns-rbac.yaml | 0 .../kustomize.template/kustomization.yaml | 7 + .../kustomize.template/namespace.yaml | 4 + .../externaldns-cloudflare.yaml | 2 +- .../kustomize/externaldns-rbac.yaml | 35 + .../externaldns/kustomize/kustomization.yaml | 7 + .../externaldns/kustomize/namespace.yaml | 4 + setup/cluster/install-all.sh | 34 + setup/cluster/kubernetes-dashboard/README.md | 0 setup/cluster/kubernetes-dashboard/install.sh | 60 + .../dashboard-admin-rbac.yaml | 0 .../dashboard-kube-system.yaml | 16 +- .../kustomize.template/kustomization.yaml | 6 + .../kustomize/dashboard-admin-rbac.yaml | 32 + .../kustomize/dashboard-kube-system.yaml | 84 + .../kustomize/kustomization.yaml | 6 + setup/cluster/longhorn/install.sh | 21 + .../kustomization.yaml | 0 .../{ => kustomize.template}/longhorn.yaml | 4 + .../longhorn/kustomize/kustomization.yaml | 5 + .../cluster/longhorn/kustomize/longhorn.yaml | 5189 +++++++++++++++ setup/cluster/metallb/README.md | 0 .../metallb/configuration/kustomization.yaml | 18 - .../{setup-metallb.sh => metallb/install.sh} | 22 +- .../configuration/kustomization.yaml | 3 + .../configuration/pool.yaml | 2 +- .../installation/kustomization.yaml | 0 .../configuration/kustomization.yaml | 3 + .../metallb/kustomize/configuration/pool.yaml | 19 + .../kustomize/installation/kustomization.yaml | 3 + setup/cluster/nfs/README.md | 54 + .../cluster/{setup-nfs.sh => nfs/install.sh} | 50 +- setup/cluster/nfs/kustomization.yaml | 53 - .../nfs/kustomize.template/kustomization.yaml | 6 + .../kustomize.template/persistent-volume.yaml | 23 + .../nfs/kustomize.template/storage-class.yaml | 10 + .../cluster/nfs/kustomize/kustomization.yaml | 6 + .../{ => kustomize}/persistent-volume.yaml | 6 +- .../nfs/{ => kustomize}/storage-class.yaml | 4 +- setup/cluster/{ => nfs}/setup-nfs-host.sh | 0 setup/cluster/setup-all.sh | 55 - 
setup/cluster/setup-coredns.sh | 30 - setup/cluster/setup-dashboard.sh | 46 - setup/cluster/setup-externaldns.sh | 51 - setup/cluster/setup-longhorn.sh | 16 - setup/cluster/setup-registry.sh | 20 - setup/cluster/setup-traefik.sh | 18 - setup/cluster/traefik/README.md | 24 + setup/cluster/traefik/install.sh | 44 + .../internal-middleware.yaml | 0 .../kustomize.template/kustomization.yaml | 13 + .../traefik/kustomize.template/namespace.yaml | 4 + .../templates/deployment.yaml | 130 + .../kustomize.template/templates/gateway.yaml | 18 + .../templates/gatewayclass.yaml | 13 + .../templates/ingressclass.yaml | 15 + .../templates/ingressroute.yaml | 21 + .../templates/rbac/clusterrole.yaml | 108 + .../templates/rbac/clusterrolebinding.yaml | 19 + .../templates/rbac/serviceaccount.yaml | 14 + .../kustomize.template/templates/service.yaml | 27 + .../traefik-service.yaml | 5 +- .../kustomize/internal-middleware.yaml | 13 + .../traefik/kustomize/kustomization.yaml | 13 + .../cluster/traefik/kustomize/namespace.yaml | 4 + .../kustomize/templates/deployment.yaml | 130 + .../traefik/kustomize/templates/gateway.yaml | 18 + .../kustomize/templates/gatewayclass.yaml | 13 + .../kustomize/templates/ingressclass.yaml | 15 + .../kustomize/templates/ingressroute.yaml | 21 + .../kustomize/templates/rbac/clusterrole.yaml | 108 + .../templates/rbac/clusterrolebinding.yaml | 19 + .../templates/rbac/serviceaccount.yaml | 14 + .../traefik/kustomize/templates/service.yaml | 27 + .../traefik/kustomize/traefik-service.yaml | 28 + setup/cluster/utils/README.md | 0 setup/cluster/utils/install.sh | 21 + .../{ => kustomize.template}/netdebug.yaml | 0 setup/cluster/validate-setup.sh | 276 +- setup/dnsmasq/.not_logged_in_yet | 6 +- setup/dnsmasq/README.md | 7 +- setup/dnsmasq/bin/create-setup-bundle.sh | 25 +- setup/dnsmasq/bin/transfer-setup-bundle.sh | 9 +- setup/dnsmasq/boot.ipxe | 4 +- setup/dnsmasq/dnsmasq.conf | 22 +- setup/dnsmasq/dnsmasq.reference | 14 +- 165 files changed, 15838 
insertions(+), 1003 deletions(-) create mode 100755 bin/backup create mode 100755 bin/helm-chart-to-kustomize create mode 100755 bin/wild-compile-template-dir create mode 100755 bin/wild-secret create mode 100755 bin/wild-talos-iso create mode 100755 bin/wild-talos-schema create mode 100644 env.sh delete mode 100755 load-env.sh delete mode 100644 my-scaffold/.env.example create mode 100644 my-scaffold/.wildcloud/.gitkeep create mode 100644 my-scaffold/docs/node-setup.md create mode 100644 my-scaffold/env.sh rename {setup/cluster => scripts}/setup-utils.sh (100%) create mode 100755 setup/cluster-nodes/create-installer-image.sh create mode 100755 setup/cluster-nodes/detect-node-hardware.sh create mode 100644 setup/cluster-nodes/final/.gitkeep create mode 100755 setup/cluster-nodes/generate-machine-configs.sh create mode 100644 setup/cluster-nodes/generated/controlplane.yaml create mode 100644 setup/cluster-nodes/generated/secrets.yaml create mode 100644 setup/cluster-nodes/generated/talosconfig create mode 100644 setup/cluster-nodes/generated/worker.yaml create mode 100755 setup/cluster-nodes/init-cluster.sh delete mode 100755 setup/cluster-nodes/old/setup_node.sh create mode 100644 setup/cluster-nodes/patch.templates/controlplane-node-1.yaml create mode 100644 setup/cluster-nodes/patch.templates/controlplane-node-2.yaml create mode 100644 setup/cluster-nodes/patch.templates/controlplane-node-3.yaml create mode 100644 setup/cluster-nodes/patch.templates/worker.yaml create mode 100644 setup/cluster-nodes/patch/.gitkeep delete mode 100644 setup/cluster-nodes/patch/controlplane.yaml delete mode 100644 setup/cluster-nodes/patch/worker.yaml create mode 100644 setup/cluster/cert-manager/README.md rename setup/cluster/{setup-cert-manager.sh => cert-manager/install.sh} (67%) rename setup/cluster/cert-manager/{ => kustomize.template}/cert-manager.yaml (100%) create mode 100644 setup/cluster/cert-manager/kustomize.template/internal-wildcard-certificate.yaml create mode 100644 
setup/cluster/cert-manager/kustomize.template/kustomization.yaml create mode 100644 setup/cluster/cert-manager/kustomize.template/letsencrypt-prod-dns01.yaml create mode 100644 setup/cluster/cert-manager/kustomize.template/letsencrypt-staging-dns01.yaml create mode 100644 setup/cluster/cert-manager/kustomize.template/namespace.yaml create mode 100644 setup/cluster/cert-manager/kustomize.template/wildcard-certificate.yaml create mode 100644 setup/cluster/cert-manager/kustomize/cert-manager.yaml rename setup/cluster/cert-manager/{ => kustomize}/internal-wildcard-certificate.yaml (84%) create mode 100644 setup/cluster/cert-manager/kustomize/kustomization.yaml rename setup/cluster/cert-manager/{ => kustomize}/letsencrypt-prod-dns01.yaml (87%) rename setup/cluster/cert-manager/{ => kustomize}/letsencrypt-staging-dns01.yaml (87%) create mode 100644 setup/cluster/cert-manager/kustomize/namespace.yaml rename setup/cluster/cert-manager/{ => kustomize}/wildcard-certificate.yaml (87%) create mode 100755 setup/cluster/coredns/install.sh create mode 100644 setup/cluster/coredns/kustomize.template/coredns-custom-config.yaml create mode 100644 setup/cluster/coredns/kustomize.template/coredns-lb-service.yaml rename setup/cluster/coredns/{ => kustomize}/coredns-custom-config.yaml (68%) rename setup/cluster/coredns/{ => kustomize}/coredns-lb-service.yaml (86%) create mode 100644 setup/cluster/docker-registry/README.md delete mode 100644 setup/cluster/docker-registry/config/example.env create mode 100755 setup/cluster/docker-registry/install.sh delete mode 100644 setup/cluster/docker-registry/kustomization.yaml rename setup/cluster/docker-registry/{ => kustomize.template}/deployment.yaml (100%) rename setup/cluster/docker-registry/{ => kustomize.template}/ingress.yaml (80%) create mode 100644 setup/cluster/docker-registry/kustomize.template/kustomization.yaml rename setup/cluster/docker-registry/{ => kustomize.template}/namespace.yaml (100%) create mode 100644 
setup/cluster/docker-registry/kustomize.template/pvc.yaml rename setup/cluster/docker-registry/{ => kustomize.template}/service.yaml (100%) create mode 100644 setup/cluster/docker-registry/kustomize/deployment.yaml create mode 100644 setup/cluster/docker-registry/kustomize/ingress.yaml create mode 100644 setup/cluster/docker-registry/kustomize/kustomization.yaml create mode 100644 setup/cluster/docker-registry/kustomize/namespace.yaml rename setup/cluster/docker-registry/{ => kustomize}/pvc.yaml (100%) create mode 100644 setup/cluster/docker-registry/kustomize/service.yaml create mode 100755 setup/cluster/externaldns/install.sh create mode 100644 setup/cluster/externaldns/kustomize.template/externaldns-cloudflare.yaml rename setup/cluster/externaldns/{ => kustomize.template}/externaldns-rbac.yaml (100%) create mode 100644 setup/cluster/externaldns/kustomize.template/kustomization.yaml create mode 100644 setup/cluster/externaldns/kustomize.template/namespace.yaml rename setup/cluster/externaldns/{ => kustomize}/externaldns-cloudflare.yaml (95%) create mode 100644 setup/cluster/externaldns/kustomize/externaldns-rbac.yaml create mode 100644 setup/cluster/externaldns/kustomize/kustomization.yaml create mode 100644 setup/cluster/externaldns/kustomize/namespace.yaml create mode 100755 setup/cluster/install-all.sh create mode 100644 setup/cluster/kubernetes-dashboard/README.md create mode 100755 setup/cluster/kubernetes-dashboard/install.sh rename setup/cluster/kubernetes-dashboard/{ => kustomize.template}/dashboard-admin-rbac.yaml (100%) rename setup/cluster/kubernetes-dashboard/{ => kustomize.template}/dashboard-kube-system.yaml (82%) create mode 100644 setup/cluster/kubernetes-dashboard/kustomize.template/kustomization.yaml create mode 100644 setup/cluster/kubernetes-dashboard/kustomize/dashboard-admin-rbac.yaml create mode 100644 setup/cluster/kubernetes-dashboard/kustomize/dashboard-kube-system.yaml create mode 100644 
setup/cluster/kubernetes-dashboard/kustomize/kustomization.yaml create mode 100755 setup/cluster/longhorn/install.sh rename setup/cluster/longhorn/{ => kustomize.template}/kustomization.yaml (100%) rename setup/cluster/longhorn/{ => kustomize.template}/longhorn.yaml (99%) create mode 100644 setup/cluster/longhorn/kustomize/kustomization.yaml create mode 100644 setup/cluster/longhorn/kustomize/longhorn.yaml create mode 100644 setup/cluster/metallb/README.md delete mode 100644 setup/cluster/metallb/configuration/kustomization.yaml rename setup/cluster/{setup-metallb.sh => metallb/install.sh} (50%) create mode 100644 setup/cluster/metallb/kustomize.template/configuration/kustomization.yaml rename setup/cluster/metallb/{ => kustomize.template}/configuration/pool.yaml (86%) rename setup/cluster/metallb/{ => kustomize.template}/installation/kustomization.yaml (100%) create mode 100644 setup/cluster/metallb/kustomize/configuration/kustomization.yaml create mode 100644 setup/cluster/metallb/kustomize/configuration/pool.yaml create mode 100644 setup/cluster/metallb/kustomize/installation/kustomization.yaml create mode 100644 setup/cluster/nfs/README.md rename setup/cluster/{setup-nfs.sh => nfs/install.sh} (83%) delete mode 100644 setup/cluster/nfs/kustomization.yaml create mode 100644 setup/cluster/nfs/kustomize.template/kustomization.yaml create mode 100644 setup/cluster/nfs/kustomize.template/persistent-volume.yaml create mode 100644 setup/cluster/nfs/kustomize.template/storage-class.yaml create mode 100644 setup/cluster/nfs/kustomize/kustomization.yaml rename setup/cluster/nfs/{ => kustomize}/persistent-volume.yaml (83%) rename setup/cluster/nfs/{ => kustomize}/storage-class.yaml (78%) rename setup/cluster/{ => nfs}/setup-nfs-host.sh (100%) delete mode 100755 setup/cluster/setup-all.sh delete mode 100755 setup/cluster/setup-coredns.sh delete mode 100755 setup/cluster/setup-dashboard.sh delete mode 100755 setup/cluster/setup-externaldns.sh delete mode 100755 
setup/cluster/setup-longhorn.sh delete mode 100755 setup/cluster/setup-registry.sh delete mode 100755 setup/cluster/setup-traefik.sh create mode 100755 setup/cluster/traefik/install.sh rename setup/cluster/traefik/{ => kustomize.template}/internal-middleware.yaml (100%) create mode 100644 setup/cluster/traefik/kustomize.template/kustomization.yaml create mode 100644 setup/cluster/traefik/kustomize.template/namespace.yaml create mode 100644 setup/cluster/traefik/kustomize.template/templates/deployment.yaml create mode 100644 setup/cluster/traefik/kustomize.template/templates/gateway.yaml create mode 100644 setup/cluster/traefik/kustomize.template/templates/gatewayclass.yaml create mode 100644 setup/cluster/traefik/kustomize.template/templates/ingressclass.yaml create mode 100644 setup/cluster/traefik/kustomize.template/templates/ingressroute.yaml create mode 100644 setup/cluster/traefik/kustomize.template/templates/rbac/clusterrole.yaml create mode 100644 setup/cluster/traefik/kustomize.template/templates/rbac/clusterrolebinding.yaml create mode 100644 setup/cluster/traefik/kustomize.template/templates/rbac/serviceaccount.yaml create mode 100644 setup/cluster/traefik/kustomize.template/templates/service.yaml rename setup/cluster/traefik/{ => kustomize.template}/traefik-service.yaml (90%) create mode 100644 setup/cluster/traefik/kustomize/internal-middleware.yaml create mode 100644 setup/cluster/traefik/kustomize/kustomization.yaml create mode 100644 setup/cluster/traefik/kustomize/namespace.yaml create mode 100644 setup/cluster/traefik/kustomize/templates/deployment.yaml create mode 100644 setup/cluster/traefik/kustomize/templates/gateway.yaml create mode 100644 setup/cluster/traefik/kustomize/templates/gatewayclass.yaml create mode 100644 setup/cluster/traefik/kustomize/templates/ingressclass.yaml create mode 100644 setup/cluster/traefik/kustomize/templates/ingressroute.yaml create mode 100644 setup/cluster/traefik/kustomize/templates/rbac/clusterrole.yaml create 
mode 100644 setup/cluster/traefik/kustomize/templates/rbac/clusterrolebinding.yaml create mode 100644 setup/cluster/traefik/kustomize/templates/rbac/serviceaccount.yaml create mode 100644 setup/cluster/traefik/kustomize/templates/service.yaml create mode 100644 setup/cluster/traefik/kustomize/traefik-service.yaml create mode 100644 setup/cluster/utils/README.md create mode 100755 setup/cluster/utils/install.sh rename setup/cluster/utils/{ => kustomize.template}/netdebug.yaml (100%) diff --git a/.gitignore b/.gitignore index e6cc370..4030331 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,3 @@ -.env ca .bots/*/sessions .working diff --git a/bin/backup b/bin/backup new file mode 100755 index 0000000..217a9e7 --- /dev/null +++ b/bin/backup @@ -0,0 +1,23 @@ +#!/bin/bash +# Simple backup script for your personal cloud +# This is a placeholder for future implementation + +SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" +SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" +cd "$SCRIPT_DIR" +if [[ -f "../load-env.sh" ]]; then + source ../load-env.sh +fi + +BACKUP_DIR="${PROJECT_DIR}/backups/$(date +%Y-%m-%d)" +mkdir -p "$BACKUP_DIR" + +# Back up Kubernetes resources +kubectl get all -A -o yaml > "$BACKUP_DIR/all-resources.yaml" +kubectl get secrets -A -o yaml > "$BACKUP_DIR/secrets.yaml" +kubectl get configmaps -A -o yaml > "$BACKUP_DIR/configmaps.yaml" + +# Back up persistent volumes +# TODO: Add logic to back up persistent volume data + +echo "Backup completed: $BACKUP_DIR" diff --git a/bin/helm-chart-to-kustomize b/bin/helm-chart-to-kustomize new file mode 100755 index 0000000..06f938a --- /dev/null +++ b/bin/helm-chart-to-kustomize @@ -0,0 +1,136 @@ +#!/bin/bash +set -e +set -o pipefail + +usage() { + echo "Usage: helm-chart-to-kustomize [values-file]" + echo "" + echo "Convert a Helm chart to Kustomize manifests." 
+ echo "" + echo "Arguments:" + echo " repo/chart Helm chart repository and name (e.g., nginx-stable/nginx-ingress)" + echo " release-name Name for the Helm release (e.g., ingress-controller)" + echo " namespace Kubernetes namespace to deploy to" + echo " values-file Optional values.yaml file for customization" + echo "" + echo "Examples:" + echo " helm-chart-to-kustomize nginx-stable/nginx-ingress ingress-controller ingress" + echo " helm-chart-to-kustomize nginx-stable/nginx-ingress ingress-controller ingress values.yaml" + echo "" + echo "Output:" + echo " Creates base// directory with Kustomize-ready manifests" +} + +# Parse arguments +if [[ $# -lt 3 || "$1" == "-h" || "$1" == "--help" ]]; then + usage + exit 0 +fi + +chart_repo="$1" +release_name="$2" +namespace="$3" +values_file="${4:-}" + +# Extract chart name from repo/chart +chart_name="${chart_repo##*/}" + +echo "Converting Helm chart to Kustomize: $chart_repo -> base/$release_name" + +# Create working directories +mkdir -p charts base + +# Fetch the Helm chart if not already present +if [[ -d "charts/$chart_name" ]]; then + echo "Chart '$chart_name' already exists in 'charts/' directory. Skipping fetch." +else + echo "Fetching Helm chart: $chart_repo" + + # Add repository if not already added + repo_name="$(echo "$chart_repo" | cut -d'/' -f1)" + if ! helm repo list 2>/dev/null | grep -q "^$repo_name"; then + echo "Adding Helm repository: $repo_name" + # Handle common repository URLs + case "$repo_name" in + "traefik") + helm repo add "$repo_name" "https://traefik.github.io/charts" + ;; + "nginx-stable") + helm repo add "$repo_name" "https://helm.nginx.com/stable" + ;; + *) + # Try generic helm.sh pattern first + helm repo add "$repo_name" "https://charts.helm.sh/$repo_name" 2>/dev/null || { + echo "Error: Unknown repository '$repo_name'. Please add manually with 'helm repo add'." + exit 1 + } + ;; + esac + helm repo update + fi + + if ! 
helm search repo "$chart_repo" >/dev/null 2>&1; then + echo "Error: Helm chart '$chart_repo' not found in repositories." + exit 1 + fi + + helm fetch --untar --untardir charts "$chart_repo" +fi + +# Build helm template command +template_cmd="helm template --output-dir base --namespace $namespace" +if [[ -n "$values_file" && -f "$values_file" ]]; then + template_cmd="$template_cmd --values $values_file" + echo "Using values file: $values_file" +fi +template_cmd="$template_cmd $release_name charts/$chart_name" + +# Clean existing base directory if it exists +if [[ -d "base/$release_name" ]]; then + echo "Existing base/$release_name directory found. Cleaning..." + rm -rf "base/$release_name" +fi + +# Generate manifests with Helm template +echo "Generating manifests with Helm template..." +eval "$template_cmd" + +# Create namespace manifest +echo "Creating namespace manifest..." +cat < "base/$release_name/namespace.yaml" +apiVersion: v1 +kind: Namespace +metadata: + name: $namespace +EOF + +# Generate kustomization.yaml +echo "Generating kustomization.yaml..." +cd "base/$release_name" + +# Find all YAML files recursively and create kustomization +resources=() +while IFS= read -r -d '' file; do + # Get relative path from current directory + rel_path="${file#./}" + resources+=("$rel_path") +done < <(find . -name "*.yaml" -not -name "kustomization.yaml" -print0 | sort -z) + +# Create kustomization.yaml with all resources +cat > kustomization.yaml << EOF +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +EOF + +for resource in "${resources[@]}"; do + echo "- $resource" >> kustomization.yaml +done + +echo "✅ Conversion complete!" +echo "" +echo "Generated files in: base/$release_name/" +echo "To apply with kubectl:" +echo " kubectl apply -k base/$release_name" +echo "" +echo "To customize further, edit the files in base/$release_name/ and regenerate kustomization.yaml if needed." 
\ No newline at end of file diff --git a/bin/wild-compile-template b/bin/wild-compile-template index 877737d..163244c 100755 --- a/bin/wild-compile-template +++ b/bin/wild-compile-template @@ -7,7 +7,7 @@ set -o pipefail usage() { echo "Usage: wild-compile-template [options]" echo "" - echo "Compile a gomplate template from stdin using ./config.yaml as context." + echo "Compile a gomplate template from stdin using \$WC_HOME/config.yaml as context." echo "" echo "Examples:" echo " echo 'Hello {{.config.cluster.name}}' | wild-compile-template" @@ -37,17 +37,26 @@ while [[ $# -gt 0 ]]; do esac done -if [ ! -f "./config.yaml" ]; then - echo "Error: ./config.yaml not found in current directory" >&2 +# Check if WC_HOME is set +if [ -z "${WC_HOME:-}" ]; then + echo "Error: WC_HOME environment variable not set" >&2 exit 1 fi -# Build gomplate command with config context (enables .config shorthand) -gomplate_cmd="gomplate -c config=./config.yaml" +CONFIG_FILE="${WC_HOME}/config.yaml" +SECRETS_FILE="${WC_HOME}/secrets.yaml" + +if [ ! -f "${CONFIG_FILE}" ]; then + echo "Error: config.yaml not found at ${CONFIG_FILE}" >&2 + exit 1 +fi + +# Build gomplate command with config context +gomplate_cmd="gomplate -c .=${CONFIG_FILE}" # Add secrets context if secrets.yaml exists (enables .secrets shorthand) -if [ -f "./secrets.yaml" ]; then - gomplate_cmd="${gomplate_cmd} -c secrets=./secrets.yaml" +if [ -f "${SECRETS_FILE}" ]; then + gomplate_cmd="${gomplate_cmd} -c secrets=${SECRETS_FILE}" fi # Execute gomplate with stdin diff --git a/bin/wild-compile-template-dir b/bin/wild-compile-template-dir new file mode 100755 index 0000000..597629c --- /dev/null +++ b/bin/wild-compile-template-dir @@ -0,0 +1,98 @@ +#!/bin/bash +set -e +set -o pipefail + +usage() { + echo "Usage: wild-compile-template-dir [options] [dest_dir]" + echo "" + echo "Recursively copy all files from source_dir to dest_dir, processing text files through wild-compile-template." + echo "Binary files are copied as-is. 
Directory structure is preserved." + echo "" + echo "Options:" + echo " --clean Delete destination directory before processing" + echo " -h, --help Show this help message" + echo "" + echo "Arguments:" + echo " source_dir Source directory to process" + echo " dest_dir Destination directory (default: source_dir_compiled)" + echo "" + echo "Examples:" + echo " wild-compile-template-dir ./templates" + echo " wild-compile-template-dir ./templates ./output" + echo " wild-compile-template-dir --clean ./templates" + echo " wild-compile-template-dir --clean ./templates ./output" +} + +# Parse arguments +clean_flag=false +while [[ $# -gt 0 ]]; do + case $1 in + --clean) + clean_flag=true + shift + ;; + -h|--help) + usage + exit 0 + ;; + -*) + echo "Unknown option: $1" >&2 + usage + exit 1 + ;; + *) + break + ;; + esac +done + +if [[ $# -eq 0 ]]; then + usage + exit 0 +fi + +source_dir="$1" +dest_dir="${2:-${source_dir}_compiled}" + + +# Validate source directory +if [[ ! -d "$source_dir" ]]; then + echo "Error: Source directory does not exist: $source_dir" >&2 + exit 1 +fi + +# Clean destination directory if requested +if [[ "$clean_flag" == true && -d "$dest_dir" ]]; then + echo "Cleaning destination directory: $dest_dir" + rm -rf "$dest_dir" +fi + +# Create destination directory +mkdir -p "$dest_dir" + +echo "Processing directory: $source_dir -> $dest_dir" + +# Process all files recursively +find "$source_dir" -type f -print0 | while IFS= read -r -d '' file; do + # Get relative path from source directory + rel_path="${file#$source_dir/}" + dest_file="$dest_dir/$rel_path" + dest_file_dir="$(dirname "$dest_file")" + + # Create destination directory structure + mkdir -p "$dest_file_dir" + + # Check if file is text using file command + if file --mime-type "$file" 2>/dev/null | grep -q 'text/'; then + echo " Processing: $rel_path" + if ! 
cat "$file" | wild-compile-template > "$dest_file"; then + echo " ✗ Failed to process: $rel_path" >&2 + exit 1 + fi + else + echo " Copying: $rel_path" + cp "$file" "$dest_file" + fi +done + +echo "✅ Complete: All files processed successfully" \ No newline at end of file diff --git a/bin/wild-config b/bin/wild-config index 88450dd..6aa853b 100755 --- a/bin/wild-config +++ b/bin/wild-config @@ -7,7 +7,7 @@ set -o pipefail usage() { echo "Usage: wild-config " echo "" - echo "Read a value from ./config.yaml using a YAML key path." + echo "Read a value from \$WC_HOME/config.yaml using a YAML key path." echo "" echo "Examples:" echo " wild-config 'cluster.name' # Get cluster name" @@ -49,17 +49,25 @@ if [ -z "${KEY_PATH}" ]; then exit 1 fi -if [ ! -f "./config.yaml" ]; then - echo "Error: ./config.yaml not found in current directory" +# Check if WC_HOME is set +if [ -z "${WC_HOME:-}" ]; then + echo "Error: WC_HOME environment variable not set" >&2 + exit 1 +fi + +CONFIG_FILE="${WC_HOME}/config.yaml" + +if [ ! -f "${CONFIG_FILE}" ]; then + echo "Error: config file not found at ${CONFIG_FILE}" >&2 exit 1 fi # Use yq to extract the value from the YAML file -result=$(yq eval ".${KEY_PATH}" ./config.yaml) +result=$(yq eval ".${KEY_PATH}" "${CONFIG_FILE}") 2>/dev/null # Check if result is null (key not found) if [ "${result}" = "null" ]; then - echo "Error: Key path '${KEY_PATH}' not found in ./config.yaml" >&2 + echo "Error: Key path '${KEY_PATH}' not found in ${CONFIG_FILE}" >&2 exit 1 fi diff --git a/bin/wild-secret b/bin/wild-secret new file mode 100755 index 0000000..350e758 --- /dev/null +++ b/bin/wild-secret @@ -0,0 +1,74 @@ +#!/bin/bash + +set -e +set -o pipefail + +# Usage function +usage() { + echo "Usage: wild-secret " + echo "" + echo "Read a value from ./secrets.yaml using a YAML key path." 
+ echo "" + echo "Examples:" + echo " wild-secret 'database.password' # Get database password" + echo " wild-secret 'api.keys.github' # Get GitHub API key" + echo " wild-secret 'credentials[0].token' # Get first credential token" + echo "" + echo "Options:" + echo " -h, --help Show this help message" +} + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + usage + exit 0 + ;; + -*) + echo "Unknown option $1" + usage + exit 1 + ;; + *) + if [ -z "${KEY_PATH}" ]; then + KEY_PATH="$1" + else + echo "Too many arguments" + usage + exit 1 + fi + shift + ;; + esac +done + +if [ -z "${KEY_PATH}" ]; then + echo "Error: YAML key path is required" + usage + exit 1 +fi + +# Check if WC_HOME is set +if [ -z "${WC_HOME:-}" ]; then + echo "Error: WC_HOME environment variable not set" >&2 + exit 1 +fi + +SECRETS_FILE="${WC_HOME}/secrets.yaml" + +if [ ! -f "${SECRETS_FILE}" ]; then + echo "Error: secrets file not found at ${SECRETS_FILE}" >&2 + exit 1 +fi + +# Use yq to extract the value from the YAML file +result=$(yq eval ".${KEY_PATH}" "${SECRETS_FILE}" 2>/dev/null) + +# Check if result is null (key not found) +if [ "${result}" = "null" ]; then + echo "Error: Key path '${KEY_PATH}' not found in ${SECRETS_FILE}" >&2 + exit 1 +fi + +echo "${result}" \ No newline at end of file diff --git a/bin/wild-talos-iso b/bin/wild-talos-iso new file mode 100755 index 0000000..cb3f47c --- /dev/null +++ b/bin/wild-talos-iso @@ -0,0 +1,137 @@ +#!/bin/bash + +# Talos ISO download script +# Downloads custom Talos ISO with system extensions for USB boot + +set -euo pipefail + +# Check if WC_HOME is set +if [ -z "${WC_HOME:-}" ]; then + echo "Error: WC_HOME environment variable not set. Run \`source .env\`." 
+ exit 1 +fi + +CONFIG_FILE="${WC_HOME}/config.yaml" +ISO_DIR="${WC_HOME}/.wildcloud/iso" +FORCE_DOWNLOAD=false + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --force) + FORCE_DOWNLOAD=true + shift + ;; + -h|--help) + echo "Usage: wild-talos-iso [--force]" + echo "" + echo "Downloads custom Talos ISO with system extensions for USB boot." + echo "" + echo "Options:" + echo " --force Force re-download even if ISO already exists" + echo " -h, --help Show this help message" + echo "" + echo "This script:" + echo " 1. Gets schematic ID and Talos version from config.yaml" + echo " 2. Downloads custom ISO from Talos Image Factory" + echo " 3. Saves ISO to .wildcloud/iso/ directory" + echo "" + echo "The ISO includes extensions configured in config.yaml:" + echo " (.cluster.nodes.talos.schematic.customization.systemExtensions)" + exit 0 + ;; + *) + echo "Unknown option: $1" + echo "Use --help for usage information" + exit 1 + ;; + esac +done + +echo "Downloading custom Talos ISO with system extensions..." 
+ +# Get Talos version and schematic ID from config +TALOS_VERSION=$(yq eval '.cluster.nodes.talos.version' "$CONFIG_FILE") +SCHEMATIC_ID=$(yq eval '.cluster.nodes.talos.schematicId // ""' "$CONFIG_FILE") + +if [ -z "$TALOS_VERSION" ] || [ "$TALOS_VERSION" = "null" ]; then + echo "Error: No Talos version found in config.yaml at .cluster.nodes.talos.version" + exit 1 +fi + +if [ -z "$SCHEMATIC_ID" ] || [ "$SCHEMATIC_ID" = "null" ]; then + echo "Error: No schematic ID found in config.yaml" + echo "Run 'wild-talos-schema' first to upload schematic and get ID" + exit 1 +fi + +echo "Talos version: $TALOS_VERSION" +echo "Schematic ID: $SCHEMATIC_ID" +echo "" +echo "ISO includes extensions:" +yq eval '.cluster.nodes.talos.schematic.customization.systemExtensions.officialExtensions[]' "$CONFIG_FILE" | sed 's/^/ - /' +echo "" + +# Create ISO directory +mkdir -p "$ISO_DIR" + +# Define ISO filename and path +ISO_FILENAME="talos-${TALOS_VERSION}-metal-amd64.iso" +ISO_PATH="${ISO_DIR}/${ISO_FILENAME}" + +# Check if ISO already exists +if [ -f "$ISO_PATH" ] && [ "$FORCE_DOWNLOAD" = false ]; then + echo "✅ ISO already exists: $ISO_PATH" + echo "Use --force to re-download" + echo "" + echo "To create a bootable USB:" + echo " See docs/node_setup.md for USB creation instructions" + exit 0 +fi + +# Download ISO from Image Factory +ISO_URL="https://factory.talos.dev/image/${SCHEMATIC_ID}/${TALOS_VERSION}/metal-amd64.iso" +echo "Downloading ISO from: $ISO_URL" +echo "Saving to: $ISO_PATH" +echo "" + +# Download with progress bar +if command -v wget >/dev/null 2>&1; then + wget --progress=bar:force -O "$ISO_PATH" "$ISO_URL" +elif command -v curl >/dev/null 2>&1; then + curl -L --progress-bar -o "$ISO_PATH" "$ISO_URL" +else + echo "Error: Neither wget nor curl is available for downloading" + exit 1 +fi + +# Verify download +if [ ! -f "$ISO_PATH" ] || [ ! 
-s "$ISO_PATH" ]; then + echo "Error: Download failed or file is empty" + rm -f "$ISO_PATH" + exit 1 +fi + +# Get file size for verification +FILE_SIZE=$(du -h "$ISO_PATH" | cut -f1) + +echo "" +echo "✅ Custom Talos ISO downloaded successfully!" +echo "" +echo "ISO Details:" +echo " File: $ISO_PATH" +echo " Size: $FILE_SIZE" +echo " Version: $TALOS_VERSION" +echo " Extensions: $(yq eval '.cluster.nodes.talos.schematic.customization.systemExtensions.officialExtensions | length' "$CONFIG_FILE") extensions included" +echo " Auto-wipe: Enabled (will wipe existing Talos installations)" +echo "" +echo "Next steps:" +echo "1. Create bootable USB drive (see docs/node_setup.md)" +echo "2. Boot target machine from USB" +echo "3. Run hardware detection: ./detect-node-hardware.sh " +echo "4. Apply machine configuration" +echo "" +echo "USB Creation Quick Reference:" +echo " Linux: sudo dd if=$ISO_PATH of=/dev/sdX bs=4M status=progress" +echo " macOS: sudo dd if=$ISO_PATH of=/dev/rdiskX bs=4m" +echo " Windows: Use Rufus, Balena Etcher, or similar tool" \ No newline at end of file diff --git a/bin/wild-talos-schema b/bin/wild-talos-schema new file mode 100755 index 0000000..28ddb69 --- /dev/null +++ b/bin/wild-talos-schema @@ -0,0 +1,113 @@ +#!/bin/bash + +# Talos schematic management script +# This script manages Talos Image Factory schematics centrally +# Usage: wild-talos-schema [--force] + +set -euo pipefail + +# Check if WC_HOME is set +if [ -z "${WC_HOME:-}" ]; then + echo "Error: WC_HOME environment variable not set. Run \`source .env\`." + exit 1 +fi + +CONFIG_FILE="${WC_HOME}/config.yaml" +FORCE_UPLOAD=false + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --force) + FORCE_UPLOAD=true + shift + ;; + -h|--help) + echo "Usage: wild-talos-schema [--force]" + echo "" + echo "Manages Talos Image Factory schematics centrally." 
+ echo "" + echo "Options:" + echo " --force Force re-upload even if schematicId already exists" + echo " -h, --help Show this help message" + echo "" + echo "This script:" + echo " 1. Reads schematic from config.yaml (.cluster.nodes.talos.schematic)" + echo " 2. Uploads it to Image Factory if needed" + echo " 3. Stores the schematicId in config.yaml (.cluster.nodes.talos.schematicId)" + exit 0 + ;; + *) + echo "Unknown option: $1" + echo "Use --help for usage information" + exit 1 + ;; + esac +done + +echo "Managing Talos schematic for wildcloud..." + +# Check if schematic exists in config.yaml +if ! yq eval '.cluster.nodes.talos.schematic' "$CONFIG_FILE" | grep -v "null" >/dev/null 2>&1; then + echo "Error: No schematic found in config.yaml at .cluster.nodes.talos.schematic" + echo "Expected schematic configuration with systemExtensions" + exit 1 +fi + +# Check if schematicId already exists (unless force) +EXISTING_ID=$(yq eval '.cluster.nodes.talos.schematicId // ""' "$CONFIG_FILE") +if [ -n "$EXISTING_ID" ] && [ "$FORCE_UPLOAD" = false ]; then + echo "✅ Schematic ID already exists: $EXISTING_ID" + echo "Use --force to re-upload and generate a new ID" + exit 0 +fi + +echo "Extracting schematic from config.yaml..." + +# Create temporary schematic file +TEMP_SCHEMATIC=$(mktemp) +trap "rm -f $TEMP_SCHEMATIC" EXIT + +# Extract schematic from config.yaml +yq eval '.cluster.nodes.talos.schematic' "$CONFIG_FILE" > "$TEMP_SCHEMATIC" + +echo "Schematic contents:" +cat "$TEMP_SCHEMATIC" +echo "" + +# Upload schematic to Image Factory +echo "Uploading schematic to Talos Image Factory..." 
+SCHEMATIC_RESPONSE=$(curl -s -X POST --data-binary @"$TEMP_SCHEMATIC" https://factory.talos.dev/schematics) + +if [ -z "$SCHEMATIC_RESPONSE" ]; then + echo "Error: Failed to upload schematic to Image Factory" + exit 1 +fi + +# Parse schematic ID from JSON response +SCHEMATIC_ID=$(echo "$SCHEMATIC_RESPONSE" | sed 's/.*"id":"\([^"]*\)".*/\1/') + +if [ -z "$SCHEMATIC_ID" ] || [ "$SCHEMATIC_ID" = "$SCHEMATIC_RESPONSE" ]; then + echo "Error: Failed to parse schematic ID from response: $SCHEMATIC_RESPONSE" + exit 1 +fi + +echo "✅ Schematic uploaded successfully!" +echo "Schematic ID: $SCHEMATIC_ID" + +# Update config.yaml with schematic ID +echo "Updating config.yaml with schematic ID..." +yq eval ".cluster.nodes.talos.schematicId = \"$SCHEMATIC_ID\"" -i "$CONFIG_FILE" + +echo "" +echo "🎉 Schematic management complete!" +echo "" +echo "Schematic ID: $SCHEMATIC_ID" +echo "Saved to: config.yaml (.cluster.nodes.talos.schematicId)" +echo "" +echo "This schematic includes:" +yq eval '.cluster.nodes.talos.schematic.customization.systemExtensions.officialExtensions[]' "$CONFIG_FILE" | sed 's/^/ - /' +echo "" +echo "Other scripts can now use this schematicId:" +echo " - setup/dnsmasq/bin/create-setup-bundle.sh (PXE boot assets)" +echo " - setup/cluster-nodes/create-installer-image.sh (custom installer)" \ No newline at end of file diff --git a/docs/SETUP.md b/docs/SETUP.md index 7ec832c..f26a783 100644 --- a/docs/SETUP.md +++ b/docs/SETUP.md @@ -1,5 +1,11 @@ # Setting Up Your Wild-cloud +Install dependencies: + +```bash +scripts/setup-utils.sh +``` + Add the `bin` directory to your path. Initialize a personal wild-cloud in any empty directory, for example: diff --git a/env.sh b/env.sh new file mode 100644 index 0000000..937351a --- /dev/null +++ b/env.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# Set the WC_HOME environment variable to this script's directory. +# This variable is used consistently across the Wild Config scripts. 
+export WC_HOME="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" && pwd)" + +# Add bin to path first so wild-config is available +export PATH="$WC_HOME/bin:$PATH" + +# Install kubectl +if ! command -v kubectl &> /dev/null; then + echo "Error: kubectl is not installed. Installing." + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" + echo "$(cat kubectl.sha256) kubectl" | sha256sum --check + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl +fi + +# Install talosctl +if ! command -v talosctl &> /dev/null; then + echo "Error: talosctl is not installed. Installing." + curl -sL https://talos.dev/install | sh +fi + + +# Check if gomplate is installed +if ! command -v gomplate &> /dev/null; then + echo "Error: gomplate is not installed. Please install gomplate first." + echo "Visit: https://docs.gomplate.ca/installing/" + exit 1 +fi + +KUBECONFIG=~/.kube/config +export KUBECONFIG + +# Use cluster name as both talos and kubectl context name +CLUSTER_NAME=$(wild-config cluster.name) +if [ -z "${CLUSTER_NAME}" ] || [ "${CLUSTER_NAME}" = "null" ]; then + echo "Error: cluster.name not set in config.yaml" + exit 1 +fi + +# Only try to use the kubectl context if it exists +if kubectl config get-contexts "${CLUSTER_NAME}" >/dev/null 2>&1; then + kubectl config use-context "${CLUSTER_NAME}" + echo "Using Kubernetes context: ${CLUSTER_NAME}" +# else +# echo "Kubernetes context '${CLUSTER_NAME}' not found, skipping context switch" +fi diff --git a/load-env.sh b/load-env.sh deleted file mode 100755 index 705ac0d..0000000 --- a/load-env.sh +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/env bash -# This script sources environment variables from: -# 1. The root .env file -# 2. 
App-specific .env files from enabled apps (with install=true in manifest.yaml) -# Dependencies are respected - if app A requires app B, app B's .env is sourced first -# set -e - -PROJECT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -ENV_FILE="$PROJECT_DIR/.env" -BIN_DIR="$PROJECT_DIR/bin" -APPS_DIR="$PROJECT_DIR/apps" - -# Check if yq is installed -if ! command -v yq &> /dev/null; then - echo "Error: yq is not installed. Please install it first." - echo "You can install it with: wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/local/bin/yq && chmod +x /usr/local/bin/yq" - exit 1 -fi - -# Source the main .env file -if [ ! -f "$ENV_FILE" ]; then - echo "Error: Environment file not found: $ENV_FILE" - exit 1 -fi - -# Turn on allexport to automatically export all variables -set -a -source "$ENV_FILE" -set +a - -# Function to parse YAML using yq -parse_yaml() { - local yaml_file=$1 - - # Extract the values we need using yq - local name=$(yq eval '.name' "$yaml_file") - local install=$(yq eval '.install' "$yaml_file") - - # Convert boolean to 1/0 for consistency - if [ "$install" = "true" ]; then - install="1" - elif [ "$install" = "false" ]; then - install="0" - fi - - # Get dependencies as space-separated string - local requires="" - if yq eval 'has("requires")' "$yaml_file" | grep -q "true"; then - requires=$(yq eval '.requires[].name' "$yaml_file" | tr '\n' ' ' | sed 's/ $//') - fi - - # Return the parsed data as a single line - echo "$name|$install|$requires" -} - -# Resolve dependencies and create a list of apps to source in the right order -resolve_dependencies() { - local apps=() - local apps_to_install=() - local deps_map=() - - # Parse all manifest files - for manifest in "$APPS_DIR"/*/manifest.yaml; do - local app_dir=$(dirname "$manifest") - local app_name=$(basename "$app_dir") - - local parsed_data=$(parse_yaml "$manifest") - IFS='|' read -r name install requires <<< "$parsed_data" - - # Add to our arrays - 
apps+=("$name") - if [ "$install" = "1" ] || [ "$install" = "true" ]; then - apps_to_install+=("$name") - deps_map+=("$name:$requires") - fi - done - - # Create an ordered list with dependencies first - local ordered=() - - # First add apps with no dependencies - for app in "${apps_to_install[@]}"; do - local has_deps=false - for dep_entry in "${deps_map[@]}"; do - local app_name=$(echo "$dep_entry" | cut -d':' -f1) - local deps=$(echo "$dep_entry" | cut -d':' -f2) - - if [ "$app_name" = "$app" ] && [ -n "$deps" ]; then - has_deps=true - break - fi - done - - if [ "$has_deps" = false ]; then - ordered+=("$app") - fi - done - - # Now add apps with resolved dependencies - local remaining=() - for app in "${apps_to_install[@]}"; do - if ! echo " ${ordered[*]} " | grep -q " $app "; then - remaining+=("$app") - fi - done - - while [ ${#remaining[@]} -gt 0 ]; do - local progress=false - - for app in "${remaining[@]}"; do - local all_deps_resolved=true - - # Find the dependencies for this app - local app_deps="" - for dep_entry in "${deps_map[@]}"; do - local app_name=$(echo "$dep_entry" | cut -d':' -f1) - local deps=$(echo "$dep_entry" | cut -d':' -f2) - - if [ "$app_name" = "$app" ]; then - app_deps="$deps" - break - fi - done - - # Check if all dependencies are in the ordered list - if [ -n "$app_deps" ]; then - for dep in $app_deps; do - if ! echo " ${ordered[*]} " | grep -q " $dep "; then - all_deps_resolved=false - break - fi - done - fi - - if [ "$all_deps_resolved" = true ]; then - ordered+=("$app") - progress=true - fi - done - - # If no progress was made, we have a circular dependency - if [ "$progress" = false ]; then - echo "Warning: Circular dependency detected in app manifests" - # Add remaining apps to avoid getting stuck - ordered+=("${remaining[@]}") - break - fi - - # Update remaining list - local new_remaining=() - for app in "${remaining[@]}"; do - if ! 
echo " ${ordered[*]} " | grep -q " $app "; then - new_remaining+=("$app") - fi - done - remaining=("${new_remaining[@]}") - done - - echo "${ordered[@]}" -} - -# Get ordered list of apps to source -ordered_apps=($(resolve_dependencies)) - -# Source app .env files in dependency order -# echo "Sourcing app environment files..." -for app in "${ordered_apps[@]}"; do - app_env_file="$APPS_DIR/$app/config/.env" - if [ -f "$app_env_file" ]; then - # echo " - $app" - set -a - source "$app_env_file" - set +a - fi -done - -# Add bin directory to PATH -export PATH="$BIN_DIR:$PATH" \ No newline at end of file diff --git a/my-scaffold/.env.example b/my-scaffold/.env.example deleted file mode 100644 index a6d4887..0000000 --- a/my-scaffold/.env.example +++ /dev/null @@ -1,2 +0,0 @@ -KUBECONFIG=~/.kube/config -export KUBECONFIG diff --git a/my-scaffold/.gitignore b/my-scaffold/.gitignore index 5950e97..5402926 100644 --- a/my-scaffold/.gitignore +++ b/my-scaffold/.gitignore @@ -1,4 +1,6 @@ secrets.yaml .wildcloud/cache .bots/*/sessions -backup/ \ No newline at end of file +backup/ +.working +.claude diff --git a/my-scaffold/.wildcloud/.gitkeep b/my-scaffold/.wildcloud/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/my-scaffold/config.example.yaml b/my-scaffold/config.example.yaml index 4a5f581..d8c62e1 100644 --- a/my-scaffold/config.example.yaml +++ b/my-scaffold/config.example.yaml @@ -1,6 +1,5 @@ wildcloud: - # You can also use a local path for the repository such as /home/adam/repos/wild-cloud - repository: https://github.com/payneio/wild-cloud + repository: /home/adam/wildcloud operator: email: adam@adam.tld cloud: @@ -17,15 +16,40 @@ cloud: storageCapacity: 250Gi dns: ip: 192.168.8.218 + externalResolver: 1.1.1.1 dhcpRange: 192.168.8.100,192.168.8.239 - dnsmasqInterface: enp5s0 + dnsmasq: + interface: enp5s0 + username: adam cluster: - endpoint: computer-01 - endpointIp: 192.168.8.241 + nodes: + talos: + version: v1.10.3 + schematic: + customization: + 
extraKernelArgs: + - -talos.halt_if_installed + systemExtensions: + officialExtensions: + - siderolabs/gvisor + - siderolabs/intel-ucode + - siderolabs/iscsi-tools + - siderolabs/util-linux-tools + schematicId: TBD + control: + vip: 192.168.8.30 + node1: + ip: 192.168.8.31 + node2: + ip: 192.168.8.32 + node3: + ip: 192.168.8.33 + name: adam-cluster + ipaddressPool: 192.168.8.20-192.168.8.29 + loadBalancerIp: 192.168.8.20 kubernetes: config: /home/adam/.kube/config context: default - loadBalancerRange: 192.168.8.240-192.168.8.250 dashboard: adminUsername: admin certManager: @@ -33,6 +57,7 @@ cluster: cloudflare: domain: adam.tld ownerId: cloud-adam-cluster - nodes: - talos: - version: v1.10.3 + externalDns: + ownerId: cloud-adam-cluster + dockerRegistry: + storage: 10Gi diff --git a/my-scaffold/docs/node-setup.md b/my-scaffold/docs/node-setup.md new file mode 100644 index 0000000..5394796 --- /dev/null +++ b/my-scaffold/docs/node-setup.md @@ -0,0 +1,246 @@ +# Node Setup Guide + +This guide covers setting up Talos Linux nodes for your Kubernetes cluster using USB boot. + +## Overview + +There are two main approaches for booting Talos nodes: + +1. **USB Boot** (covered here) - Boot from a custom USB drive with system extensions +2. **PXE Boot** - Network boot using dnsmasq setup (see `setup/dnsmasq/README.md`) + +## USB Boot Setup + +### Prerequisites + +- Target hardware for Kubernetes nodes +- USB drive (8GB+ recommended) +- Admin access to create bootable USB drives + +### Step 1: Upload Schematic and Download Custom Talos ISO + +First, upload the system extensions schematic to Talos Image Factory, then download the custom ISO. + +```bash +# Upload schematic configuration to get schematic ID +wild-talos-schema + +# Download custom ISO with system extensions +wild-talos-iso +``` + +The custom ISO includes system extensions (iscsi-tools, util-linux-tools, intel-ucode, gvisor) needed for the cluster and is saved to `.wildcloud/iso/talos-v1.10.3-metal-amd64.iso`. 
+ +### Step 2: Create Bootable USB Drive + +#### Linux (Recommended) + +```bash +# Find your USB device (be careful to select the right device!) +lsblk +sudo dmesg | tail # Check for recently connected USB devices + +# Create bootable USB (replace /dev/sdX with your USB device) +sudo dd if=.wildcloud/iso/talos-v1.10.3-metal-amd64.iso of=/dev/sdX bs=4M status=progress sync + +# Verify the write completed +sync +``` + +**⚠️ Warning**: Double-check the device path (`/dev/sdX`). Writing to the wrong device will destroy data! + +#### macOS + +```bash +# Find your USB device +diskutil list + +# Unmount the USB drive (replace diskX with your USB device) +diskutil unmountDisk /dev/diskX + +# Create bootable USB +sudo dd if=.wildcloud/iso/talos-v1.10.3-metal-amd64.iso of=/dev/rdiskX bs=4m + +# Eject when complete +diskutil eject /dev/diskX +``` + +#### Windows + +Use one of these tools: + +1. **Rufus** (Recommended) + + - Download from https://rufus.ie/ + - Select the Talos ISO file + - Choose your USB drive + - Use "DD Image" mode + - Click "START" + +2. **Balena Etcher** + + - Download from https://www.balena.io/etcher/ + - Flash from file → Select Talos ISO + - Select target USB drive + - Flash! + +3. **Command Line** (Windows 10/11) + + ```cmd + # List disks to find USB drive number + diskpart + list disk + exit + + # Write ISO (replace X with your USB disk number) + dd if=.wildcloud\iso\talos-v1.10.3-metal-amd64.iso of=\\.\PhysicalDriveX bs=4M --progress + ``` + +### Step 3: Boot Target Machine + +1. **Insert USB** into target machine +2. **Boot from USB**: + - Restart machine and enter BIOS/UEFI (usually F2, F12, DEL, or ESC during startup) + - Change boot order to prioritize USB drive + - Or use one-time boot menu (usually F12) +3. **Talos will boot** in maintenance mode with a DHCP IP + +### Step 4: Hardware Detection and Configuration + +Once the machine boots, it will be in maintenance mode with a DHCP IP address. 
+ +```bash +# Find the node's maintenance IP (check your router/DHCP server) +# Then detect hardware and register the node +cd setup/cluster-nodes +./detect-node-hardware.sh <maintenance-ip> <node-number> + +# Example: Node got DHCP IP 192.168.8.150, registering as node 1 +./detect-node-hardware.sh 192.168.8.150 1 +``` + +This script will: + +- Discover network interface names (e.g., `enp4s0`) +- List available disks for installation +- Update `config.yaml` with node-specific hardware settings + +### Step 5: Generate and Apply Configuration + +```bash +# Generate machine configurations with detected hardware +./generate-machine-configs.sh + +# Apply configuration (node will reboot with static IP) +talosctl apply-config --insecure -n <maintenance-ip> --file final/controlplane-node-<node-number>.yaml + +# Example: +talosctl apply-config --insecure -n 192.168.8.150 --file final/controlplane-node-1.yaml +``` + +### Step 6: Verify Installation + +After reboot, the node should come up with its assigned static IP: + +```bash +# Check connectivity (node 1 should be at 192.168.8.31) +ping 192.168.8.31 + +# Verify system extensions are installed +talosctl -e 192.168.8.31 -n 192.168.8.31 get extensions + +# Check for iscsi tools +talosctl -e 192.168.8.31 -n 192.168.8.31 list /usr/local/bin/ | grep iscsi +``` + +## Repeat for Additional Nodes + +For each additional control plane node: + +1. Boot with the same USB drive +2. Run hardware detection with the new maintenance IP and node number +3. Generate and apply configurations +4. 
Verify the node comes up at its static IP + +Example for node 2: + +```bash +./detect-node-hardware.sh 192.168.8.151 2 +./generate-machine-configs.sh +talosctl apply-config --insecure -n 192.168.8.151 --file final/controlplane-node-2.yaml +``` + +## Cluster Bootstrap + +Once all control plane nodes are configured: + +```bash +# Bootstrap the cluster using the VIP +talosctl bootstrap -n 192.168.8.30 + +# Get kubeconfig +talosctl kubeconfig + +# Verify cluster +kubectl get nodes +``` + +## Troubleshooting + +### USB Boot Issues + +- **Machine won't boot from USB**: Check BIOS boot order, disable Secure Boot if needed +- **Talos doesn't start**: Verify ISO was written correctly, try re-creating USB +- **Network issues**: Ensure DHCP is available on your network + +### Hardware Detection Issues + +- **Node not accessible**: Check IP assignment, firewall settings +- **Wrong interface detected**: Manual override in `config.yaml` if needed +- **Disk not found**: Verify disk size (must be >10GB), check disk health + +### Installation Issues + +- **Static IP not assigned**: Check network configuration in machine config +- **Extensions not installed**: Verify ISO includes extensions, check upgrade logs +- **Node won't join cluster**: Check certificates, network connectivity to VIP + +### Checking Logs + +```bash +# View system logs +talosctl -e -n logs machined + +# Check kernel messages +talosctl -e -n dmesg + +# Monitor services +talosctl -e -n get services +``` + +## System Extensions Included + +The custom ISO includes these extensions: + +- **siderolabs/iscsi-tools**: iSCSI initiator tools for persistent storage +- **siderolabs/util-linux-tools**: Utility tools including fstrim for storage +- **siderolabs/intel-ucode**: Intel CPU microcode updates (harmless on AMD) +- **siderolabs/gvisor**: Container runtime sandbox (optional security enhancement) + +These extensions enable: + +- Longhorn distributed storage +- Improved security isolation +- CPU microcode updates +- 
Storage optimization tools + +## Next Steps + +After all nodes are configured: + +1. **Install CNI**: Deploy a Container Network Interface (Cilium, Calico, etc.) +2. **Install CSI**: Deploy Container Storage Interface (Longhorn for persistent storage) +3. **Deploy workloads**: Your applications and services +4. **Monitor cluster**: Set up monitoring and logging + +See the main project documentation for application deployment guides. diff --git a/my-scaffold/env.sh b/my-scaffold/env.sh new file mode 100644 index 0000000..937351a --- /dev/null +++ b/my-scaffold/env.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# Set the WC_HOME environment variable to this script's directory. +# This variable is used consistently across the Wild Config scripts. +export WC_HOME="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" && pwd)" + +# Add bin to path first so wild-config is available +export PATH="$WC_HOME/bin:$PATH" + +# Install kubectl +if ! command -v kubectl &> /dev/null; then + echo "Error: kubectl is not installed. Installing." + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" + echo "$(cat kubectl.sha256) kubectl" | sha256sum --check + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl +fi + +# Install talosctl +if ! command -v talosctl &> /dev/null; then + echo "Error: talosctl is not installed. Installing." + curl -sL https://talos.dev/install | sh +fi + + +# Check if gomplate is installed +if ! command -v gomplate &> /dev/null; then + echo "Error: gomplate is not installed. Please install gomplate first." 
+ echo "Visit: https://docs.gomplate.ca/installing/" + exit 1 +fi + +KUBECONFIG=~/.kube/config +export KUBECONFIG + +# Use cluster name as both talos and kubectl context name +CLUSTER_NAME=$(wild-config cluster.name) +if [ -z "${CLUSTER_NAME}" ] || [ "${CLUSTER_NAME}" = "null" ]; then + echo "Error: cluster.name not set in config.yaml" + exit 1 +fi + +# Only try to use the kubectl context if it exists +if kubectl config get-contexts "${CLUSTER_NAME}" >/dev/null 2>&1; then + kubectl config use-context "${CLUSTER_NAME}" + echo "Using Kubernetes context: ${CLUSTER_NAME}" +# else +# echo "Kubernetes context '${CLUSTER_NAME}' not found, skipping context switch" +fi diff --git a/setup/cluster/setup-utils.sh b/scripts/setup-utils.sh similarity index 100% rename from setup/cluster/setup-utils.sh rename to scripts/setup-utils.sh diff --git a/setup/README.md b/setup/README.md index 968a2c8..0dd974b 100644 --- a/setup/README.md +++ b/setup/README.md @@ -1,5 +1,7 @@ # Setup instructions +Install dependencies: + Follow the instructions to [set up a dnsmasq machine](./dnsmasq/README.md). Follow the instructions to [set up cluster nodes](./cluster-nodes/README.md). diff --git a/setup/cluster-nodes/README.md b/setup/cluster-nodes/README.md index 6b8ab1d..4df923e 100644 --- a/setup/cluster-nodes/README.md +++ b/setup/cluster-nodes/README.md @@ -1,90 +1,235 @@ # Cluster Node Setup -Cluster node setup is WIP. Any kubernetes setup will do. Currently, we have a working cluster using each of these methods and are moving towards Talos. +This directory contains automation for setting up Talos Kubernetes cluster nodes with static IP configuration. -## k3s cluster node setup +## Hardware Detection and Setup (Recommended) -K3s provides a fully-compliant Kubernetes distribution in a small footprint. +The automated setup discovers hardware configuration from nodes in maintenance mode and generates machine configurations with the correct interface names and disk paths. 
-To set up control nodes: +### Prerequisites + +1. `source .env` +2. Boot nodes with Talos ISO in maintenance mode +3. Nodes must be accessible on the network + +### Hardware Discovery Workflow ```bash -# Install K3s without the default load balancer (we'll use MetalLB) -curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode=644 --disable servicelb --disable metallb +# ONE-TIME CLUSTER INITIALIZATION (run once per cluster) +./init-cluster.sh -# Set up kubectl configuration -mkdir -p ~/.kube -sudo cat /etc/rancher/k3s/k3s.yaml > ~/.kube/config -chmod 600 ~/.kube/config +# FOR EACH CONTROL PLANE NODE: + +# 1. Boot node with Talos ISO (it will get a DHCP IP in maintenance mode) +# 2. Detect hardware and update config.yaml +./detect-node-hardware.sh + +# Example: Node boots at 192.168.8.168, register as node 1 +./detect-node-hardware.sh 192.168.8.168 1 + +# 3. Generate machine config for registered nodes +./generate-machine-configs.sh + +# 4. Apply configuration - node will reboot with static IP +talosctl apply-config --insecure -n 192.168.8.168 --file final/controlplane-node-1.yaml + +# 5. 
Wait for reboot, node should come up at its target static IP (192.168.8.31) + +# Repeat steps 1-5 for additional control plane nodes ``` -Set up the infrastructure services after these are running, then you can add more worker nodes with: +The `detect-node-hardware.sh` script will: + +- Connect to nodes in maintenance mode via talosctl +- Discover active ethernet interfaces (e.g., `enp4s0` instead of hardcoded `eth0`) +- Discover available installation disks (>10GB) +- Update `config.yaml` with per-node hardware configuration +- Provide next steps for machine config generation + +The `init-cluster.sh` script will: + +- Generate Talos cluster secrets and base configurations (once per cluster) +- Set up talosctl context with cluster certificates +- Configure VIP endpoint for cluster communication + +The `generate-machine-configs.sh` script will: + +- Check which nodes have been hardware-detected +- Compile network configuration templates with discovered hardware settings +- Create final machine configurations for registered nodes only +- Include system extensions for Longhorn (iscsi-tools, util-linux-tools) +- Update talosctl context with registered node IPs + +### Cluster Bootstrap + +After all control plane nodes are configured with static IPs: ```bash -# On your master node, get the node token -NODE_TOKEN=`sudo cat /var/lib/rancher/k3s/server/node-token` -MASTER_IP=192.168.8.222 -# On each new node, join the cluster +# Bootstrap the cluster using any control node +talosctl bootstrap --nodes 192.168.8.31 --endpoint 192.168.8.31 -curl -sfL https://get.k3s.io | K3S_URL=https://$MASTER_IP:6443 K3S_TOKEN=$NODE_TOKEN sh - -``` -## Talos cluster node setup - -This is a new experimental method for setting up cluster nodes. We're currently working through the simplest bootstrapping experience. - -Currently, though, all these steps are manual. - -Copy this entire directory to your personal cloud folder and modify it as necessary as you install. 
We suggest putting it in `cluster/bootstrap`. - -```bash - -# Install kubectl -curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" -curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" -echo "$(cat kubectl.sha256) kubectl" | sha256sum --check -sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl - -# Install talosctl -curl -sL https://talos.dev/install | sh - -# In your LAN Router (which is your DHCP server), - -CLUSTER_NAME=test-cluster -VIP=192.168.8.20 # Non-DHCP - -# Boot your nodes with the ISO and put their IP addresses here. Pin in DHCP. -# Nodes must all be on the same switch. -# TODO: How to set these static on boot? -CONTROL_NODE_1=192.168.8.21 -CONTROL_NODE_2=192.168.8.22 -CONTROL_NODE_3=192.168.8.23 - -# Generate cluster config files (including pki and tokens) -cd generated -talosctl gen secrets -o secrets.yaml -talosctl gen config --with-secrets secrets.yaml $CLUSTER_NAME https://$VIP:6443 -talosctl config merge ./talosconfig -cd .. - -# If the disk you want to install Talos on isn't /dev/sda, you should -# update to the disk you want in patch/controlplane.yml and patch/worker.yaml. 
If you have already attempted to install a node and received an error about not being able to find /dev/sda, you can see what disks are available on it with: -# -# talosctl -n $VIP get disks --insecure - -# See https://www.talos.dev/v1.10/talos-guides/configuration/patching/ -talosctl machineconfig patch generated/controlplane.yaml --patch @patch/controlplane.yaml -o final/controlplane.yaml -talosctl machineconfig patch generated/worker.yaml --patch @patch/worker.yaml -o final/worker.yaml -$ - -# Apply control plane config -talosctl apply-config --insecure -n $CONTROL_NODE_1,$CONTROL_NODE_2,$CONTROL_NODE_3 --file final/controlplane.yaml - -# Bootstrap cluster on control plan -talosctl bootstrap -n $VIP - -# Merge new cluster information into kubeconfig +# Get kubeconfig talosctl kubeconfig -# You are now ready to use both `talosctl` and `kubectl` against your new cluster. +# Verify cluster is ready +kubectl get nodes +``` + +## Complete Example + +Here's a complete example of setting up a 3-node control plane: + +```bash +# CLUSTER INITIALIZATION (once per cluster) +./init-cluster.sh + +# NODE 1 +# Boot node with Talos ISO, it gets DHCP IP 192.168.8.168 +./detect-node-hardware.sh 192.168.8.168 1 +./generate-machine-configs.sh +talosctl apply-config --insecure -n 192.168.8.168 --file final/controlplane-node-1.yaml +# Node reboots and comes up at 192.168.8.31 + +# NODE 2 +# Boot second node with Talos ISO, it gets DHCP IP 192.168.8.169 +./detect-node-hardware.sh 192.168.8.169 2 +./generate-machine-configs.sh +talosctl apply-config --insecure -n 192.168.8.169 --file final/controlplane-node-2.yaml +# Node reboots and comes up at 192.168.8.32 + +# NODE 3 +# Boot third node with Talos ISO, it gets DHCP IP 192.168.8.170 +./detect-node-hardware.sh 192.168.8.170 3 +./generate-machine-configs.sh +talosctl apply-config --insecure -n 192.168.8.170 --file final/controlplane-node-3.yaml +# Node reboots and comes up at 192.168.8.33 + +# CLUSTER BOOTSTRAP +talosctl bootstrap -n 
192.168.8.30 +talosctl kubeconfig +kubectl get nodes +``` + +## Configuration Details + +### Per-Node Configuration + +Each control plane node has its own configuration block in `config.yaml`: + +```yaml +cluster: + nodes: + control: + vip: 192.168.8.30 + node1: + ip: 192.168.8.31 + interface: enp4s0 # Discovered automatically + disk: /dev/sdb # Selected during hardware detection + node2: + ip: 192.168.8.32 + # interface and disk added after hardware detection + node3: + ip: 192.168.8.33 + # interface and disk added after hardware detection +``` + +Worker nodes use DHCP by default. You can use the same hardware detection process for worker nodes if static IPs are needed. + +## Talosconfig Management + +### Context Naming and Conflicts + +When running `talosctl config merge ./generated/talosconfig`, if a context with the same name already exists, talosctl will create an enumerated version (e.g., `demo-cluster-2`). + +**For a clean setup:** + +- Delete existing contexts before merging: `talosctl config contexts` then `talosctl config context --remove` +- Or use `--force` to overwrite: `talosctl config merge ./generated/talosconfig --force` + +**Recommended approach for new clusters:** + +```bash +# Remove old context if rebuilding cluster +talosctl config context demo-cluster --remove || true + +# Merge new configuration +talosctl config merge ./generated/talosconfig +talosctl config endpoint 192.168.8.30 +talosctl config node 192.168.8.31 # Add nodes as they are registered +``` + +### Context Configuration Timeline + +1. **After first node hardware detection**: Merge talosconfig and set endpoint/first node +2. **After additional nodes**: Add them to the existing context with `talosctl config node ` +3. 
**Before cluster bootstrap**: Ensure all control plane nodes are in the node list + +### System Extensions + +All nodes include: + +- `siderolabs/iscsi-tools`: Required for Longhorn storage +- `siderolabs/util-linux-tools`: Utility tools for storage operations + +### Hardware Detection + +The `detect-node-hardware.sh` script automatically discovers: + +- **Network interfaces**: Finds active ethernet interfaces (no more hardcoded `eth0`) +- **Installation disks**: Lists available disks >10GB for interactive selection +- **Per-node settings**: Updates `config.yaml` with hardware-specific configuration + +This eliminates the need to manually configure hardware settings and handles different hardware configurations across nodes. + +### Template Structure + +Configuration templates are stored in `patch.templates/` and use gomplate syntax: + +- `controlplane-node-1.yaml`: Template for first control plane node +- `controlplane-node-2.yaml`: Template for second control plane node +- `controlplane-node-3.yaml`: Template for third control plane node +- `worker.yaml`: Template for worker nodes + +Templates use per-node variables from `config.yaml`: + +- `{{ .cluster.nodes.control.node1.ip }}` +- `{{ .cluster.nodes.control.node1.interface }}` +- `{{ .cluster.nodes.control.node1.disk }}` +- `{{ .cluster.nodes.control.vip }}` + +The `wild-compile-template-dir` command processes all templates and outputs compiled configurations to the `patch/` directory. 
+ +## Troubleshooting + +### Hardware Detection Issues + +```bash +# Check if node is accessible in maintenance mode +talosctl -n version --insecure + +# View available network interfaces +talosctl -n get links --insecure + +# View available disks +talosctl -n get disks --insecure +``` + +### Manual Hardware Discovery + +If the automatic detection fails, you can manually inspect hardware: + +```bash +# Find active ethernet interfaces +talosctl -n get links --insecure -o json | jq -s '.[] | select(.spec.operationalState == "up" and .spec.type == "ether" and .metadata.id != "lo") | .metadata.id' + +# Find suitable installation disks +talosctl -n get disks --insecure -o json | jq -s '.[] | select(.spec.size > 10000000000) | .metadata.id' +``` + +### Node Status + +```bash +# View machine configuration (only works after config is applied) +talosctl -n get machineconfig ``` diff --git a/setup/cluster-nodes/create-installer-image.sh b/setup/cluster-nodes/create-installer-image.sh new file mode 100755 index 0000000..d10b67d --- /dev/null +++ b/setup/cluster-nodes/create-installer-image.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +# Talos custom installer image creation script +# This script generates installer image URLs using the centralized schematic ID + +set -euo pipefail + +# Check if WC_HOME is set +if [ -z "${WC_HOME:-}" ]; then + echo "Error: WC_HOME environment variable not set. Run \`source ./env.sh\`." + exit 1 +fi + +# Get Talos version and schematic ID from config +TALOS_VERSION=$(wild-config cluster.nodes.talos.version) +SCHEMATIC_ID=$(wild-config cluster.nodes.talos.schematicId) + +echo "Creating custom Talos installer image..." 
+echo "Talos version: $TALOS_VERSION" + +# Check if schematic ID exists +if [ -z "$SCHEMATIC_ID" ] || [ "$SCHEMATIC_ID" = "null" ]; then + echo "Error: No schematic ID found in config.yaml" + echo "Run 'wild-talos-schema' first to upload schematic and get ID" + exit 1 +fi + +echo "Schematic ID: $SCHEMATIC_ID" +echo "" +echo "Schematic includes:" +yq eval '.cluster.nodes.talos.schematic.customization.systemExtensions.officialExtensions[]' "${WC_HOME}/config.yaml" | sed 's/^/ - /' +echo "" + +# Generate installer image URL +INSTALLER_URL="factory.talos.dev/metal-installer/$SCHEMATIC_ID:$TALOS_VERSION" + +echo "" +echo "🎉 Custom installer image URL generated!" +echo "" +echo "Installer URL: $INSTALLER_URL" +echo "" +echo "Usage in machine configuration:" +echo "machine:" +echo " install:" +echo " image: $INSTALLER_URL" +echo "" +echo "Next steps:" +echo "1. Update machine config templates with this installer URL" +echo "2. Regenerate machine configurations" +echo "3. Apply to existing nodes to trigger installation with extensions" +echo "" +echo "To update templates automatically, run:" +echo " sed -i 's|image:.*|image: $INSTALLER_URL|' patch.templates/controlplane-node-*.yaml" \ No newline at end of file diff --git a/setup/cluster-nodes/detect-node-hardware.sh b/setup/cluster-nodes/detect-node-hardware.sh new file mode 100755 index 0000000..1735189 --- /dev/null +++ b/setup/cluster-nodes/detect-node-hardware.sh @@ -0,0 +1,163 @@ +#!/bin/bash + +# Node registration script for Talos cluster setup +# This script discovers hardware configuration from a node in maintenance mode +# and updates config.yaml with per-node hardware settings + +set -euo pipefail + +# Check if WC_HOME is set +if [ -z "${WC_HOME:-}" ]; then + echo "Error: WC_HOME environment variable not set. Run \`source ./env.sh\`." + exit 1 +fi + +# Usage function +usage() { + echo "Usage: register-node.sh " + echo "" + echo "Register a Talos node by discovering its hardware configuration." 
+ echo "The node must be booted in maintenance mode and accessible via IP." + echo "" + echo "Arguments:" + echo " node-ip Current IP of the node in maintenance mode" + echo " node-number Node number (1, 2, or 3) for control plane nodes" + echo "" + echo "Examples:" + echo " ./register-node.sh 192.168.8.168 1" + echo " ./register-node.sh 192.168.8.169 2" + echo "" + echo "This script will:" + echo " - Query the node for available network interfaces" + echo " - Query the node for available disks" + echo " - Update config.yaml with the per-node hardware settings" + echo " - Update patch templates to use per-node hardware" +} + +# Parse arguments +if [ $# -ne 2 ]; then + usage + exit 1 +fi + +NODE_IP="$1" +NODE_NUMBER="$2" + +# Validate node number +if [[ ! "$NODE_NUMBER" =~ ^[1-3]$ ]]; then + echo "Error: Node number must be 1, 2, or 3" + exit 1 +fi + +echo "Registering Talos control plane node $NODE_NUMBER at $NODE_IP..." + +# Test connectivity +echo "Testing connectivity to node..." +if ! talosctl -n "$NODE_IP" get links --insecure >/dev/null 2>&1; then + echo "Error: Cannot connect to node at $NODE_IP" + echo "Make sure the node is booted in maintenance mode and accessible." + exit 1 +fi + +echo "✅ Node is accessible" + +# Discover network interfaces +echo "Discovering network interfaces..." + +# First, try to find the interface that's actually carrying traffic (has the default route) +CONNECTED_INTERFACE=$(talosctl -n "$NODE_IP" get routes --insecure -o json 2>/dev/null | \ + jq -s -r '.[] | select(.spec.destination == "0.0.0.0/0" and .spec.gateway != null) | .spec.outLinkName' | \ + head -1) + +if [ -n "$CONNECTED_INTERFACE" ]; then + ACTIVE_INTERFACE="$CONNECTED_INTERFACE" + echo "✅ Discovered connected interface (with default route): $ACTIVE_INTERFACE" +else + # Fallback: find any active ethernet interface + echo "No default route found, checking for active ethernet interfaces..." 
+ ACTIVE_INTERFACE=$(talosctl -n "$NODE_IP" get links --insecure -o json 2>/dev/null | \ + jq -s -r '.[] | select(.spec.operationalState == "up" and .spec.type == "ether" and .metadata.id != "lo") | .metadata.id' | \ + head -1) + + if [ -z "$ACTIVE_INTERFACE" ]; then + echo "Error: No active ethernet interface found" + echo "Available interfaces:" + talosctl -n "$NODE_IP" get links --insecure + echo "" + echo "Available routes:" + talosctl -n "$NODE_IP" get routes --insecure + exit 1 + fi + + echo "✅ Discovered active interface: $ACTIVE_INTERFACE" +fi + +# Discover available disks +echo "Discovering available disks..." +AVAILABLE_DISKS=$(talosctl -n "$NODE_IP" get disks --insecure -o json 2>/dev/null | \ + jq -s -r '.[] | select(.spec.size > 10000000000) | .metadata.id' | \ + head -5) + +if [ -z "$AVAILABLE_DISKS" ]; then + echo "Error: No suitable disks found (must be >10GB)" + echo "Available disks:" + talosctl -n "$NODE_IP" get disks --insecure + exit 1 +fi + +echo "Available disks (>10GB):" +echo "$AVAILABLE_DISKS" +echo "" + +# Let user choose disk +echo "Select installation disk for node $NODE_NUMBER:" +select INSTALL_DISK in $AVAILABLE_DISKS; do + if [ -n "${INSTALL_DISK:-}" ]; then + break + fi + echo "Invalid selection. Please try again." +done + +# Add /dev/ prefix if not present +if [[ "$INSTALL_DISK" != /dev/* ]]; then + INSTALL_DISK="/dev/$INSTALL_DISK" +fi + +echo "✅ Selected disk: $INSTALL_DISK" + +# Update config.yaml with per-node configuration +echo "Updating config.yaml with node $NODE_NUMBER configuration..." 
+ +CONFIG_FILE="${WC_HOME}/config.yaml" + +# Get the target IP for this node from the existing config +TARGET_IP=$(yq eval ".cluster.nodes.control.node${NODE_NUMBER}.ip" "$CONFIG_FILE") + +# Use yq to update the per-node configuration +yq eval ".cluster.nodes.control.node${NODE_NUMBER}.ip = \"$TARGET_IP\"" -i "$CONFIG_FILE" +yq eval ".cluster.nodes.control.node${NODE_NUMBER}.interface = \"$ACTIVE_INTERFACE\"" -i "$CONFIG_FILE" +yq eval ".cluster.nodes.control.node${NODE_NUMBER}.disk = \"$INSTALL_DISK\"" -i "$CONFIG_FILE" + +echo "✅ Updated config.yaml for node $NODE_NUMBER:" +echo " - Target IP: $TARGET_IP" +echo " - Network interface: $ACTIVE_INTERFACE" +echo " - Installation disk: $INSTALL_DISK" + + +echo "" +echo "🎉 Node $NODE_NUMBER registration complete!" +echo "" +echo "Node configuration saved:" +echo " - Target IP: $TARGET_IP" +echo " - Interface: $ACTIVE_INTERFACE" +echo " - Disk: $INSTALL_DISK" +echo "" +echo "Next steps:" +echo "1. Regenerate machine configurations:" +echo " ./generate-machine-configs.sh" +echo "" +echo "2. Apply configuration to this node:" +echo " talosctl apply-config --insecure -n $NODE_IP --file final/controlplane-node-${NODE_NUMBER}.yaml" +echo "" +echo "3. Wait for reboot and verify static IP connectivity" +echo "4. 
Repeat registration for additional control plane nodes" \ No newline at end of file diff --git a/setup/cluster-nodes/final/.gitkeep b/setup/cluster-nodes/final/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/setup/cluster-nodes/generate-machine-configs.sh b/setup/cluster-nodes/generate-machine-configs.sh new file mode 100755 index 0000000..1a70c02 --- /dev/null +++ b/setup/cluster-nodes/generate-machine-configs.sh @@ -0,0 +1,115 @@ +#!/bin/bash + +# Talos machine configuration generation script +# This script generates machine configs for registered nodes using existing cluster secrets + +set -euo pipefail + +# Check if WC_HOME is set +if [ -z "${WC_HOME:-}" ]; then + echo "Error: WC_HOME environment variable not set. Run \`source ./env.sh\`." + exit 1 +fi + +NODE_SETUP_DIR="${WC_HOME}/setup/cluster-nodes" + +# Check if cluster has been initialized +if [ ! -f "${NODE_SETUP_DIR}/generated/secrets.yaml" ]; then + echo "Error: Cluster not initialized. Run ./init-cluster.sh first." + exit 1 +fi + +# Get cluster configuration from config.yaml +CLUSTER_NAME=$(wild-config cluster.name) +VIP=$(wild-config cluster.nodes.control.vip) + +echo "Generating machine configurations for cluster: $CLUSTER_NAME" + +# Check which nodes have been registered (have hardware config) +REGISTERED_NODES=() +for i in 1 2 3; do + if yq eval ".cluster.nodes.control.node${i}.interface" "${WC_HOME}/config.yaml" | grep -v "null" >/dev/null 2>&1; then + NODE_IP=$(wild-config cluster.nodes.control.node${i}.ip) + REGISTERED_NODES+=("$NODE_IP") + echo "✅ Node $i registered: $NODE_IP" + else + echo "⏸️ Node $i not registered yet" + fi +done + +if [ ${#REGISTERED_NODES[@]} -eq 0 ]; then + echo "" + echo "No nodes have been registered yet." + echo "Run ./detect-node-hardware.sh first." + exit 1 +fi + +# Create directories +mkdir -p "${NODE_SETUP_DIR}/final" "${NODE_SETUP_DIR}/patch" + +# Compile patch templates for registered nodes only +echo "Compiling patch templates..." 
+ +for i in 1 2 3; do + if yq eval ".cluster.nodes.control.node${i}.interface" "${WC_HOME}/config.yaml" | grep -v "null" >/dev/null 2>&1; then + echo "Compiling template for control plane node $i..." + cat "${NODE_SETUP_DIR}/patch.templates/controlplane-node-${i}.yaml" | wild-compile-template > "${NODE_SETUP_DIR}/patch/controlplane-node-${i}.yaml" + fi +done + +# Always compile worker template (doesn't require hardware detection) +if [ -f "${NODE_SETUP_DIR}/patch.templates/worker.yaml" ]; then + cat "${NODE_SETUP_DIR}/patch.templates/worker.yaml" | wild-compile-template > "${NODE_SETUP_DIR}/patch/worker.yaml" +fi + +# Generate final machine configs for registered nodes only +echo "Generating final machine configurations..." +for i in 1 2 3; do + if yq eval ".cluster.nodes.control.node${i}.interface" "${WC_HOME}/config.yaml" | grep -v "null" >/dev/null 2>&1; then + echo "Generating config for control plane node $i..." + talosctl machineconfig patch "${NODE_SETUP_DIR}/generated/controlplane.yaml" --patch @"${NODE_SETUP_DIR}/patch/controlplane-node-${i}.yaml" -o "${NODE_SETUP_DIR}/final/controlplane-node-${i}.yaml" + fi +done + +# Always generate worker config (doesn't require hardware detection) +if [ -f "${NODE_SETUP_DIR}/patch/worker.yaml" ]; then + echo "Generating worker config..." + talosctl machineconfig patch "${NODE_SETUP_DIR}/generated/worker.yaml" --patch @"${NODE_SETUP_DIR}/patch/worker.yaml" -o "${NODE_SETUP_DIR}/final/worker.yaml" +fi + +# Update talosctl context with registered nodes +echo "Updating talosctl context..." +if [ ${#REGISTERED_NODES[@]} -gt 0 ]; then + talosctl config node "${REGISTERED_NODES[@]}" +fi + +echo "" +echo "✅ Machine configurations generated successfully!" 
+echo "" +echo "Generated configs:" +for i in 1 2 3; do + if [ -f "${NODE_SETUP_DIR}/final/controlplane-node-${i}.yaml" ]; then + NODE_IP=$(wild-config cluster.nodes.control.node${i}.ip) + echo " - ${NODE_SETUP_DIR}/final/controlplane-node-${i}.yaml (target IP: $NODE_IP)" + fi +done +if [ -f "${NODE_SETUP_DIR}/final/worker.yaml" ]; then + echo " - ${NODE_SETUP_DIR}/final/worker.yaml" +fi +echo "" +echo "Current talosctl configuration:" +talosctl config info +echo "" +echo "Next steps:" +echo "1. Apply configurations to nodes in maintenance mode:" +for i in 1 2 3; do + if [ -f "${NODE_SETUP_DIR}/final/controlplane-node-${i}.yaml" ]; then + echo " talosctl apply-config --insecure -n --file ${NODE_SETUP_DIR}/final/controlplane-node-${i}.yaml" + fi +done +echo "" +echo "2. Wait for nodes to reboot with static IPs, then bootstrap cluster with ANY control node:" +echo " talosctl bootstrap --nodes 192.168.8.31 --endpoint 192.168.8.31" +echo "" +echo "3. Get kubeconfig:" +echo " talosctl kubeconfig" diff --git a/setup/cluster-nodes/generated/controlplane.yaml b/setup/cluster-nodes/generated/controlplane.yaml new file mode 100644 index 0000000..efee010 --- /dev/null +++ b/setup/cluster-nodes/generated/controlplane.yaml @@ -0,0 +1,577 @@ +version: v1alpha1 # Indicates the schema used to decode the contents. +debug: false # Enable verbose logging to the console. +persist: true +# Provides machine specific configuration options. +machine: + type: controlplane # Defines the role of the machine within the cluster. + token: t1yf7w.zwevymjw6v0v1q76 # The `token` is used by a machine to join the PKI of the cluster. + # The root certificate authority of the PKI. 
+ ca: + crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQekNCOHFBREFnRUNBaEVBa2JEQ2VJR09iTlBZZGQxRTBNSUozVEFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qVXdOakl6TURJek9ERXpXaGNOTXpVd05qSXhNREl6T0RFeldqQVFNUTR3REFZRApWUVFLRXdWMFlXeHZjekFxTUFVR0F5dGxjQU1oQVBhbVhHamhnN0FFUmpQZUFJL3dQK21YWVZsYm95M01TUTErCm1CTGh3NmhLbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRk12QnhpY2tXOXVaZWR0ZgppblRzK3p1U2VLK2FNQVVHQXl0bGNBTkJBSEl5Y2ttT3lGMWEvTVJROXp4a1lRcy81clptRjl0YTVsZktCamVlCmRLV0lVbFNRNkY4c1hjZ1orWlhOcXNjSHNwbzFKdStQUVVwa3VocWREdDBRblFjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + key: LS0tLS1CRUdJTiBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0KTUM0Q0FRQXdCUVlESzJWd0JDSUVJT0hOamQ1blVzdVRGRXpsQmtFOVhkZUJ4b1AxMk9mY2R4a0tjQmZlU0xKbgotLS0tLUVORCBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0K + # Extra certificate subject alternative names for the machine's certificate. + certSANs: [] + # # Uncomment this to enable SANs. + # - 10.0.0.10 + # - 172.16.0.10 + # - 192.168.0.10 + + # Used to provide additional options to the kubelet. + kubelet: + image: ghcr.io/siderolabs/kubelet:v1.33.1 # The `image` field is an optional reference to an alternative kubelet image. + defaultRuntimeSeccompProfileEnabled: true # Enable container runtime default Seccomp profile. + disableManifestsDirectory: true # The `disableManifestsDirectory` field configures the kubelet to get static pod manifests from the /etc/kubernetes/manifests directory. + + # # The `ClusterDNS` field is an optional reference to an alternative kubelet clusterDNS ip list. + # clusterDNS: + # - 10.96.0.10 + # - 169.254.2.53 + + # # The `extraArgs` field is used to provide additional flags to the kubelet. + # extraArgs: + # key: value + + # # The `extraMounts` field is used to add additional mounts to the kubelet container. + # extraMounts: + # - destination: /var/lib/example # Destination is the absolute path where the mount will be placed in the container. 
+ # type: bind # Type specifies the mount kind. + # source: /var/lib/example # Source specifies the source path of the mount. + # # Options are fstab style mount options. + # options: + # - bind + # - rshared + # - rw + + # # The `extraConfig` field is used to provide kubelet configuration overrides. + # extraConfig: + # serverTLSBootstrap: true + + # # The `KubeletCredentialProviderConfig` field is used to provide kubelet credential configuration. + # credentialProviderConfig: + # apiVersion: kubelet.config.k8s.io/v1 + # kind: CredentialProviderConfig + # providers: + # - apiVersion: credentialprovider.kubelet.k8s.io/v1 + # defaultCacheDuration: 12h + # matchImages: + # - '*.dkr.ecr.*.amazonaws.com' + # - '*.dkr.ecr.*.amazonaws.com.cn' + # - '*.dkr.ecr-fips.*.amazonaws.com' + # - '*.dkr.ecr.us-iso-east-1.c2s.ic.gov' + # - '*.dkr.ecr.us-isob-east-1.sc2s.sgov.gov' + # name: ecr-credential-provider + + # # The `nodeIP` field is used to configure `--node-ip` flag for the kubelet. + # nodeIP: + # # The `validSubnets` field configures the networks to pick kubelet node IP from. + # validSubnets: + # - 10.0.0.0/8 + # - '!10.0.0.3/32' + # - fdc7::/16 + # Provides machine specific network configuration options. + network: {} + # # `interfaces` is used to define the network interface configuration. + # interfaces: + # - interface: enp0s1 # The interface name. + # # Assigns static IP addresses to the interface. + # addresses: + # - 192.168.2.0/24 + # # A list of routes associated with the interface. + # routes: + # - network: 0.0.0.0/0 # The route's network (destination). + # gateway: 192.168.2.1 # The route's gateway (if empty, creates link scope route). + # metric: 1024 # The optional metric for the route. + # mtu: 1500 # The interface's MTU. + # + # # # Picks a network device using the selector. + + # # # select a device with bus prefix 00:*. + # # deviceSelector: + # # busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard. 
+ # # # select a device with mac address matching `*:f0:ab` and `virtio` kernel driver. + # # deviceSelector: + # # hardwareAddr: '*:f0:ab' # Device hardware (MAC) address, supports matching by wildcard. + # # driver: virtio_net # Kernel driver, supports matching by wildcard. + # # # select a device with bus prefix 00:*, a device with mac address matching `*:f0:ab` and `virtio` kernel driver. + # # deviceSelector: + # # - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard. + # # - hardwareAddr: '*:f0:ab' # Device hardware (MAC) address, supports matching by wildcard. + # # driver: virtio_net # Kernel driver, supports matching by wildcard. + + # # # Bond specific options. + # # bond: + # # # The interfaces that make up the bond. + # # interfaces: + # # - enp2s0 + # # - enp2s1 + # # # Picks a network device using the selector. + # # deviceSelectors: + # # - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard. + # # - hardwareAddr: '*:f0:ab' # Device hardware (MAC) address, supports matching by wildcard. + # # driver: virtio_net # Kernel driver, supports matching by wildcard. + # # mode: 802.3ad # A bond option. + # # lacpRate: fast # A bond option. + + # # # Bridge specific options. + # # bridge: + # # # The interfaces that make up the bridge. + # # interfaces: + # # - enxda4042ca9a51 + # # - enxae2a6774c259 + # # # Enable STP on this bridge. + # # stp: + # # enabled: true # Whether Spanning Tree Protocol (STP) is enabled. + + # # # Configure this device as a bridge port. + # # bridgePort: + # # master: br0 # The name of the bridge master interface + + # # # Indicates if DHCP should be used to configure the interface. + # # dhcp: true + + # # # DHCP specific options. + # # dhcpOptions: + # # routeMetric: 1024 # The priority of all routes received via DHCP. + + # # # Wireguard specific configuration. + + # # # wireguard server example + # # wireguard: + # # privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded). 
+ # # listenPort: 51111 # Specifies a device's listening port. + # # # Specifies a list of peer configurations to apply to a device. + # # peers: + # # - publicKey: ABCDEF... # Specifies the public key of this peer. + # # endpoint: 192.168.1.3 # Specifies the endpoint of this peer entry. + # # # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer. + # # allowedIPs: + # # - 192.168.1.0/24 + # # # wireguard peer example + # # wireguard: + # # privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded). + # # # Specifies a list of peer configurations to apply to a device. + # # peers: + # # - publicKey: ABCDEF... # Specifies the public key of this peer. + # # endpoint: 192.168.1.2:51822 # Specifies the endpoint of this peer entry. + # # persistentKeepaliveInterval: 10s # Specifies the persistent keepalive interval for this peer. + # # # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer. + # # allowedIPs: + # # - 192.168.1.0/24 + + # # # Virtual (shared) IP address configuration. + + # # # layer2 vip example + # # vip: + # # ip: 172.16.199.55 # Specifies the IP address to be used. + + # # Used to statically set the nameservers for the machine. + # nameservers: + # - 8.8.8.8 + # - 1.1.1.1 + + # # Used to statically set arbitrary search domains. + # searchDomains: + # - example.org + # - example.com + + # # Allows for extra entries to be added to the `/etc/hosts` file + # extraHostEntries: + # - ip: 192.168.1.100 # The IP of the host. + # # The host alias. + # aliases: + # - example + # - example.domain.tld + + # # Configures KubeSpan feature. + # kubespan: + # enabled: true # Enable the KubeSpan feature. + + # Used to provide instructions for installations. + install: + disk: /dev/sda # The disk used for installations. + image: ghcr.io/siderolabs/installer:v1.10.3 # Allows for supplying the image used to perform the installation. 
+ wipe: false # Indicates if the installation disk should be wiped at installation time. + + # # Look up disk using disk attributes like model, size, serial and others. + # diskSelector: + # size: 4GB # Disk size. + # model: WDC* # Disk model `/sys/block//device/model`. + # busPath: /pci0000:00/0000:00:17.0/ata1/host0/target0:0:0/0:0:0:0 # Disk bus path. + + # # Allows for supplying extra kernel args via the bootloader. + # extraKernelArgs: + # - talos.platform=metal + # - reboot=k + # Used to configure the machine's container image registry mirrors. + registries: {} + # # Specifies mirror configuration for each registry host namespace. + # mirrors: + # ghcr.io: + # # List of endpoints (URLs) for registry mirrors to use. + # endpoints: + # - https://registry.insecure + # - https://ghcr.io/v2/ + + # # Specifies TLS & auth configuration for HTTPS image registries. + # config: + # registry.insecure: + # # The TLS configuration for the registry. + # tls: + # insecureSkipVerify: true # Skip TLS server certificate verification (not recommended). + # + # # # Enable mutual TLS authentication with the registry. + # # clientIdentity: + # # crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t + # # key: LS0tIEVYQU1QTEUgS0VZIC0tLQ== + # + # # # The auth configuration for this registry. + # # auth: + # # username: username # Optional registry authentication. + # # password: password # Optional registry authentication. + + # Features describe individual Talos features that can be switched on or off. + features: + rbac: true # Enable role-based access control (RBAC). + stableHostname: true # Enable stable default hostname. + apidCheckExtKeyUsage: true # Enable checks for extended key usage of client certificates in apid. + diskQuotaSupport: true # Enable XFS project quota support for EPHEMERAL partition and user disks. 
+ # KubePrism - local proxy/load balancer on defined port that will distribute + kubePrism: + enabled: true # Enable KubePrism support - will start local load balancing proxy. + port: 7445 # KubePrism port. + # Configures host DNS caching resolver. + hostDNS: + enabled: true # Enable host DNS caching resolver. + forwardKubeDNSToHost: true # Use the host DNS resolver as upstream for Kubernetes CoreDNS pods. + + # # Configure Talos API access from Kubernetes pods. + # kubernetesTalosAPIAccess: + # enabled: true # Enable Talos API access from Kubernetes pods. + # # The list of Talos API roles which can be granted for access from Kubernetes pods. + # allowedRoles: + # - os:reader + # # The list of Kubernetes namespaces Talos API access is available from. + # allowedKubernetesNamespaces: + # - kube-system + # Configures the node labels for the machine. + nodeLabels: + node.kubernetes.io/exclude-from-external-load-balancers: "" + + # # Provides machine specific control plane configuration options. + + # # ControlPlane definition example. + # controlPlane: + # # Controller manager machine specific configuration options. + # controllerManager: + # disabled: false # Disable kube-controller-manager on the node. + # # Scheduler machine specific configuration options. + # scheduler: + # disabled: true # Disable kube-scheduler on the node. + + # # Used to provide static pod definitions to be run by the kubelet directly bypassing the kube-apiserver. + + # # nginx static pod. + # pods: + # - apiVersion: v1 + # kind: pod + # metadata: + # name: nginx + # spec: + # containers: + # - image: nginx + # name: nginx + + # # Allows the addition of user specified files. + + # # MachineFiles usage example. + # files: + # - content: '...' # The contents of the file. + # permissions: 0o666 # The file's permissions in octal. + # path: /tmp/file.txt # The path of the file. + # op: append # The operation to use + + # # The `env` field allows for the addition of environment variables. 
+ + # # Environment variables definition examples. + # env: + # GRPC_GO_LOG_SEVERITY_LEVEL: info + # GRPC_GO_LOG_VERBOSITY_LEVEL: "99" + # https_proxy: http://SERVER:PORT/ + # env: + # GRPC_GO_LOG_SEVERITY_LEVEL: error + # https_proxy: https://USERNAME:PASSWORD@SERVER:PORT/ + # env: + # https_proxy: http://DOMAIN\USERNAME:PASSWORD@SERVER:PORT/ + + # # Used to configure the machine's time settings. + + # # Example configuration for cloudflare ntp server. + # time: + # disabled: false # Indicates if the time service is disabled for the machine. + # # description: | + # servers: + # - time.cloudflare.com + # bootTimeout: 2m0s # Specifies the timeout when the node time is considered to be in sync unlocking the boot sequence. + + # # Used to configure the machine's sysctls. + + # # MachineSysctls usage example. + # sysctls: + # kernel.domainname: talos.dev + # net.ipv4.ip_forward: "0" + # net/ipv6/conf/eth0.100/disable_ipv6: "1" + + # # Used to configure the machine's sysfs. + + # # MachineSysfs usage example. + # sysfs: + # devices.system.cpu.cpu0.cpufreq.scaling_governor: performance + + # # Machine system disk encryption configuration. + # systemDiskEncryption: + # # Ephemeral partition encryption. + # ephemeral: + # provider: luks2 # Encryption provider to use for the encryption. + # # Defines the encryption keys generation and storage method. + # keys: + # - # Deterministically generated key from the node UUID and PartitionLabel. + # nodeID: {} + # slot: 0 # Key slot number for LUKS2 encryption. + # + # # # KMS managed encryption key. + # # kms: + # # endpoint: https://192.168.88.21:4443 # KMS endpoint to Seal/Unseal the key. + # + # # # Cipher kind to use for the encryption. Depends on the encryption provider. + # # cipher: aes-xts-plain64 + + # # # Defines the encryption sector size. + # # blockSize: 4096 + + # # # Additional --perf parameters for the LUKS2 encryption. 
+ # # options: + # # - no_read_workqueue + # # - no_write_workqueue + + # # Configures the udev system. + # udev: + # # List of udev rules to apply to the udev system + # rules: + # - SUBSYSTEM=="drm", KERNEL=="renderD*", GROUP="44", MODE="0660" + + # # Configures the logging system. + # logging: + # # Logging destination. + # destinations: + # - endpoint: tcp://1.2.3.4:12345 # Where to send logs. Supported protocols are "tcp" and "udp". + # format: json_lines # Logs format. + + # # Configures the kernel. + # kernel: + # # Kernel modules to load. + # modules: + # - name: brtfs # Module name. + + # # Configures the seccomp profiles for the machine. + # seccompProfiles: + # - name: audit.json # The `name` field is used to provide the file name of the seccomp profile. + # # The `value` field is used to provide the seccomp profile. + # value: + # defaultAction: SCMP_ACT_LOG + + # # Override (patch) settings in the default OCI runtime spec for CRI containers. + + # # override default open file limit + # baseRuntimeSpecOverrides: + # process: + # rlimits: + # - hard: 1024 + # soft: 1024 + # type: RLIMIT_NOFILE + + # # Configures the node annotations for the machine. + + # # node annotations example. + # nodeAnnotations: + # customer.io/rack: r13a25 + + # # Configures the node taints for the machine. Effect is optional. + + # # node taints example. + # nodeTaints: + # exampleTaint: exampleTaintValue:NoSchedule +# Provides cluster specific configuration options. +cluster: + id: 1DOt3ZYTVTzEG_Q2IYnScCjz1rxZYwWRHV9hGXBu1UE= # Globally unique identifier for this cluster (base64 encoded random 32 bytes). + secret: qvOKMH5RJtMOPSLBnWCPV4apReFGTd1czZ+tfz11/jI= # Shared secret of cluster (base64 encoded random 32 bytes). + # Provides control plane specific configuration options. + controlPlane: + endpoint: https://192.168.8.30:6443 # Endpoint is the canonical controlplane endpoint, which can be an IP address or a DNS hostname. 
+ clusterName: demo-cluster # Configures the cluster's name. + # Provides cluster specific network configuration options. + network: + dnsDomain: cluster.local # The domain used by Kubernetes DNS. + # The pod subnet CIDR. + podSubnets: + - 10.244.0.0/16 + # The service subnet CIDR. + serviceSubnets: + - 10.96.0.0/12 + + # # The CNI used. + # cni: + # name: custom # Name of CNI to use. + # # URLs containing manifests to apply for the CNI. + # urls: + # - https://docs.projectcalico.org/archive/v3.20/manifests/canal.yaml + token: ed454d.o4jsg75idc817ojs # The [bootstrap token](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) used to join the cluster. + secretboxEncryptionSecret: e+8hExoi1Ap4IS6StTsScp72EXKAE2Xi+J7irS7UeG0= # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/). + # The base64 encoded root certificate authority used by Kubernetes. + ca: + crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpakNDQVMrZ0F3SUJBZ0lRRWU5cFdPWEFzd09PNm9NYXNDaXRtakFLQmdncWhrak9QUVFEQWpBVk1STXcKRVFZRFZRUUtFd3ByZFdKbGNtNWxkR1Z6TUI0WERUSTFNRFl5TXpBeU16Z3hNMW9YRFRNMU1EWXlNVEF5TXpneApNMW93RlRFVE1CRUdBMVVFQ2hNS2EzVmlaWEp1WlhSbGN6QlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VICkEwSUFCQ3p0YTA1T3NWOU1NaVg4WDZEdC9xbkhWelkra2tqZ01rcjdsU1kzaERPbmVWYnBhOTJmSHlkS1QyWEgKcWN1L3FJWHpodTg0ckN0VWJuQUsyckJUekFPallUQmZNQTRHQTFVZER3RUIvd1FFQXdJQ2hEQWRCZ05WSFNVRQpGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFCkZnUVVtWEhwMmM5bGRtdFg0Y2RibDlpM0Rwd05GYzB3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQVBwVXVoNmIKYUMwaXdzNTh5WWVlYXVMU1JhbnEveVNUcGo2T0N4UGkvTXJpQWlFQW1DUVdRQ290NkM5b0c5TUlaeDFmMmMxcApBUFRFTHFNQm1vZ1NLSis5dXZBPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + key: 
LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUpMVWF4Z2RXR0Flb1ZNRW1CYkZHUjBjbTJMK1ZxNXFsVVZMaE1USHF1ZnVvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFTE8xclRrNnhYMHd5SmZ4Zm9PMytxY2RYTmo2U1NPQXlTdnVWSmplRU02ZDVWdWxyM1o4ZgpKMHBQWmNlcHk3K29oZk9HN3ppc0sxUnVjQXJhc0ZQTUF3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= + # The base64 encoded aggregator certificate authority used by Kubernetes for front-proxy certificate generation. + aggregatorCA: + crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJZRENDQVFhZ0F3SUJBZ0lSQVB2Y2ZReS9pbWkzQUtZdm1GNnExcmd3Q2dZSUtvWkl6ajBFQXdJd0FEQWUKRncweU5UQTJNak13TWpNNE1UTmFGdzB6TlRBMk1qRXdNak00TVROYU1BQXdXVEFUQmdjcWhrak9QUUlCQmdncQpoa2pPUFFNQkJ3TkNBQVI5NjFKWXl4N2ZxSXJHaURhMTUvVFVTc2xoR2xjSWhzandvcGFpTDg0dzNiQVBaOVdQCjliRThKUnJOTUIvVGkxSUJwbm1IbitXZ3pjeFBnbmllYzZnWG8yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FvUXcKSFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4dwpIUVlEVlIwT0JCWUVGQ29XYVB4engxL01IanlqcVR1WkhXY2hOeXoxTUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDCklHNWdQRHhmYVhNVlMwTEJ5bDNLOENLZVRGNHlBQnV0Zk0vT0hKRGR6ZHNsQWlFQW1pVU9tOU5ma2pQY2ducEcKZzdqd0NQbzczNW5zNXV4d2RRdEZpbjdnMEhvPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUxwZzZoSlBhR3A0ZmRPdkQwVGUwZklPSWJvWUdHdUM4OXBHbThWU3NYWE1vQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFZmV0U1dNc2UzNmlLeG9nMnRlZjAxRXJKWVJwWENJYkk4S0tXb2kvT01OMndEMmZWai9XeApQQ1VhelRBZjA0dFNBYVo1aDUvbG9NM01UNEo0bm5Pb0Z3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= + # The base64 encoded private key for service account token generation. 
+ serviceAccount: + key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS0FJQkFBS0NBZ0VBK21FdWh4OWxrbXJXeUdaeWIvcXZCU1hUcys0R2lPRDZ2TlJhRSsrR044WnZxWkI1CktTWFM0Q3pzZXE4V0dKOXV2Yzg0WmdaWk9wY3ZZbXM1T1BKdzE2MjJFSUlaV1FKeXdzZ0F2NWFsTWxUZ1BxLzEKejRtSjlURW5lajZVMUE4cXU1U3FYa3F5dzNZNFdsVUU1TnlhR0d3RE9yMlduTjFTMWI0YXh0V2ZLa1hxUjlFUApvOWVrK1g3UHQwdVhQV0RQQlNiSUE1V3BQRkdqN3dic1lMMTcwcW1GemwvRElUY1Q2S3ROb0lxYTVWVGpqRmRkCkRDY2VKQ3ZldFMvT1F2WG1pcXhtTnBPbW02eFhqcGxRMmNYVUg5b3NsSHREUG4vMUszNlBVWlpaZFJHU2lvQm0KM0RHZHlOU2huN0pCN0J6dmZzaGFqU3pxeExxUmNhaHhMcVdZV2hPUEJiTmVXZ0lTMm5CcDJRYXZhMUR2YkpneQpadGVVaW1EK0VUZlB1QXFZOW9OS0Z0eFVTaS9pTUpYMTM0ZFUvQVZPRXZqaXhPNnlQQjZHVUxpb0ppOHJRVG9TCmVDNStRWXFSU2RSMzhhWFo3R2VSaUlvR3BqMndRY2Y2emVoRHJTUUdabU5BZlpaenV3T1JGei9pRTJrWTBXRGwKV1p2RFlTSFNXbk5UdmZyQk0xN1pEYzNGdTRaSGNaWUpKVGdCMDJGS2kzcS9uRWdudy9zTEhHUEl3SVIvaDlidgpzcVRVMDJYaHRKQlgwYUE2RlFqaG1NTGNvLzF0ci84Y3BPVVcvdVhPM2Y5czZHMW1OY21qeDNVamJqU09xSlRnCmFYVTlGeWZJR2lYei9JcDg0Q2Jsb0wvRXJxQmVXVEQvV2twMWF1QThQcXp6emFmU3NrSzNnd2Rla2NjQ0F3RUEKQVFLQ0FnQWVQUEt4dSs5NE90eWNwWTY5aWx0dElPVTJqanU0elRSSzdpUnJmWnAxT0VzUHJKU05ZbldHRmJiVwpRWTN4YnY1MkFFV0hRTTRHNlpwVE93Vi8rZnVrZUk5aE9BSnRWd0tGYSswaDRPK0ExWEpObW56eWZHR3VaeW9sCmluMmo0ZjhKcjNSOGl4aVlJRG9YRFdjdFovSlk3N2FHSWhQRFRHYkVJZW81bllsVVFYbXFyd3RzcTA3NmJoVVMKUmNLZ0FEQ1FVVFRkQmZhWWc4MldGbEoyMlNZbFNpR1FTODFxUUt6RldwR01uc3RYMWZtSWlmRXNBTG9VVStpdQpIaUM5YlNyVFpaVzU1L2xrNzBWQWJMQ3dmdTFWanNqMzE2NXhLUTFJSEVmeWFsalJ4Q0VHc1dkNndWS1ZIZytLClAxZC9JZndra00yQUk1bG96K3ExSjNWMUtqenNxdGVyY0JKTWhuTVdEYUp5NzhZaGZSZnY0TlNieC9ObjEveW0KanpvWXVjd3pRVEhsd0dLZUhoNG12OWZxM3U5cVJoTlEzNmNjOHowcndmT01BOFpBMVJOOFhkOG82dkxkNitHSQpSbDV6eHpoZ283MXB5V0dNNlZ5L3FqK1F0aWVZVzUrMHdUNVFqbW5WL256bDZLSWZzZGU5Q0xzcG02RnhUWVJlCjE5YzAwemlOWE56V3dPMG4yeTZkaWpKamErZ0lmT0pzVFlFb2dJQ0MxczB0N0orRGU0cHV4anVyalRjMTdZYkcKK1BpejMySmFCVDByYUUxdWlQZ1lhL3Bta1plRjBZTFgzemc4NGhSNHF3WmZsaHdNNTIxKzBJRWRRb29jd2Yycgoyb25xTWlVd2NhaVZzWEVBdjJzRDlwRkd3UEg4MUplY2JBcWNmZkJTcjVPY3VvSmsyUUtDQVFFQS93Nm1EbnFUClliK3dvOEl1SUpUanFjdzYwY1R
QZzRnS0tsVzJFRWNqMW5qQitaZ2xzblZheHdMZ0QyaXd1d29BZWg1OTUzWkgKbjFoVk5Eb2VCcGJXcXBRY2VuZjlWVzNUMXpNVjVhZTNYenR1MzkrTExZUlVHakV3NitlMWNrendUSlRBYndnZAp5TnM5TjNDNno0bkhmd0NqRHc2VDhWTVFpRVB6akEveEp3L1RTQzBwRHQ5cTFQZ0hMMHBQMllkdkxvYlpEajJLCkRFb1ErcVE3Tm1XeXlLWGQxWUhZK3VaTDZ1SVlYUDNLSjVWQ0N6ZjlHVHZRUi9XL29DdTgzZzdzdWM3YndCajMKYnN5aElWQUxDTXRXSFhCVDdmNXJJVlhuZHEwdGl5cGQ2NTJDTjBya20xRHZ2L0tsTjZJV01jRkFudjRPV1M0aAphdEt0a3d6SVZCdmdQd0tDQVFFQSswNGJXaDVBVmRreXRzUy9peUlOWDQ1MXZsT0YvQVNIZW5EVjVQUDYxSWpXCll3eXFDNTlSdG0rbEI5MDNvZjVSM0RqQTZXelFvNTdUOFBwSmY2di8wN2RHSzQ2alM5ODRoNEswSzZseWllUHAKUlVlbFpEVDNIbi9WK2hhTUFscnlCUFNyRlFyRkVqdmNOMWN3SmMwTEtDSVBpNGVNeGYwMEdiTHErQ0Fic0szQQpCT3N1cDVxWlNMQWcrRGpIVDdGYnpyOTBMSlN1QnFNNXp0cnJHa1NlbmxQNEtRbGFRMTdEeWlJT2tVZUMvekhFCmg2K1NJMXNla3JHeTNEK3NrQW9HZTlOMVQyL3RPM2lsYVhtdTRIdVkwa3NCckNtZ3EzVTZROXZ0aW8yRmluL1QKQkQ2Y3Z2aUkxN1RJa3lIZkZWZktvRklyOWhIT0RYdEFad2lSQnFsc2VRS0NBUUFyOFQ4a3dYT0E1TUN2QmZaaQpnS1JVamE0WWs5cllvMmgwOEwxa1FvMW5Gdmo4WW4wa0tOblI3YW5pbmJ2TkRhVVZaUWwyQmtmQ3FUcE12REtPCkdoQ3o1TDZmVHVyamUvK0NWUGZSMERwa2V0M1lUakF4VUZvWkJSNlRsaUVKcHozRFErRi9mNXQ2RG1PV21LSm0KdlNzVXMyeGtYTE9hWVNBNUNkUDg3b1l5bjZSY0RBUEYzeklOclFtMzJRcTJ4SUdnTjNWUDRjUlY1N0RUTGRaUgp3ZVd5Y2ZrdEhxamVXU3o5TTZUVTZKaWFoem1RcXoyOHlqUlJJWUs1T3EvWVppUGN3MG5TNTdwQmFabmRIbWc0ClJLZjZmRzdKVXdyci9GdmJjMnlrVEZGUUZadm9vTXVRQXJxN2pEZHd4VWtqbTFMaDBZMXhTZVJSL2lnUGJLVmEKOEU2TEFvSUJBQ1Yxc2h3UDBGVTdxQlNZWlZqdS9ZRlY4ZlVwN0JueDd1UHdkK0hHQUlpMzBRVTR1UXc4ZG1pMApZYXczYkhpSU9WbVRXQ1l6WXpKUWxaVWhLZDJQSFBaSkpudU5xb2Uvd1dScHRrT2Y0WVB1WmpJK2lNZlVJVlg1CmhrTGVJNGFpV2RzbFFXOUVpTFc4R0lwalE3a094Ry82QzhrbnJuTkEyQWhRcERmU1NXNWZwL1RUdmNPY0J1ZFAKNGNvK1pHOWJwNnk4Mnl0ZUNrYlJBK2Z5dUFMVlliT0dIc0szTXk1QnJQdXZjZTV6ODNIbzBEdk5qd0lZTGdsOQoxWVNCTlU3UFA4SXJkaHdlT2dXWWFVZThyTFdubHRNWi9TalZsNjZYTGRVNXJrSHQ4SThCbU1uVUwzZEVBdG5zCmg4MXV5aHNiV0FmbjE4ZTVSYmE2dlpIZU5BZ0RMemtDZ2dFQkFKRXRJemdmdE85UE5RSDlQOHBBTnFaclpqWkMKZGJaaThCUkNTUy83b2tHeWtkQzZsdFJVd0c2ajIxNXZZcjlmVW94ODU5ZkI0RjhEK0FXWHNKSm44YnlQZFBuWQpwU1ZRMEV3a045aTE4WnowTitEbWVZRC9
CUmdyVHA1OVU1Mmo5Q1ZyQWNQRlphVk00b0xwaEVSeDZvSURPOEcvCk9wUEZkVnJvMFhyN1lpbENiYVJLVWVWbjZWKzFIZ25zelhiUE9sakhrcGdXSXdKb1RkNkVWVDlvbXNVeFlVejcKRUR5L2RXNmVxVFBMUHR5Q2hKZlo5WDB6M09uUWpLbzcwdHhQa1VRTmw0azhqMU9mMFhMaklacmd6MmVub0FRZgpQYXhSc1lCckhNVnI5eStXaDhZdFdESGx1NUU4NlNaMXNIaHphOHhZZWpoQXRndzdqa0FyNWcxL2dOYz0KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K + # API server specific configuration options. + apiServer: + image: registry.k8s.io/kube-apiserver:v1.33.1 # The container image used in the API server manifest. + # Extra certificate subject alternative names for the API server's certificate. + certSANs: + - 192.168.8.30 + disablePodSecurityPolicy: true # Disable PodSecurityPolicy in the API server and default manifests. + # Configure the API server admission plugins. + admissionControl: + - name: PodSecurity # Name is the name of the admission controller. + # Configuration is an embedded configuration object to be used as the plugin's + configuration: + apiVersion: pod-security.admission.config.k8s.io/v1alpha1 + defaults: + audit: restricted + audit-version: latest + enforce: baseline + enforce-version: latest + warn: restricted + warn-version: latest + exemptions: + namespaces: + - kube-system + runtimeClasses: [] + usernames: [] + kind: PodSecurityConfiguration + # Configure the API server audit policy. + auditPolicy: + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + - level: Metadata + + # # Configure the API server authorization config. Node and RBAC authorizers are always added irrespective of the configuration. + # authorizationConfig: + # - type: Webhook # Type is the name of the authorizer. Allowed values are `Node`, `RBAC`, and `Webhook`. + # name: webhook # Name is used to describe the authorizer. + # # webhook is the configuration for the webhook authorizer. 
+ # webhook: + # connectionInfo: + # type: InClusterConfig + # failurePolicy: Deny + # matchConditionSubjectAccessReviewVersion: v1 + # matchConditions: + # - expression: has(request.resourceAttributes) + # - expression: '!(\''system:serviceaccounts:kube-system\'' in request.groups)' + # subjectAccessReviewVersion: v1 + # timeout: 3s + # - type: Webhook # Type is the name of the authorizer. Allowed values are `Node`, `RBAC`, and `Webhook`. + # name: in-cluster-authorizer # Name is used to describe the authorizer. + # # webhook is the configuration for the webhook authorizer. + # webhook: + # connectionInfo: + # type: InClusterConfig + # failurePolicy: NoOpinion + # matchConditionSubjectAccessReviewVersion: v1 + # subjectAccessReviewVersion: v1 + # timeout: 3s + # Controller manager server specific configuration options. + controllerManager: + image: registry.k8s.io/kube-controller-manager:v1.33.1 # The container image used in the controller manager manifest. + # Kube-proxy server-specific configuration options + proxy: + image: registry.k8s.io/kube-proxy:v1.33.1 # The container image used in the kube-proxy manifest. + + # # Disable kube-proxy deployment on cluster bootstrap. + # disabled: false + # Scheduler server specific configuration options. + scheduler: + image: registry.k8s.io/kube-scheduler:v1.33.1 # The container image used in the scheduler manifest. + # Configures cluster member discovery. + discovery: + enabled: true # Enable the cluster membership discovery feature. + # Configure registries used for cluster member discovery. + registries: + # Kubernetes registry uses Kubernetes API server to discover cluster members and stores additional information + kubernetes: + disabled: true # Disable Kubernetes discovery registry. + # Service registry is using an external service to push and pull information about cluster members. + service: {} + # # External service endpoint. + # endpoint: https://discovery.talos.dev/ + # Etcd specific configuration options. 
+ etcd: + # The `ca` is the root certificate authority of the PKI. + ca: + crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJmakNDQVNTZ0F3SUJBZ0lSQU5pSkxOUTFZZU5ZL1c0V1pnTVR2UFF3Q2dZSUtvWkl6ajBFQXdJd0R6RU4KTUFzR0ExVUVDaE1FWlhSalpEQWVGdzB5TlRBMk1qTXdNak00TVROYUZ3MHpOVEEyTWpFd01qTTRNVE5hTUE4eApEVEFMQmdOVkJBb1RCR1YwWTJRd1dUQVRCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFUUWg0T0N3M2VVClpUajhadllHdnh6Mkd2UFdGN0NMOWFwVElxRTdzZkh5YzJ6UW1Ic1NpcGFabW1zR0kyLzZPaVJWV280V2JLeDUKSnMwRW12bkVUYmFSbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSApBd0VHQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRkdGVlFqQzUxYXFtCjhyT2l6UVJXTDNkc2RvNndNQW9HQ0NxR1NNNDlCQU1DQTBnQU1FVUNJQ2hvcm9JaVJ4b0VDZEN3dE40UFV4MGoKRUwwM1A3UGJTMDFhQWhNVHJPYkpBaUVBL09mb2RweVd0VlNIK1ZBRVBOcjZaanFOdnFEQTNWQmkraXNQY1YybwpiQUk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU8walRMSmU1TXNzUDhqK3hxbi9Dd0FxSXk1RHo2V1U4MXg2OW5sVFI4S1NvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFMEllRGdzTjNsR1U0L0diMkJyOGM5aHJ6MWhld2kvV3FVeUtoTzdIeDhuTnMwSmg3RW9xVwptWnByQmlOditqb2tWVnFPRm15c2VTYk5CSnI1eEUyMmtRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= + + # # The container image used to create the etcd service. + # image: gcr.io/etcd-development/etcd:v3.5.21 + + # # The `advertisedSubnets` field configures the networks to pick etcd advertised IP from. + # advertisedSubnets: + # - 10.0.0.0/8 + # A list of urls that point to additional manifests. + extraManifests: [] + # - https://www.example.com/manifest1.yaml + # - https://www.example.com/manifest2.yaml + + # A list of inline Kubernetes manifests. + inlineManifests: [] + # - name: namespace-ci # Name of the manifest. + # contents: |- # Manifest contents as a string. + # apiVersion: v1 + # kind: Namespace + # metadata: + # name: ci + + + # # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/). 
+ + # # Decryption secret example (do not use in production!). + # aescbcEncryptionSecret: z01mye6j16bspJYtTB/5SFX8j7Ph4JXxM2Xuu4vsBPM= + + # # Core DNS specific configuration options. + # coreDNS: + # image: registry.k8s.io/coredns/coredns:v1.12.1 # The `image` field is an override to the default coredns image. + + # # External cloud provider configuration. + # externalCloudProvider: + # enabled: true # Enable external cloud provider. + # # A list of urls that point to additional manifests for an external cloud provider. + # manifests: + # - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/rbac.yaml + # - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/aws-cloud-controller-manager-daemonset.yaml + + # # A map of key value pairs that will be added while fetching the extraManifests. + # extraManifestHeaders: + # Token: "1234567" + # X-ExtraInfo: info + + # # Settings for admin kubeconfig generation. + # adminKubeconfig: + # certLifetime: 1h0m0s # Admin kubeconfig certificate lifetime (default is 1 year). + + # # Allows running workload on control-plane nodes. 
+ # allowSchedulingOnControlPlanes: true diff --git a/setup/cluster-nodes/generated/secrets.yaml b/setup/cluster-nodes/generated/secrets.yaml new file mode 100644 index 0000000..8a515a5 --- /dev/null +++ b/setup/cluster-nodes/generated/secrets.yaml @@ -0,0 +1,23 @@ +cluster: + id: 1DOt3ZYTVTzEG_Q2IYnScCjz1rxZYwWRHV9hGXBu1UE= + secret: qvOKMH5RJtMOPSLBnWCPV4apReFGTd1czZ+tfz11/jI= +secrets: + bootstraptoken: ed454d.o4jsg75idc817ojs + secretboxencryptionsecret: e+8hExoi1Ap4IS6StTsScp72EXKAE2Xi+J7irS7UeG0= +trustdinfo: + token: t1yf7w.zwevymjw6v0v1q76 +certs: + etcd: + crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJmakNDQVNTZ0F3SUJBZ0lSQU5pSkxOUTFZZU5ZL1c0V1pnTVR2UFF3Q2dZSUtvWkl6ajBFQXdJd0R6RU4KTUFzR0ExVUVDaE1FWlhSalpEQWVGdzB5TlRBMk1qTXdNak00TVROYUZ3MHpOVEEyTWpFd01qTTRNVE5hTUE4eApEVEFMQmdOVkJBb1RCR1YwWTJRd1dUQVRCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFUUWg0T0N3M2VVClpUajhadllHdnh6Mkd2UFdGN0NMOWFwVElxRTdzZkh5YzJ6UW1Ic1NpcGFabW1zR0kyLzZPaVJWV280V2JLeDUKSnMwRW12bkVUYmFSbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSApBd0VHQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRkdGVlFqQzUxYXFtCjhyT2l6UVJXTDNkc2RvNndNQW9HQ0NxR1NNNDlCQU1DQTBnQU1FVUNJQ2hvcm9JaVJ4b0VDZEN3dE40UFV4MGoKRUwwM1A3UGJTMDFhQWhNVHJPYkpBaUVBL09mb2RweVd0VlNIK1ZBRVBOcjZaanFOdnFEQTNWQmkraXNQY1YybwpiQUk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU8walRMSmU1TXNzUDhqK3hxbi9Dd0FxSXk1RHo2V1U4MXg2OW5sVFI4S1NvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFMEllRGdzTjNsR1U0L0diMkJyOGM5aHJ6MWhld2kvV3FVeUtoTzdIeDhuTnMwSmg3RW9xVwptWnByQmlOditqb2tWVnFPRm15c2VTYk5CSnI1eEUyMmtRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= + k8s: + crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpakNDQVMrZ0F3SUJBZ0lRRWU5cFdPWEFzd09PNm9NYXNDaXRtakFLQmdncWhrak9QUVFEQWpBVk1STXcKRVFZRFZRUUtFd3ByZFdKbGNtNWxkR1Z6TUI0WERUSTFNRFl5TXpBeU16Z3hNMW9YRFRNMU1EWXlNVEF5TXpneApNMW93RlRFVE1CRUdBMVVFQ2hNS2EzVmlaWEp1WlhSbGN6QlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VICkEwSUFCQ3p0YTA1T3NWOU1NaVg4WDZEdC9xbkhWelkra2tqZ01rcjdsU1kzaERPbmVWYnBhOTJmSHlkS1QyWEgKcWN1L3FJWHpodTg0ckN0VWJuQUsyckJUekFPallUQmZNQTRHQTFVZER3RUIvd1FFQXdJQ2hEQWRCZ05WSFNVRQpGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFCkZnUVVtWEhwMmM5bGRtdFg0Y2RibDlpM0Rwd05GYzB3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQVBwVXVoNmIKYUMwaXdzNTh5WWVlYXVMU1JhbnEveVNUcGo2T0N4UGkvTXJpQWlFQW1DUVdRQ290NkM5b0c5TUlaeDFmMmMxcApBUFRFTHFNQm1vZ1NLSis5dXZBPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUpMVWF4Z2RXR0Flb1ZNRW1CYkZHUjBjbTJMK1ZxNXFsVVZMaE1USHF1ZnVvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFTE8xclRrNnhYMHd5SmZ4Zm9PMytxY2RYTmo2U1NPQXlTdnVWSmplRU02ZDVWdWxyM1o4ZgpKMHBQWmNlcHk3K29oZk9HN3ppc0sxUnVjQXJhc0ZQTUF3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= + k8saggregator: + crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJZRENDQVFhZ0F3SUJBZ0lSQVB2Y2ZReS9pbWkzQUtZdm1GNnExcmd3Q2dZSUtvWkl6ajBFQXdJd0FEQWUKRncweU5UQTJNak13TWpNNE1UTmFGdzB6TlRBMk1qRXdNak00TVROYU1BQXdXVEFUQmdjcWhrak9QUUlCQmdncQpoa2pPUFFNQkJ3TkNBQVI5NjFKWXl4N2ZxSXJHaURhMTUvVFVTc2xoR2xjSWhzandvcGFpTDg0dzNiQVBaOVdQCjliRThKUnJOTUIvVGkxSUJwbm1IbitXZ3pjeFBnbmllYzZnWG8yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FvUXcKSFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4dwpIUVlEVlIwT0JCWUVGQ29XYVB4engxL01IanlqcVR1WkhXY2hOeXoxTUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDCklHNWdQRHhmYVhNVlMwTEJ5bDNLOENLZVRGNHlBQnV0Zk0vT0hKRGR6ZHNsQWlFQW1pVU9tOU5ma2pQY2ducEcKZzdqd0NQbzczNW5zNXV4d2RRdEZpbjdnMEhvPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + key: 
LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUxwZzZoSlBhR3A0ZmRPdkQwVGUwZklPSWJvWUdHdUM4OXBHbThWU3NYWE1vQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFZmV0U1dNc2UzNmlLeG9nMnRlZjAxRXJKWVJwWENJYkk4S0tXb2kvT01OMndEMmZWai9XeApQQ1VhelRBZjA0dFNBYVo1aDUvbG9NM01UNEo0bm5Pb0Z3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= + k8sserviceaccount: + key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS0FJQkFBS0NBZ0VBK21FdWh4OWxrbXJXeUdaeWIvcXZCU1hUcys0R2lPRDZ2TlJhRSsrR044WnZxWkI1CktTWFM0Q3pzZXE4V0dKOXV2Yzg0WmdaWk9wY3ZZbXM1T1BKdzE2MjJFSUlaV1FKeXdzZ0F2NWFsTWxUZ1BxLzEKejRtSjlURW5lajZVMUE4cXU1U3FYa3F5dzNZNFdsVUU1TnlhR0d3RE9yMlduTjFTMWI0YXh0V2ZLa1hxUjlFUApvOWVrK1g3UHQwdVhQV0RQQlNiSUE1V3BQRkdqN3dic1lMMTcwcW1GemwvRElUY1Q2S3ROb0lxYTVWVGpqRmRkCkRDY2VKQ3ZldFMvT1F2WG1pcXhtTnBPbW02eFhqcGxRMmNYVUg5b3NsSHREUG4vMUszNlBVWlpaZFJHU2lvQm0KM0RHZHlOU2huN0pCN0J6dmZzaGFqU3pxeExxUmNhaHhMcVdZV2hPUEJiTmVXZ0lTMm5CcDJRYXZhMUR2YkpneQpadGVVaW1EK0VUZlB1QXFZOW9OS0Z0eFVTaS9pTUpYMTM0ZFUvQVZPRXZqaXhPNnlQQjZHVUxpb0ppOHJRVG9TCmVDNStRWXFSU2RSMzhhWFo3R2VSaUlvR3BqMndRY2Y2emVoRHJTUUdabU5BZlpaenV3T1JGei9pRTJrWTBXRGwKV1p2RFlTSFNXbk5UdmZyQk0xN1pEYzNGdTRaSGNaWUpKVGdCMDJGS2kzcS9uRWdudy9zTEhHUEl3SVIvaDlidgpzcVRVMDJYaHRKQlgwYUE2RlFqaG1NTGNvLzF0ci84Y3BPVVcvdVhPM2Y5czZHMW1OY21qeDNVamJqU09xSlRnCmFYVTlGeWZJR2lYei9JcDg0Q2Jsb0wvRXJxQmVXVEQvV2twMWF1QThQcXp6emFmU3NrSzNnd2Rla2NjQ0F3RUEKQVFLQ0FnQWVQUEt4dSs5NE90eWNwWTY5aWx0dElPVTJqanU0elRSSzdpUnJmWnAxT0VzUHJKU05ZbldHRmJiVwpRWTN4YnY1MkFFV0hRTTRHNlpwVE93Vi8rZnVrZUk5aE9BSnRWd0tGYSswaDRPK0ExWEpObW56eWZHR3VaeW9sCmluMmo0ZjhKcjNSOGl4aVlJRG9YRFdjdFovSlk3N2FHSWhQRFRHYkVJZW81bllsVVFYbXFyd3RzcTA3NmJoVVMKUmNLZ0FEQ1FVVFRkQmZhWWc4MldGbEoyMlNZbFNpR1FTODFxUUt6RldwR01uc3RYMWZtSWlmRXNBTG9VVStpdQpIaUM5YlNyVFpaVzU1L2xrNzBWQWJMQ3dmdTFWanNqMzE2NXhLUTFJSEVmeWFsalJ4Q0VHc1dkNndWS1ZIZytLClAxZC9JZndra00yQUk1bG96K3ExSjNWMUtqenNxdGVyY0JKTWhuTVdEYUp5NzhZaGZSZnY0TlNieC9ObjEveW0KanpvWXVjd3pRVEhsd0dLZUhoNG12OWZxM3U5cVJoTlEzNmNjOHowcndmT01BOFpBMVJOOFhkOG82dkxkNitHSQpSbDV6eHpoZ283MXB5V0dNNlZ5L3FqK1F0aWVZVzUrMHdUNVFqbW5WL256bDZLSWZ
zZGU5Q0xzcG02RnhUWVJlCjE5YzAwemlOWE56V3dPMG4yeTZkaWpKamErZ0lmT0pzVFlFb2dJQ0MxczB0N0orRGU0cHV4anVyalRjMTdZYkcKK1BpejMySmFCVDByYUUxdWlQZ1lhL3Bta1plRjBZTFgzemc4NGhSNHF3WmZsaHdNNTIxKzBJRWRRb29jd2Yycgoyb25xTWlVd2NhaVZzWEVBdjJzRDlwRkd3UEg4MUplY2JBcWNmZkJTcjVPY3VvSmsyUUtDQVFFQS93Nm1EbnFUClliK3dvOEl1SUpUanFjdzYwY1RQZzRnS0tsVzJFRWNqMW5qQitaZ2xzblZheHdMZ0QyaXd1d29BZWg1OTUzWkgKbjFoVk5Eb2VCcGJXcXBRY2VuZjlWVzNUMXpNVjVhZTNYenR1MzkrTExZUlVHakV3NitlMWNrendUSlRBYndnZAp5TnM5TjNDNno0bkhmd0NqRHc2VDhWTVFpRVB6akEveEp3L1RTQzBwRHQ5cTFQZ0hMMHBQMllkdkxvYlpEajJLCkRFb1ErcVE3Tm1XeXlLWGQxWUhZK3VaTDZ1SVlYUDNLSjVWQ0N6ZjlHVHZRUi9XL29DdTgzZzdzdWM3YndCajMKYnN5aElWQUxDTXRXSFhCVDdmNXJJVlhuZHEwdGl5cGQ2NTJDTjBya20xRHZ2L0tsTjZJV01jRkFudjRPV1M0aAphdEt0a3d6SVZCdmdQd0tDQVFFQSswNGJXaDVBVmRreXRzUy9peUlOWDQ1MXZsT0YvQVNIZW5EVjVQUDYxSWpXCll3eXFDNTlSdG0rbEI5MDNvZjVSM0RqQTZXelFvNTdUOFBwSmY2di8wN2RHSzQ2alM5ODRoNEswSzZseWllUHAKUlVlbFpEVDNIbi9WK2hhTUFscnlCUFNyRlFyRkVqdmNOMWN3SmMwTEtDSVBpNGVNeGYwMEdiTHErQ0Fic0szQQpCT3N1cDVxWlNMQWcrRGpIVDdGYnpyOTBMSlN1QnFNNXp0cnJHa1NlbmxQNEtRbGFRMTdEeWlJT2tVZUMvekhFCmg2K1NJMXNla3JHeTNEK3NrQW9HZTlOMVQyL3RPM2lsYVhtdTRIdVkwa3NCckNtZ3EzVTZROXZ0aW8yRmluL1QKQkQ2Y3Z2aUkxN1RJa3lIZkZWZktvRklyOWhIT0RYdEFad2lSQnFsc2VRS0NBUUFyOFQ4a3dYT0E1TUN2QmZaaQpnS1JVamE0WWs5cllvMmgwOEwxa1FvMW5Gdmo4WW4wa0tOblI3YW5pbmJ2TkRhVVZaUWwyQmtmQ3FUcE12REtPCkdoQ3o1TDZmVHVyamUvK0NWUGZSMERwa2V0M1lUakF4VUZvWkJSNlRsaUVKcHozRFErRi9mNXQ2RG1PV21LSm0KdlNzVXMyeGtYTE9hWVNBNUNkUDg3b1l5bjZSY0RBUEYzeklOclFtMzJRcTJ4SUdnTjNWUDRjUlY1N0RUTGRaUgp3ZVd5Y2ZrdEhxamVXU3o5TTZUVTZKaWFoem1RcXoyOHlqUlJJWUs1T3EvWVppUGN3MG5TNTdwQmFabmRIbWc0ClJLZjZmRzdKVXdyci9GdmJjMnlrVEZGUUZadm9vTXVRQXJxN2pEZHd4VWtqbTFMaDBZMXhTZVJSL2lnUGJLVmEKOEU2TEFvSUJBQ1Yxc2h3UDBGVTdxQlNZWlZqdS9ZRlY4ZlVwN0JueDd1UHdkK0hHQUlpMzBRVTR1UXc4ZG1pMApZYXczYkhpSU9WbVRXQ1l6WXpKUWxaVWhLZDJQSFBaSkpudU5xb2Uvd1dScHRrT2Y0WVB1WmpJK2lNZlVJVlg1CmhrTGVJNGFpV2RzbFFXOUVpTFc4R0lwalE3a094Ry82QzhrbnJuTkEyQWhRcERmU1NXNWZwL1RUdmNPY0J1ZFAKNGNvK1pHOWJwNnk4Mnl0ZUNrYlJBK2Z5dUFMVlliT0dIc0szTXk1QnJQdXZjZTV6ODNIbzB
Edk5qd0lZTGdsOQoxWVNCTlU3UFA4SXJkaHdlT2dXWWFVZThyTFdubHRNWi9TalZsNjZYTGRVNXJrSHQ4SThCbU1uVUwzZEVBdG5zCmg4MXV5aHNiV0FmbjE4ZTVSYmE2dlpIZU5BZ0RMemtDZ2dFQkFKRXRJemdmdE85UE5RSDlQOHBBTnFaclpqWkMKZGJaaThCUkNTUy83b2tHeWtkQzZsdFJVd0c2ajIxNXZZcjlmVW94ODU5ZkI0RjhEK0FXWHNKSm44YnlQZFBuWQpwU1ZRMEV3a045aTE4WnowTitEbWVZRC9CUmdyVHA1OVU1Mmo5Q1ZyQWNQRlphVk00b0xwaEVSeDZvSURPOEcvCk9wUEZkVnJvMFhyN1lpbENiYVJLVWVWbjZWKzFIZ25zelhiUE9sakhrcGdXSXdKb1RkNkVWVDlvbXNVeFlVejcKRUR5L2RXNmVxVFBMUHR5Q2hKZlo5WDB6M09uUWpLbzcwdHhQa1VRTmw0azhqMU9mMFhMaklacmd6MmVub0FRZgpQYXhSc1lCckhNVnI5eStXaDhZdFdESGx1NUU4NlNaMXNIaHphOHhZZWpoQXRndzdqa0FyNWcxL2dOYz0KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K + os: + crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQekNCOHFBREFnRUNBaEVBa2JEQ2VJR09iTlBZZGQxRTBNSUozVEFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qVXdOakl6TURJek9ERXpXaGNOTXpVd05qSXhNREl6T0RFeldqQVFNUTR3REFZRApWUVFLRXdWMFlXeHZjekFxTUFVR0F5dGxjQU1oQVBhbVhHamhnN0FFUmpQZUFJL3dQK21YWVZsYm95M01TUTErCm1CTGh3NmhLbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRk12QnhpY2tXOXVaZWR0ZgppblRzK3p1U2VLK2FNQVVHQXl0bGNBTkJBSEl5Y2ttT3lGMWEvTVJROXp4a1lRcy81clptRjl0YTVsZktCamVlCmRLV0lVbFNRNkY4c1hjZ1orWlhOcXNjSHNwbzFKdStQUVVwa3VocWREdDBRblFjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + key: LS0tLS1CRUdJTiBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0KTUM0Q0FRQXdCUVlESzJWd0JDSUVJT0hOamQ1blVzdVRGRXpsQmtFOVhkZUJ4b1AxMk9mY2R4a0tjQmZlU0xKbgotLS0tLUVORCBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0K diff --git a/setup/cluster-nodes/generated/talosconfig b/setup/cluster-nodes/generated/talosconfig new file mode 100644 index 0000000..4b655d6 --- /dev/null +++ b/setup/cluster-nodes/generated/talosconfig @@ -0,0 +1,7 @@ +context: demo-cluster +contexts: + demo-cluster: + endpoints: [] + ca: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQekNCOHFBREFnRUNBaEVBa2JEQ2VJR09iTlBZZGQxRTBNSUozVEFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qVXdOakl6TURJek9ERXpXaGNOTXpVd05qSXhNREl6T0RFeldqQVFNUTR3REFZRApWUVFLRXdWMFlXeHZjekFxTUFVR0F5dGxjQU1oQVBhbVhHamhnN0FFUmpQZUFJL3dQK21YWVZsYm95M01TUTErCm1CTGh3NmhLbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRk12QnhpY2tXOXVaZWR0ZgppblRzK3p1U2VLK2FNQVVHQXl0bGNBTkJBSEl5Y2ttT3lGMWEvTVJROXp4a1lRcy81clptRjl0YTVsZktCamVlCmRLV0lVbFNRNkY4c1hjZ1orWlhOcXNjSHNwbzFKdStQUVVwa3VocWREdDBRblFjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJLVENCM0tBREFnRUNBaEVBc0xlWW83MXVpVUlEK3RUSEkrbFJjakFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qVXdOakl6TURJek9ERXpXaGNOTWpZd05qSXpNREl6T0RFeldqQVRNUkV3RHdZRApWUVFLRXdodmN6cGhaRzFwYmpBcU1BVUdBeXRsY0FNaEFPek5qd2FncnBYMFc0TWs1OWpoTmtRVU5UTUNobHFoCklnb1lrWnNkWUdhc28wZ3dSakFPQmdOVkhROEJBZjhFQkFNQ0I0QXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0h3WURWUjBqQkJnd0ZvQVV5OEhHSnlSYjI1bDUyMStLZE96N081SjRyNW93QlFZREsyVndBMEVBSmtOcwpGUDZMWUltTDhjR2l3TTRYc0FyZE9XVTdUMzhOaWpvTys2VE80cWRYZTdYSXZwTEZTeXFqRTBuREZtenpmdGVKCk9PM3BiMHlFUmtLcG1rNUJCUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + key: LS0tLS1CRUdJTiBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0KTUM0Q0FRQXdCUVlESzJWd0JDSUVJQmhtWXY4Wk5kaVVBMG5mbHAvT3VOdHJiM09Rc2xkZUc4cEU2YkFKTStENwotLS0tLUVORCBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0K diff --git a/setup/cluster-nodes/generated/worker.yaml b/setup/cluster-nodes/generated/worker.yaml new file mode 100644 index 0000000..b51f152 --- /dev/null +++ b/setup/cluster-nodes/generated/worker.yaml @@ -0,0 +1,606 @@ +version: v1alpha1 # Indicates the schema used to decode the contents. +debug: false # Enable verbose logging to the console. +persist: true +# Provides machine specific configuration options. +machine: + type: worker # Defines the role of the machine within the cluster. 
+ token: t1yf7w.zwevymjw6v0v1q76 # The `token` is used by a machine to join the PKI of the cluster. + # The root certificate authority of the PKI. + ca: + crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQekNCOHFBREFnRUNBaEVBa2JEQ2VJR09iTlBZZGQxRTBNSUozVEFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qVXdOakl6TURJek9ERXpXaGNOTXpVd05qSXhNREl6T0RFeldqQVFNUTR3REFZRApWUVFLRXdWMFlXeHZjekFxTUFVR0F5dGxjQU1oQVBhbVhHamhnN0FFUmpQZUFJL3dQK21YWVZsYm95M01TUTErCm1CTGh3NmhLbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRk12QnhpY2tXOXVaZWR0ZgppblRzK3p1U2VLK2FNQVVHQXl0bGNBTkJBSEl5Y2ttT3lGMWEvTVJROXp4a1lRcy81clptRjl0YTVsZktCamVlCmRLV0lVbFNRNkY4c1hjZ1orWlhOcXNjSHNwbzFKdStQUVVwa3VocWREdDBRblFjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + key: "" + # Extra certificate subject alternative names for the machine's certificate. + certSANs: [] + # # Uncomment this to enable SANs. + # - 10.0.0.10 + # - 172.16.0.10 + # - 192.168.0.10 + + # Used to provide additional options to the kubelet. + kubelet: + image: ghcr.io/siderolabs/kubelet:v1.33.1 # The `image` field is an optional reference to an alternative kubelet image. + defaultRuntimeSeccompProfileEnabled: true # Enable container runtime default Seccomp profile. + disableManifestsDirectory: true # The `disableManifestsDirectory` field configures the kubelet to get static pod manifests from the /etc/kubernetes/manifests directory. + + # # The `ClusterDNS` field is an optional reference to an alternative kubelet clusterDNS ip list. + # clusterDNS: + # - 10.96.0.10 + # - 169.254.2.53 + + # # The `extraArgs` field is used to provide additional flags to the kubelet. + # extraArgs: + # key: value + + # # The `extraMounts` field is used to add additional mounts to the kubelet container. + # extraMounts: + # - destination: /var/lib/example # Destination is the absolute path where the mount will be placed in the container. 
+ # type: bind # Type specifies the mount kind. + # source: /var/lib/example # Source specifies the source path of the mount. + # # Options are fstab style mount options. + # options: + # - bind + # - rshared + # - rw + + # # The `extraConfig` field is used to provide kubelet configuration overrides. + # extraConfig: + # serverTLSBootstrap: true + + # # The `KubeletCredentialProviderConfig` field is used to provide kubelet credential configuration. + # credentialProviderConfig: + # apiVersion: kubelet.config.k8s.io/v1 + # kind: CredentialProviderConfig + # providers: + # - apiVersion: credentialprovider.kubelet.k8s.io/v1 + # defaultCacheDuration: 12h + # matchImages: + # - '*.dkr.ecr.*.amazonaws.com' + # - '*.dkr.ecr.*.amazonaws.com.cn' + # - '*.dkr.ecr-fips.*.amazonaws.com' + # - '*.dkr.ecr.us-iso-east-1.c2s.ic.gov' + # - '*.dkr.ecr.us-isob-east-1.sc2s.sgov.gov' + # name: ecr-credential-provider + + # # The `nodeIP` field is used to configure `--node-ip` flag for the kubelet. + # nodeIP: + # # The `validSubnets` field configures the networks to pick kubelet node IP from. + # validSubnets: + # - 10.0.0.0/8 + # - '!10.0.0.3/32' + # - fdc7::/16 + # Provides machine specific network configuration options. + network: {} + # # `interfaces` is used to define the network interface configuration. + # interfaces: + # - interface: enp0s1 # The interface name. + # # Assigns static IP addresses to the interface. + # addresses: + # - 192.168.2.0/24 + # # A list of routes associated with the interface. + # routes: + # - network: 0.0.0.0/0 # The route's network (destination). + # gateway: 192.168.2.1 # The route's gateway (if empty, creates link scope route). + # metric: 1024 # The optional metric for the route. + # mtu: 1500 # The interface's MTU. + # + # # # Picks a network device using the selector. + + # # # select a device with bus prefix 00:*. + # # deviceSelector: + # # busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard. 
+ # # # select a device with mac address matching `*:f0:ab` and `virtio` kernel driver. + # # deviceSelector: + # # hardwareAddr: '*:f0:ab' # Device hardware (MAC) address, supports matching by wildcard. + # # driver: virtio_net # Kernel driver, supports matching by wildcard. + # # # select a device with bus prefix 00:*, a device with mac address matching `*:f0:ab` and `virtio` kernel driver. + # # deviceSelector: + # # - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard. + # # - hardwareAddr: '*:f0:ab' # Device hardware (MAC) address, supports matching by wildcard. + # # driver: virtio_net # Kernel driver, supports matching by wildcard. + + # # # Bond specific options. + # # bond: + # # # The interfaces that make up the bond. + # # interfaces: + # # - enp2s0 + # # - enp2s1 + # # # Picks a network device using the selector. + # # deviceSelectors: + # # - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard. + # # - hardwareAddr: '*:f0:ab' # Device hardware (MAC) address, supports matching by wildcard. + # # driver: virtio_net # Kernel driver, supports matching by wildcard. + # # mode: 802.3ad # A bond option. + # # lacpRate: fast # A bond option. + + # # # Bridge specific options. + # # bridge: + # # # The interfaces that make up the bridge. + # # interfaces: + # # - enxda4042ca9a51 + # # - enxae2a6774c259 + # # # Enable STP on this bridge. + # # stp: + # # enabled: true # Whether Spanning Tree Protocol (STP) is enabled. + + # # # Configure this device as a bridge port. + # # bridgePort: + # # master: br0 # The name of the bridge master interface + + # # # Indicates if DHCP should be used to configure the interface. + # # dhcp: true + + # # # DHCP specific options. + # # dhcpOptions: + # # routeMetric: 1024 # The priority of all routes received via DHCP. + + # # # Wireguard specific configuration. + + # # # wireguard server example + # # wireguard: + # # privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded). 
+ # # listenPort: 51111 # Specifies a device's listening port. + # # # Specifies a list of peer configurations to apply to a device. + # # peers: + # # - publicKey: ABCDEF... # Specifies the public key of this peer. + # # endpoint: 192.168.1.3 # Specifies the endpoint of this peer entry. + # # # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer. + # # allowedIPs: + # # - 192.168.1.0/24 + # # # wireguard peer example + # # wireguard: + # # privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded). + # # # Specifies a list of peer configurations to apply to a device. + # # peers: + # # - publicKey: ABCDEF... # Specifies the public key of this peer. + # # endpoint: 192.168.1.2:51822 # Specifies the endpoint of this peer entry. + # # persistentKeepaliveInterval: 10s # Specifies the persistent keepalive interval for this peer. + # # # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer. + # # allowedIPs: + # # - 192.168.1.0/24 + + # # # Virtual (shared) IP address configuration. + + # # # layer2 vip example + # # vip: + # # ip: 172.16.199.55 # Specifies the IP address to be used. + + # # Used to statically set the nameservers for the machine. + # nameservers: + # - 8.8.8.8 + # - 1.1.1.1 + + # # Used to statically set arbitrary search domains. + # searchDomains: + # - example.org + # - example.com + + # # Allows for extra entries to be added to the `/etc/hosts` file + # extraHostEntries: + # - ip: 192.168.1.100 # The IP of the host. + # # The host alias. + # aliases: + # - example + # - example.domain.tld + + # # Configures KubeSpan feature. + # kubespan: + # enabled: true # Enable the KubeSpan feature. + + # Used to provide instructions for installations. + install: + disk: /dev/sda # The disk used for installations. + image: ghcr.io/siderolabs/installer:v1.10.3 # Allows for supplying the image used to perform the installation. 
+ wipe: false # Indicates if the installation disk should be wiped at installation time. + + # # Look up disk using disk attributes like model, size, serial and others. + # diskSelector: + # size: 4GB # Disk size. + # model: WDC* # Disk model `/sys/block//device/model`. + # busPath: /pci0000:00/0000:00:17.0/ata1/host0/target0:0:0/0:0:0:0 # Disk bus path. + + # # Allows for supplying extra kernel args via the bootloader. + # extraKernelArgs: + # - talos.platform=metal + # - reboot=k + # Used to configure the machine's container image registry mirrors. + registries: {} + # # Specifies mirror configuration for each registry host namespace. + # mirrors: + # ghcr.io: + # # List of endpoints (URLs) for registry mirrors to use. + # endpoints: + # - https://registry.insecure + # - https://ghcr.io/v2/ + + # # Specifies TLS & auth configuration for HTTPS image registries. + # config: + # registry.insecure: + # # The TLS configuration for the registry. + # tls: + # insecureSkipVerify: true # Skip TLS server certificate verification (not recommended). + # + # # # Enable mutual TLS authentication with the registry. + # # clientIdentity: + # # crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t + # # key: LS0tIEVYQU1QTEUgS0VZIC0tLQ== + # + # # # The auth configuration for this registry. + # # auth: + # # username: username # Optional registry authentication. + # # password: password # Optional registry authentication. + + # Features describe individual Talos features that can be switched on or off. + features: + rbac: true # Enable role-based access control (RBAC). + stableHostname: true # Enable stable default hostname. + apidCheckExtKeyUsage: true # Enable checks for extended key usage of client certificates in apid. + diskQuotaSupport: true # Enable XFS project quota support for EPHEMERAL partition and user disks. 
+ # KubePrism - local proxy/load balancer on defined port that will distribute + kubePrism: + enabled: true # Enable KubePrism support - will start local load balancing proxy. + port: 7445 # KubePrism port. + # Configures host DNS caching resolver. + hostDNS: + enabled: true # Enable host DNS caching resolver. + forwardKubeDNSToHost: true # Use the host DNS resolver as upstream for Kubernetes CoreDNS pods. + + # # Configure Talos API access from Kubernetes pods. + # kubernetesTalosAPIAccess: + # enabled: true # Enable Talos API access from Kubernetes pods. + # # The list of Talos API roles which can be granted for access from Kubernetes pods. + # allowedRoles: + # - os:reader + # # The list of Kubernetes namespaces Talos API access is available from. + # allowedKubernetesNamespaces: + # - kube-system + + # # Provides machine specific control plane configuration options. + + # # ControlPlane definition example. + # controlPlane: + # # Controller manager machine specific configuration options. + # controllerManager: + # disabled: false # Disable kube-controller-manager on the node. + # # Scheduler machine specific configuration options. + # scheduler: + # disabled: true # Disable kube-scheduler on the node. + + # # Used to provide static pod definitions to be run by the kubelet directly bypassing the kube-apiserver. + + # # nginx static pod. + # pods: + # - apiVersion: v1 + # kind: pod + # metadata: + # name: nginx + # spec: + # containers: + # - image: nginx + # name: nginx + + # # Allows the addition of user specified files. + + # # MachineFiles usage example. + # files: + # - content: '...' # The contents of the file. + # permissions: 0o666 # The file's permissions in octal. + # path: /tmp/file.txt # The path of the file. + # op: append # The operation to use + + # # The `env` field allows for the addition of environment variables. + + # # Environment variables definition examples. 
+ # env: + # GRPC_GO_LOG_SEVERITY_LEVEL: info + # GRPC_GO_LOG_VERBOSITY_LEVEL: "99" + # https_proxy: http://SERVER:PORT/ + # env: + # GRPC_GO_LOG_SEVERITY_LEVEL: error + # https_proxy: https://USERNAME:PASSWORD@SERVER:PORT/ + # env: + # https_proxy: http://DOMAIN\USERNAME:PASSWORD@SERVER:PORT/ + + # # Used to configure the machine's time settings. + + # # Example configuration for cloudflare ntp server. + # time: + # disabled: false # Indicates if the time service is disabled for the machine. + # # description: | + # servers: + # - time.cloudflare.com + # bootTimeout: 2m0s # Specifies the timeout when the node time is considered to be in sync unlocking the boot sequence. + + # # Used to configure the machine's sysctls. + + # # MachineSysctls usage example. + # sysctls: + # kernel.domainname: talos.dev + # net.ipv4.ip_forward: "0" + # net/ipv6/conf/eth0.100/disable_ipv6: "1" + + # # Used to configure the machine's sysfs. + + # # MachineSysfs usage example. + # sysfs: + # devices.system.cpu.cpu0.cpufreq.scaling_governor: performance + + # # Machine system disk encryption configuration. + # systemDiskEncryption: + # # Ephemeral partition encryption. + # ephemeral: + # provider: luks2 # Encryption provider to use for the encryption. + # # Defines the encryption keys generation and storage method. + # keys: + # - # Deterministically generated key from the node UUID and PartitionLabel. + # nodeID: {} + # slot: 0 # Key slot number for LUKS2 encryption. + # + # # # KMS managed encryption key. + # # kms: + # # endpoint: https://192.168.88.21:4443 # KMS endpoint to Seal/Unseal the key. + # + # # # Cipher kind to use for the encryption. Depends on the encryption provider. + # # cipher: aes-xts-plain64 + + # # # Defines the encryption sector size. + # # blockSize: 4096 + + # # # Additional --perf parameters for the LUKS2 encryption. + # # options: + # # - no_read_workqueue + # # - no_write_workqueue + + # # Configures the udev system. 
+ # udev: + # # List of udev rules to apply to the udev system + # rules: + # - SUBSYSTEM=="drm", KERNEL=="renderD*", GROUP="44", MODE="0660" + + # # Configures the logging system. + # logging: + # # Logging destination. + # destinations: + # - endpoint: tcp://1.2.3.4:12345 # Where to send logs. Supported protocols are "tcp" and "udp". + # format: json_lines # Logs format. + + # # Configures the kernel. + # kernel: + # # Kernel modules to load. + # modules: + # - name: brtfs # Module name. + + # # Configures the seccomp profiles for the machine. + # seccompProfiles: + # - name: audit.json # The `name` field is used to provide the file name of the seccomp profile. + # # The `value` field is used to provide the seccomp profile. + # value: + # defaultAction: SCMP_ACT_LOG + + # # Override (patch) settings in the default OCI runtime spec for CRI containers. + + # # override default open file limit + # baseRuntimeSpecOverrides: + # process: + # rlimits: + # - hard: 1024 + # soft: 1024 + # type: RLIMIT_NOFILE + + # # Configures the node labels for the machine. + + # # node labels example. + # nodeLabels: + # exampleLabel: exampleLabelValue + + # # Configures the node annotations for the machine. + + # # node annotations example. + # nodeAnnotations: + # customer.io/rack: r13a25 + + # # Configures the node taints for the machine. Effect is optional. + + # # node taints example. + # nodeTaints: + # exampleTaint: exampleTaintValue:NoSchedule +# Provides cluster specific configuration options. +cluster: + id: 1DOt3ZYTVTzEG_Q2IYnScCjz1rxZYwWRHV9hGXBu1UE= # Globally unique identifier for this cluster (base64 encoded random 32 bytes). + secret: qvOKMH5RJtMOPSLBnWCPV4apReFGTd1czZ+tfz11/jI= # Shared secret of cluster (base64 encoded random 32 bytes). + # Provides control plane specific configuration options. + controlPlane: + endpoint: https://192.168.8.30:6443 # Endpoint is the canonical controlplane endpoint, which can be an IP address or a DNS hostname. 
+ clusterName: demo-cluster # Configures the cluster's name. + # Provides cluster specific network configuration options. + network: + dnsDomain: cluster.local # The domain used by Kubernetes DNS. + # The pod subnet CIDR. + podSubnets: + - 10.244.0.0/16 + # The service subnet CIDR. + serviceSubnets: + - 10.96.0.0/12 + + # # The CNI used. + # cni: + # name: custom # Name of CNI to use. + # # URLs containing manifests to apply for the CNI. + # urls: + # - https://docs.projectcalico.org/archive/v3.20/manifests/canal.yaml + token: ed454d.o4jsg75idc817ojs # The [bootstrap token](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) used to join the cluster. + # The base64 encoded root certificate authority used by Kubernetes. + ca: + crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpakNDQVMrZ0F3SUJBZ0lRRWU5cFdPWEFzd09PNm9NYXNDaXRtakFLQmdncWhrak9QUVFEQWpBVk1STXcKRVFZRFZRUUtFd3ByZFdKbGNtNWxkR1Z6TUI0WERUSTFNRFl5TXpBeU16Z3hNMW9YRFRNMU1EWXlNVEF5TXpneApNMW93RlRFVE1CRUdBMVVFQ2hNS2EzVmlaWEp1WlhSbGN6QlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VICkEwSUFCQ3p0YTA1T3NWOU1NaVg4WDZEdC9xbkhWelkra2tqZ01rcjdsU1kzaERPbmVWYnBhOTJmSHlkS1QyWEgKcWN1L3FJWHpodTg0ckN0VWJuQUsyckJUekFPallUQmZNQTRHQTFVZER3RUIvd1FFQXdJQ2hEQWRCZ05WSFNVRQpGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFCkZnUVVtWEhwMmM5bGRtdFg0Y2RibDlpM0Rwd05GYzB3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQVBwVXVoNmIKYUMwaXdzNTh5WWVlYXVMU1JhbnEveVNUcGo2T0N4UGkvTXJpQWlFQW1DUVdRQ290NkM5b0c5TUlaeDFmMmMxcApBUFRFTHFNQm1vZ1NLSis5dXZBPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + key: "" + # Configures cluster member discovery. + discovery: + enabled: true # Enable the cluster membership discovery feature. + # Configure registries used for cluster member discovery. + registries: + # Kubernetes registry uses Kubernetes API server to discover cluster members and stores additional information + kubernetes: + disabled: true # Disable Kubernetes discovery registry. 
+ # Service registry is using an external service to push and pull information about cluster members. + service: {} + # # External service endpoint. + # endpoint: https://discovery.talos.dev/ + + # # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/). + + # # Decryption secret example (do not use in production!). + # aescbcEncryptionSecret: z01mye6j16bspJYtTB/5SFX8j7Ph4JXxM2Xuu4vsBPM= + + # # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/). + + # # Decryption secret example (do not use in production!). + # secretboxEncryptionSecret: z01mye6j16bspJYtTB/5SFX8j7Ph4JXxM2Xuu4vsBPM= + + # # The base64 encoded aggregator certificate authority used by Kubernetes for front-proxy certificate generation. + + # # AggregatorCA example. + # aggregatorCA: + # crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t + # key: LS0tIEVYQU1QTEUgS0VZIC0tLQ== + + # # The base64 encoded private key for service account token generation. + + # # AggregatorCA example. + # serviceAccount: + # key: LS0tIEVYQU1QTEUgS0VZIC0tLQ== + + # # API server specific configuration options. + # apiServer: + # image: registry.k8s.io/kube-apiserver:v1.33.1 # The container image used in the API server manifest. + # # Extra arguments to supply to the API server. + # extraArgs: + # feature-gates: ServerSideApply=true + # http2-max-streams-per-connection: "32" + # # Extra certificate subject alternative names for the API server's certificate. + # certSANs: + # - 1.2.3.4 + # - 4.5.6.7 + # # Configure the API server admission plugins. + # admissionControl: + # - name: PodSecurity # Name is the name of the admission controller. 
+ # # Configuration is an embedded configuration object to be used as the plugin's + # configuration: + # apiVersion: pod-security.admission.config.k8s.io/v1alpha1 + # defaults: + # audit: restricted + # audit-version: latest + # enforce: baseline + # enforce-version: latest + # warn: restricted + # warn-version: latest + # exemptions: + # namespaces: + # - kube-system + # runtimeClasses: [] + # usernames: [] + # kind: PodSecurityConfiguration + # # Configure the API server audit policy. + # auditPolicy: + # apiVersion: audit.k8s.io/v1 + # kind: Policy + # rules: + # - level: Metadata + # # Configure the API server authorization config. Node and RBAC authorizers are always added irrespective of the configuration. + # authorizationConfig: + # - type: Webhook # Type is the name of the authorizer. Allowed values are `Node`, `RBAC`, and `Webhook`. + # name: webhook # Name is used to describe the authorizer. + # # webhook is the configuration for the webhook authorizer. + # webhook: + # connectionInfo: + # type: InClusterConfig + # failurePolicy: Deny + # matchConditionSubjectAccessReviewVersion: v1 + # matchConditions: + # - expression: has(request.resourceAttributes) + # - expression: '!(\''system:serviceaccounts:kube-system\'' in request.groups)' + # subjectAccessReviewVersion: v1 + # timeout: 3s + # - type: Webhook # Type is the name of the authorizer. Allowed values are `Node`, `RBAC`, and `Webhook`. + # name: in-cluster-authorizer # Name is used to describe the authorizer. + # # webhook is the configuration for the webhook authorizer. + # webhook: + # connectionInfo: + # type: InClusterConfig + # failurePolicy: NoOpinion + # matchConditionSubjectAccessReviewVersion: v1 + # subjectAccessReviewVersion: v1 + # timeout: 3s + + # # Controller manager server specific configuration options. + # controllerManager: + # image: registry.k8s.io/kube-controller-manager:v1.33.1 # The container image used in the controller manager manifest. 
+ # # Extra arguments to supply to the controller manager. + # extraArgs: + # feature-gates: ServerSideApply=true + + # # Kube-proxy server-specific configuration options + # proxy: + # disabled: false # Disable kube-proxy deployment on cluster bootstrap. + # image: registry.k8s.io/kube-proxy:v1.33.1 # The container image used in the kube-proxy manifest. + # mode: ipvs # proxy mode of kube-proxy. + # # Extra arguments to supply to kube-proxy. + # extraArgs: + # proxy-mode: iptables + + # # Scheduler server specific configuration options. + # scheduler: + # image: registry.k8s.io/kube-scheduler:v1.33.1 # The container image used in the scheduler manifest. + # # Extra arguments to supply to the scheduler. + # extraArgs: + # feature-gates: AllBeta=true + + # # Etcd specific configuration options. + # etcd: + # image: gcr.io/etcd-development/etcd:v3.5.21 # The container image used to create the etcd service. + # # The `ca` is the root certificate authority of the PKI. + # ca: + # crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t + # key: LS0tIEVYQU1QTEUgS0VZIC0tLQ== + # # Extra arguments to supply to etcd. + # extraArgs: + # election-timeout: "5000" + # # The `advertisedSubnets` field configures the networks to pick etcd advertised IP from. + # advertisedSubnets: + # - 10.0.0.0/8 + + # # Core DNS specific configuration options. + # coreDNS: + # image: registry.k8s.io/coredns/coredns:v1.12.1 # The `image` field is an override to the default coredns image. + + # # External cloud provider configuration. + # externalCloudProvider: + # enabled: true # Enable external cloud provider. + # # A list of urls that point to additional manifests for an external cloud provider. 
+ # manifests: + # - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/rbac.yaml + # - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/aws-cloud-controller-manager-daemonset.yaml + + # # A list of urls that point to additional manifests. + # extraManifests: + # - https://www.example.com/manifest1.yaml + # - https://www.example.com/manifest2.yaml + + # # A map of key value pairs that will be added while fetching the extraManifests. + # extraManifestHeaders: + # Token: "1234567" + # X-ExtraInfo: info + + # # A list of inline Kubernetes manifests. + # inlineManifests: + # - name: namespace-ci # Name of the manifest. + # contents: |- # Manifest contents as a string. + # apiVersion: v1 + # kind: Namespace + # metadata: + # name: ci + + # # Settings for admin kubeconfig generation. + # adminKubeconfig: + # certLifetime: 1h0m0s # Admin kubeconfig certificate lifetime (default is 1 year). + + # # Allows running workload on control-plane nodes. + # allowSchedulingOnControlPlanes: true diff --git a/setup/cluster-nodes/init-cluster.sh b/setup/cluster-nodes/init-cluster.sh new file mode 100755 index 0000000..33ba83e --- /dev/null +++ b/setup/cluster-nodes/init-cluster.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +# Talos cluster initialization script +# This script performs one-time cluster setup: generates secrets, base configs, and sets up talosctl + +set -euo pipefail + +# Check if WC_HOME is set +if [ -z "${WC_HOME:-}" ]; then + echo "Error: WC_HOME environment variable not set. Run \`source ./env.sh\`." 
+    exit 1
+fi
+
+NODE_SETUP_DIR="${WC_HOME}/setup/cluster-nodes"; cd "${NODE_SETUP_DIR}"
+
+# Get cluster configuration from config.yaml
+CLUSTER_NAME=$(wild-config cluster.name)
+VIP=$(wild-config cluster.nodes.control.vip)
+TALOS_VERSION=$(wild-config cluster.nodes.talos.version)
+
+echo "Initializing Talos cluster: $CLUSTER_NAME"
+echo "VIP: $VIP"
+echo "Talos version: $TALOS_VERSION"
+
+# Create directories
+mkdir -p generated final patch
+
+# Check if cluster secrets already exist
+if [ -f "generated/secrets.yaml" ]; then
+    echo ""
+    echo "⚠️  Cluster secrets already exist!"
+    echo "This will regenerate ALL cluster certificates and invalidate existing nodes."
+    echo ""
+    read -p "Do you want to continue? (y/N): " -r
+    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+        echo "Cancelled."
+        exit 0
+    fi
+    echo ""
+fi
+
+# Generate fresh cluster secrets
+echo "Generating cluster secrets..."
+cd generated
+talosctl gen secrets -o secrets.yaml --force
+
+echo "Generating base machine configs..."
+talosctl gen config --with-secrets secrets.yaml "$CLUSTER_NAME" "https://$VIP:6443" --force
+cd ..
+
+# Setup talosctl context
+echo "Setting up talosctl context..."
+
+# Remove existing context if it exists
+talosctl config context "$CLUSTER_NAME" --remove 2>/dev/null || true
+
+# Merge new configuration
+talosctl config merge ./generated/talosconfig
+talosctl config endpoint "$VIP"
+
+echo ""
+echo "✅ Cluster initialization complete!"
+echo ""
+echo "Cluster details:"
+echo "  - Name: $CLUSTER_NAME"
+echo "  - VIP: $VIP"
+echo "  - Secrets: generated/secrets.yaml"
+echo "  - Base configs: generated/controlplane.yaml, generated/worker.yaml"
+echo ""
+echo "Talosctl context configured:"
+talosctl config info
+echo ""
+echo "Next steps:"
+echo "1. Register nodes with hardware detection:"
+echo "   ./detect-node-hardware.sh <node-ip> <node-number>"
+echo ""
+echo "2. Generate machine configurations:"
+echo "   ./generate-machine-configs.sh"
+echo ""
+echo "3. 
Apply configurations to nodes" \ No newline at end of file diff --git a/setup/cluster-nodes/old/setup_node.sh b/setup/cluster-nodes/old/setup_node.sh deleted file mode 100755 index 4e8b6a3..0000000 --- a/setup/cluster-nodes/old/setup_node.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -set -e - -apt-get update - -# Longhorn requirements - -# Install iscsi on all nodes. -# apt-get install open-iscsi -# modprobe iscsi_tcp -# systemctl restart open-iscsi -kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.8.1/deploy/prerequisite/longhorn-iscsi-installation.yaml - -# Install NFSv4 client on all nodes. -# apt-get install nfs-common -kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.8.1/deploy/prerequisite/longhorn-nfs-installation.yaml - -apt-get install cryptsetup - -# To check longhorn requirements: -# curl -sSfL https://raw.githubusercontent.com/longhorn/longhorn/v1.8.1/scripts/environment_check.sh | bash diff --git a/setup/cluster-nodes/patch.templates/controlplane-node-1.yaml b/setup/cluster-nodes/patch.templates/controlplane-node-1.yaml new file mode 100644 index 0000000..eb8e72b --- /dev/null +++ b/setup/cluster-nodes/patch.templates/controlplane-node-1.yaml @@ -0,0 +1,22 @@ +machine: + install: + disk: {{ .cluster.nodes.control.node1.disk }} + image: factory.talos.dev/metal-installer/{{ .cluster.nodes.talos.schematicId}}:{{ .cluster.nodes.talos.version}} + network: + interfaces: + - interface: {{ .cluster.nodes.control.node1.interface }} + dhcp: false + addresses: + - {{ .cluster.nodes.control.node1.ip }}/24 + routes: + - network: 0.0.0.0/0 + gateway: {{ .cloud.router.ip }} + vip: + ip: {{ .cluster.nodes.control.vip }} +cluster: + discovery: + enabled: true + registries: + service: + disabled: true + allowSchedulingOnControlPlanes: true diff --git a/setup/cluster-nodes/patch.templates/controlplane-node-2.yaml b/setup/cluster-nodes/patch.templates/controlplane-node-2.yaml new file mode 100644 index 0000000..d1ebd6f --- 
/dev/null +++ b/setup/cluster-nodes/patch.templates/controlplane-node-2.yaml @@ -0,0 +1,22 @@ +machine: + install: + disk: {{ .cluster.nodes.control.node2.disk }} + image: factory.talos.dev/metal-installer/{{ .cluster.nodes.talos.schematicId}}:{{ .cluster.nodes.talos.version}} + network: + interfaces: + - interface: {{ .cluster.nodes.control.node2.interface }} + dhcp: false + addresses: + - {{ .cluster.nodes.control.node2.ip }}/24 + routes: + - network: 0.0.0.0/0 + gateway: {{ .cloud.router.ip }} + vip: + ip: {{ .cluster.nodes.control.vip }} +cluster: + discovery: + enabled: true + registries: + service: + disabled: true + allowSchedulingOnControlPlanes: true diff --git a/setup/cluster-nodes/patch.templates/controlplane-node-3.yaml b/setup/cluster-nodes/patch.templates/controlplane-node-3.yaml new file mode 100644 index 0000000..d93d339 --- /dev/null +++ b/setup/cluster-nodes/patch.templates/controlplane-node-3.yaml @@ -0,0 +1,22 @@ +machine: + install: + disk: {{ .cluster.nodes.control.node3.disk }} + image: factory.talos.dev/metal-installer/{{ .cluster.nodes.talos.schematicId}}:{{ .cluster.nodes.talos.version}} + network: + interfaces: + - interface: {{ .cluster.nodes.control.node3.interface }} + dhcp: false + addresses: + - {{ .cluster.nodes.control.node3.ip }}/24 + routes: + - network: 0.0.0.0/0 + gateway: {{ .cloud.router.ip }} + vip: + ip: {{ .cluster.nodes.control.vip }} +cluster: + discovery: + enabled: true + registries: + service: + disabled: true + allowSchedulingOnControlPlanes: true diff --git a/setup/cluster-nodes/patch.templates/worker.yaml b/setup/cluster-nodes/patch.templates/worker.yaml new file mode 100644 index 0000000..3c308f4 --- /dev/null +++ b/setup/cluster-nodes/patch.templates/worker.yaml @@ -0,0 +1,22 @@ +machine: + install: + disk: /dev/sdc + network: + interfaces: + - interface: enp4s0 + dhcp: true + kubelet: + extraMounts: + - destination: /var/lib/longhorn + type: bind + source: /var/lib/longhorn + options: + - bind + - rshared + - rw 
+# NOTE: System extensions need to be added via Talos Image Factory +# customization: +# systemExtensions: +# officialExtensions: +# - siderolabs/iscsi-tools +# - siderolabs/util-linux-tools \ No newline at end of file diff --git a/setup/cluster-nodes/patch/.gitkeep b/setup/cluster-nodes/patch/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/setup/cluster-nodes/patch/controlplane.yaml b/setup/cluster-nodes/patch/controlplane.yaml deleted file mode 100644 index 409de0b..0000000 --- a/setup/cluster-nodes/patch/controlplane.yaml +++ /dev/null @@ -1,17 +0,0 @@ -machine: - install: - disk: /dev/sdc - network: - interfaces: - - interface: eth0 - vip: - ip: 192.168.8.20 - - interface: eth1 - dhcp: true -cluster: - discovery: - enabled: true - registries: - service: - disabled: true - allowSchedulingOnControlPlanes: true diff --git a/setup/cluster-nodes/patch/worker.yaml b/setup/cluster-nodes/patch/worker.yaml deleted file mode 100644 index a286018..0000000 --- a/setup/cluster-nodes/patch/worker.yaml +++ /dev/null @@ -1,3 +0,0 @@ -machine: - install: - disk: /dev/sdc diff --git a/setup/cluster/README.md b/setup/cluster/README.md index 13cad45..a23d906 100644 --- a/setup/cluster/README.md +++ b/setup/cluster/README.md @@ -19,22 +19,16 @@ Internet → External DNS → MetalLB LoadBalancer → Traefik → Kubernetes Se ## Key Components -- **MetalLB** - Provides load balancing for bare metal clusters -- **Traefik** - Handles ingress traffic, TLS termination, and routing -- **cert-manager** - Manages TLS certificates -- **CoreDNS** - Provides DNS resolution for services -- **Longhorn** - Distributed storage system for persistent volumes -- **NFS** - Network file system for shared media storage (optional) -- **Kubernetes Dashboard** - Web UI for cluster management (accessible via https://dashboard.internal.${DOMAIN}) -- **Docker Registry** - Private container registry for custom images - -## Configuration Approach - -All infrastructure components use a consistent 
configuration approach: - -1. **Environment Variables** - All configuration settings are managed using environment variables loaded by running `source load-env.sh` -2. **Template Files** - Configuration files use templates with `${VARIABLE}` syntax -3. **Setup Scripts** - Each component has a dedicated script in `infrastructure_setup/` for installation and configuration +- **[MetalLB](metallb/README.md)** - Provides load balancing for bare metal clusters +- **[Traefik](traefik/README.md)** - Handles ingress traffic, TLS termination, and routing +- **[cert-manager](cert-manager/README.md)** - Manages TLS certificates +- **[CoreDNS](coredns/README.md)** - Provides DNS resolution for services +- **[ExternalDNS](externaldns/README.md)** - Automatic DNS record management +- **[Longhorn](longhorn/README.md)** - Distributed storage system for persistent volumes +- **[NFS](nfs/README.md)** - Network file system for shared media storage (optional) +- **[Kubernetes Dashboard](kubernetes-dashboard/README.md)** - Web UI for cluster management (accessible via https://dashboard.internal.${DOMAIN}) +- **[Docker Registry](docker-registry/README.md)** - Private container registry for custom images +- **[Utils](utils/README.md)** - Cluster utilities and debugging tools ## Idempotent Design @@ -47,55 +41,3 @@ All setup scripts are designed to be idempotent: - Changes to configuration will be properly applied on subsequent runs This idempotent approach ensures consistent, reliable infrastructure setup and allows for incremental changes without requiring a complete teardown and rebuild. 
- -## NFS Setup (Optional) - -The infrastructure supports optional NFS (Network File System) for shared media storage across the cluster: - -### Host Setup - -First, set up the NFS server on your chosen host: - -```bash -# Set required environment variables -export NFS_HOST=box-01 # Hostname or IP of NFS server -export NFS_MEDIA_PATH=/data/media # Path to media directory -export NFS_STORAGE_CAPACITY=1Ti # Optional: PV size (default: 250Gi) - -# Run host setup script on the NFS server -./infrastructure_setup/setup-nfs-host.sh -``` - -### Cluster Integration - -Then integrate NFS with your Kubernetes cluster: - -```bash -# Run cluster setup (part of setup-all.sh or standalone) -./infrastructure_setup/setup-nfs.sh -``` - -### Features - -- **Automatic IP detection** - Uses network IP even when hostname resolves to localhost -- **Cluster-wide access** - Any pod can mount the NFS share regardless of node placement -- **Configurable capacity** - Set PersistentVolume size via `NFS_STORAGE_CAPACITY` -- **ReadWriteMany** - Multiple pods can simultaneously access the same storage - -### Usage - -Applications can use NFS storage by setting `storageClassName: nfs` in their PVCs: - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: media-pvc -spec: - accessModes: - - ReadWriteMany - storageClassName: nfs - resources: - requests: - storage: 100Gi -``` diff --git a/setup/cluster/cert-manager/README.md b/setup/cluster/cert-manager/README.md new file mode 100644 index 0000000..e69de29 diff --git a/setup/cluster/setup-cert-manager.sh b/setup/cluster/cert-manager/install.sh similarity index 67% rename from setup/cluster/setup-cert-manager.sh rename to setup/cluster/cert-manager/install.sh index 226ec2d..b76c509 100755 --- a/setup/cluster/setup-cert-manager.sh +++ b/setup/cluster/cert-manager/install.sh @@ -1,20 +1,19 @@ #!/bin/bash set -e -# Navigate to script directory -SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" -SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" -cd 
"$SCRIPT_DIR" - -# Source environment variables -if [[ -f "../load-env.sh" ]]; then - source ../load-env.sh +if [ -z "${WC_HOME}" ]; then + echo "Please source the wildcloud environment first. (e.g., \`source ./env.sh\`)" + exit 1 fi -echo "Setting up cert-manager..." +CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster" +CERT_MANAGER_DIR="${CLUSTER_SETUP_DIR}/cert-manager" -# Create cert-manager namespace -kubectl create namespace cert-manager --dry-run=client -o yaml | kubectl apply -f - +# Process templates with wild-compile-template-dir +echo "Processing cert-manager templates..." +wild-compile-template-dir --clean ${CERT_MANAGER_DIR}/kustomize.template ${CERT_MANAGER_DIR}/kustomize + +echo "Setting up cert-manager..." # Install cert-manager using the official installation method # This installs CRDs, controllers, and webhook components @@ -34,23 +33,12 @@ echo "Waiting additional time for cert-manager webhook to be fully operational.. sleep 30 # Setup Cloudflare API token for DNS01 challenges -if [[ -n "${CLOUDFLARE_API_TOKEN}" ]]; then - echo "Creating Cloudflare API token secret in cert-manager namespace..." - kubectl create secret generic cloudflare-api-token \ - --namespace cert-manager \ - --from-literal=api-token="${CLOUDFLARE_API_TOKEN}" \ - --dry-run=client -o yaml | kubectl apply -f - -else - echo "Warning: CLOUDFLARE_API_TOKEN not set. DNS01 challenges will not work." -fi - -echo "Creating Let's Encrypt issuers..." -cat ${SCRIPT_DIR}/cert-manager/letsencrypt-staging-dns01.yaml | envsubst | kubectl apply -f - -cat ${SCRIPT_DIR}/cert-manager/letsencrypt-prod-dns01.yaml | envsubst | kubectl apply -f - - -# Wait for issuers to be ready -echo "Waiting for Let's Encrypt issuers to be ready..." -sleep 10 +echo "Creating Cloudflare API token secret..." 
+CLOUDFLARE_API_TOKEN=$(wild-secret cluster.certManager.cloudflare.apiToken) || exit 1 +kubectl create secret generic cloudflare-api-token \ + --namespace cert-manager \ + --from-literal=api-token="${CLOUDFLARE_API_TOKEN}" \ + --dry-run=client -o yaml | kubectl apply -f - # Configure cert-manager to use external DNS for challenge verification echo "Configuring cert-manager to use external DNS servers..." @@ -75,10 +63,13 @@ spec: echo "Waiting for cert-manager to restart with new DNS configuration..." kubectl rollout status deployment/cert-manager -n cert-manager --timeout=120s -# Apply wildcard certificates -echo "Creating wildcard certificates..." -cat ${SCRIPT_DIR}/cert-manager/internal-wildcard-certificate.yaml | envsubst | kubectl apply -f - -cat ${SCRIPT_DIR}/cert-manager/wildcard-certificate.yaml | envsubst | kubectl apply -f - +# Apply Let's Encrypt issuers and certificates using kustomize +echo "Creating Let's Encrypt issuers and certificates..." +kubectl apply -k ${CERT_MANAGER_DIR}/kustomize + +# Wait for issuers to be ready +echo "Waiting for Let's Encrypt issuers to be ready..." +sleep 10 echo "Wildcard certificate creation initiated. This may take some time to complete depending on DNS propagation." 
# Wait for the certificates to be issued (with a timeout) @@ -91,3 +82,4 @@ echo "" echo "To verify the installation:" echo " kubectl get pods -n cert-manager" echo " kubectl get clusterissuers" +echo " kubectl get certificates -n cert-manager" diff --git a/setup/cluster/cert-manager/cert-manager.yaml b/setup/cluster/cert-manager/kustomize.template/cert-manager.yaml similarity index 100% rename from setup/cluster/cert-manager/cert-manager.yaml rename to setup/cluster/cert-manager/kustomize.template/cert-manager.yaml diff --git a/setup/cluster/cert-manager/kustomize.template/internal-wildcard-certificate.yaml b/setup/cluster/cert-manager/kustomize.template/internal-wildcard-certificate.yaml new file mode 100644 index 0000000..67fac06 --- /dev/null +++ b/setup/cluster/cert-manager/kustomize.template/internal-wildcard-certificate.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: wildcard-internal-wild-cloud + namespace: cert-manager +spec: + secretName: wildcard-internal-wild-cloud-tls + dnsNames: + - "*.{{ .cloud.internalDomain }}" + - "{{ .cloud.internalDomain }}" + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + duration: 2160h # 90 days + renewBefore: 360h # 15 days + privateKey: + algorithm: RSA + size: 2048 diff --git a/setup/cluster/cert-manager/kustomize.template/kustomization.yaml b/setup/cluster/cert-manager/kustomize.template/kustomization.yaml new file mode 100644 index 0000000..563aa00 --- /dev/null +++ b/setup/cluster/cert-manager/kustomize.template/kustomization.yaml @@ -0,0 +1,12 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +- namespace.yaml +- letsencrypt-staging-dns01.yaml +- letsencrypt-prod-dns01.yaml +- internal-wildcard-certificate.yaml +- wildcard-certificate.yaml + +# Note: cert-manager.yaml contains the main installation manifests +# but is applied separately via URL in the install script \ No newline at end of file diff --git 
a/setup/cluster/cert-manager/kustomize.template/letsencrypt-prod-dns01.yaml b/setup/cluster/cert-manager/kustomize.template/letsencrypt-prod-dns01.yaml new file mode 100644 index 0000000..2800c21 --- /dev/null +++ b/setup/cluster/cert-manager/kustomize.template/letsencrypt-prod-dns01.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod +spec: + acme: + email: {{ .operator.email }} + privateKeySecretRef: + name: letsencrypt-prod + server: https://acme-v02.api.letsencrypt.org/directory + solvers: + # DNS-01 solver for wildcard certificates + - dns01: + cloudflare: + email: {{ .operator.email }} + apiTokenSecretRef: + name: cloudflare-api-token + key: api-token + selector: + dnsZones: + - "{{ .cluster.certManager.cloudflare.domain }}" + # Keep the HTTP-01 solver for non-wildcard certificates + - http01: + ingress: + class: traefik \ No newline at end of file diff --git a/setup/cluster/cert-manager/kustomize.template/letsencrypt-staging-dns01.yaml b/setup/cluster/cert-manager/kustomize.template/letsencrypt-staging-dns01.yaml new file mode 100644 index 0000000..b1e9edf --- /dev/null +++ b/setup/cluster/cert-manager/kustomize.template/letsencrypt-staging-dns01.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-staging +spec: + acme: + email: {{ .operator.email }} + privateKeySecretRef: + name: letsencrypt-staging + server: https://acme-staging-v02.api.letsencrypt.org/directory + solvers: + # DNS-01 solver for wildcard certificates + - dns01: + cloudflare: + email: {{ .operator.email }} + apiTokenSecretRef: + name: cloudflare-api-token + key: api-token + selector: + dnsZones: + - "{{ .cluster.certManager.cloudflare.domain }}" + # Keep the HTTP-01 solver for non-wildcard certificates + - http01: + ingress: + class: traefik \ No newline at end of file diff --git a/setup/cluster/cert-manager/kustomize.template/namespace.yaml 
b/setup/cluster/cert-manager/kustomize.template/namespace.yaml new file mode 100644 index 0000000..661039b --- /dev/null +++ b/setup/cluster/cert-manager/kustomize.template/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager \ No newline at end of file diff --git a/setup/cluster/cert-manager/kustomize.template/wildcard-certificate.yaml b/setup/cluster/cert-manager/kustomize.template/wildcard-certificate.yaml new file mode 100644 index 0000000..3f18fa5 --- /dev/null +++ b/setup/cluster/cert-manager/kustomize.template/wildcard-certificate.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: wildcard-wild-cloud + namespace: cert-manager +spec: + secretName: wildcard-wild-cloud-tls + dnsNames: + - "*.{{ .cloud.domain }}" + - "{{ .cloud.domain }}" + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + duration: 2160h # 90 days + renewBefore: 360h # 15 days + privateKey: + algorithm: RSA + size: 2048 diff --git a/setup/cluster/cert-manager/kustomize/cert-manager.yaml b/setup/cluster/cert-manager/kustomize/cert-manager.yaml new file mode 100644 index 0000000..3ebf667 --- /dev/null +++ b/setup/cluster/cert-manager/kustomize/cert-manager.yaml @@ -0,0 +1,5623 @@ +# Copyright 2022 The cert-manager Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: certificaterequests.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "v1.13.1" +spec: + group: cert-manager.io + names: + kind: CertificateRequest + listKind: CertificateRequestList + plural: certificaterequests + shortNames: + - cr + - crs + singular: certificaterequest + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Approved")].status + name: Approved + type: string + - jsonPath: .status.conditions[?(@.type=="Denied")].status + name: Denied + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + type: string + - jsonPath: .spec.username + name: Requestor + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `Ready` status condition and its `status.failureTime` field. 
\n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of the desired state of the CertificateRequest resource. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + type: object + required: + - issuerRef + - request + properties: + duration: + description: Requested 'duration' (i.e. lifetime) of the Certificate. Note that the issuer may choose to ignore the requested duration, just like any other requested attribute. + type: string + extra: + description: Extra contains extra attributes of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: object + additionalProperties: + type: array + items: + type: string + groups: + description: Groups contains group membership of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: array + items: + type: string + x-kubernetes-list-type: atomic + isCA: + description: "Requested basic constraints isCA value. Note that the issuer may choose to ignore the requested isCA value, just like any other requested attribute. 
\n NOTE: If the CSR in the `Request` field has a BasicConstraints extension, it must have the same isCA value as specified here. \n If true, this will automatically add the `cert sign` usage to the list of requested `usages`." + type: boolean + issuerRef: + description: "Reference to the issuer responsible for issuing the certificate. If the issuer is namespace-scoped, it must be in the same namespace as the Certificate. If the issuer is cluster-scoped, it can be used from any namespace. \n The `name` field of the reference must always be specified." + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + request: + description: "The PEM-encoded X.509 certificate signing request to be submitted to the issuer for signing. \n If the CSR has a BasicConstraints extension, its isCA attribute must match the `isCA` value of this CertificateRequest. If the CSR has a KeyUsage extension, its key usages must match the key usages in the `usages` field of this CertificateRequest. If the CSR has a ExtKeyUsage extension, its extended key usages must match the extended key usages in the `usages` field of this CertificateRequest." + type: string + format: byte + uid: + description: UID contains the uid of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + usages: + description: "Requested key usages and extended key usages. \n NOTE: If the CSR in the `Request` field has uses the KeyUsage or ExtKeyUsage extension, these extensions must have the same values as specified here without any additional values. \n If unset, defaults to `digital signature` and `key encipherment`." + type: array + items: + description: "KeyUsage specifies valid usage contexts for keys. 
See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"" + type: string + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + username: + description: Username contains the name of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + status: + description: 'Status of the CertificateRequest. This is set and managed automatically. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + type: object + properties: + ca: + description: The PEM encoded X.509 certificate of the signer, also known as the CA (Certificate Authority). This is set on a best-effort basis by different issuers. If not set, the CA is assumed to be unknown/not available. + type: string + format: byte + certificate: + description: The PEM encoded X.509 certificate resulting from the certificate signing request. If not set, the CertificateRequest has either not been completed or has failed. More information on failure can be found by checking the `conditions` field. 
+ type: string + format: byte + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`, `InvalidRequest`, `Approved` and `Denied`. + type: array + items: + description: CertificateRequestCondition contains condition information for a CertificateRequest. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`, `InvalidRequest`, `Approved`, `Denied`). + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + failureTime: + description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. 
+ type: string + format: date-time + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: certificates.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "v1.13.1" +spec: + group: cert-manager.io + names: + kind: Certificate + listKind: CertificateList + plural: certificates + shortNames: + - cert + - certs + singular: certificate + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.secretName + name: Secret + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: "A Certificate resource should be created to ensure an up to date and signed X.509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. \n The stored certificate will be renewed before it expires (as configured by `spec.renewBefore`)." + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of the desired state of the Certificate resource. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + type: object + required: + - issuerRef + - secretName + properties: + additionalOutputFormats: + description: "Defines extra output formats of the private key and signed certificate chain to be written to this Certificate's target Secret. \n This is an Alpha Feature and is only enabled with the `--feature-gates=AdditionalCertificateOutputFormats=true` option set on both the controller and webhook components." + type: array + items: + description: CertificateAdditionalOutputFormat defines an additional output format of a Certificate resource. These contain supplementary data formats of the signed certificate chain and paired private key. + type: object + required: + - type + properties: + type: + description: Type is the name of the format type that should be written to the Certificate's target Secret. + type: string + enum: + - DER + - CombinedPEM + commonName: + description: "Requested common name X509 certificate subject attribute. More info: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 NOTE: TLS clients will ignore this value when any subject alternative name is set (see https://tools.ietf.org/html/rfc6125#section-6.4.4). \n Should have a length of 64 characters or fewer to avoid generating invalid CSRs. Cannot be set if the `literalSubject` field is set." 
+ type: string + dnsNames: + description: Requested DNS subject alternative names. + type: array + items: + type: string + duration: + description: "Requested 'duration' (i.e. lifetime) of the Certificate. Note that the issuer may choose to ignore the requested duration, just like any other requested attribute. \n If unset, this defaults to 90 days. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration." + type: string + emailAddresses: + description: Requested email subject alternative names. + type: array + items: + type: string + encodeUsagesInRequest: + description: "Whether the KeyUsage and ExtKeyUsage extensions should be set in the encoded CSR. \n This option defaults to true, and should only be disabled if the target issuer does not support CSRs with these X509 KeyUsage/ ExtKeyUsage extensions." + type: boolean + ipAddresses: + description: Requested IP address subject alternative names. + type: array + items: + type: string + isCA: + description: "Requested basic constraints isCA value. The isCA value is used to set the `isCA` field on the created CertificateRequest resources. Note that the issuer may choose to ignore the requested isCA value, just like any other requested attribute. \n If true, this will automatically add the `cert sign` usage to the list of requested `usages`." + type: boolean + issuerRef: + description: "Reference to the issuer responsible for issuing the certificate. If the issuer is namespace-scoped, it must be in the same namespace as the Certificate. If the issuer is cluster-scoped, it can be used from any namespace. \n The `name` field of the reference must always be specified." + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. 
+ type: string + keystores: + description: Additional keystore output formats to be stored in the Certificate's Secret. + type: object + properties: + jks: + description: JKS configures options for storing a JKS keystore in the `spec.secretName` Secret resource. + type: object + required: + - create + - passwordSecretRef + properties: + create: + description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will be updated immediately. If the issuer provided a CA certificate, a file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + pkcs12: + description: PKCS12 configures options for storing a PKCS12 keystore in the `spec.secretName` Secret resource. + type: object + required: + - create + - passwordSecretRef + properties: + create: + description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will be updated immediately. 
If the issuer provided a CA certificate, a file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + literalSubject: + description: "Requested X.509 certificate subject, represented using the LDAP \"String Representation of a Distinguished Name\" [1]. Important: the LDAP string format also specifies the order of the attributes in the subject, this is important when issuing certs for LDAP authentication. Example: `CN=foo,DC=corp,DC=example,DC=com` More info [1]: https://datatracker.ietf.org/doc/html/rfc4514 More info: https://github.com/cert-manager/cert-manager/issues/3203 More info: https://github.com/cert-manager/cert-manager/issues/4424 \n Cannot be set if the `subject` or `commonName` field is set. This is an Alpha Feature and is only enabled with the `--feature-gates=LiteralCertificateSubject=true` option set on both the controller and webhook components." + type: string + privateKey: + description: Private key options. These include the key algorithm and size, the used encoding and the rotation policy. + type: object + properties: + algorithm: + description: "Algorithm is the private key algorithm of the corresponding private key for this certificate. \n If provided, allowed values are either `RSA`, `ECDSA` or `Ed25519`. 
If `algorithm` is specified and `size` is not provided, key size of 2048 will be used for `RSA` key algorithm and key size of 256 will be used for `ECDSA` key algorithm. key size is ignored when using the `Ed25519` key algorithm." + type: string + enum: + - RSA + - ECDSA + - Ed25519 + encoding: + description: "The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. \n If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified." + type: string + enum: + - PKCS1 + - PKCS8 + rotationPolicy: + description: "RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. \n If set to `Never`, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exists but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to `Always`, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is `Never` for backward compatibility." + type: string + enum: + - Never + - Always + size: + description: "Size is the key bit size of the corresponding private key for this certificate. \n If `algorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `algorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. If `algorithm` is set to `Ed25519`, Size is ignored. No other values are allowed." + type: integer + renewBefore: + description: "How long before the currently issued certificate's expiry cert-manager should renew the certificate. For example, if a certificate is valid for 60 minutes, and `renewBefore=10m`, cert-manager will begin to attempt to renew the certificate 50 minutes after it was issued (i.e. 
when there are 10 minutes remaining until the certificate is no longer valid). \n NOTE: The actual lifetime of the issued certificate is used to determine the renewal time. If an issuer returns a certificate with a different lifetime than the one requested, cert-manager will use the lifetime of the issued certificate. \n If unset, this defaults to 1/3 of the issued certificate's lifetime. Minimum accepted value is 5 minutes. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration." + type: string + revisionHistoryLimit: + description: "The maximum number of CertificateRequest revisions that are maintained in the Certificate's history. Each revision represents a single `CertificateRequest` created by this Certificate, either when it was created, renewed, or Spec was changed. Revisions will be removed by oldest first if the number of revisions exceeds this number. \n If set, revisionHistoryLimit must be a value of `1` or greater. If unset (`nil`), revisions will not be garbage collected. Default value is `nil`." + type: integer + format: int32 + secretName: + description: Name of the Secret resource that will be automatically created and managed by this Certificate resource. It will be populated with a private key and certificate, signed by the denoted issuer. The Secret resource lives in the same namespace as the Certificate resource. + type: string + secretTemplate: + description: Defines annotations and labels to be copied to the Certificate's Secret. Labels and annotations on the Secret will be changed as they appear on the SecretTemplate when added or removed. SecretTemplate annotations are added in conjunction with, and cannot overwrite, the base set of annotations cert-manager sets on the Certificate's Secret. + type: object + properties: + annotations: + description: Annotations is a key value map to be copied to the target Kubernetes Secret. 
+ type: object + additionalProperties: + type: string + labels: + description: Labels is a key value map to be copied to the target Kubernetes Secret. + type: object + additionalProperties: + type: string + subject: + description: "Requested set of X509 certificate subject attributes. More info: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 \n The common name attribute is specified separately in the `commonName` field. Cannot be set if the `literalSubject` field is set." + type: object + properties: + countries: + description: Countries to be used on the Certificate. + type: array + items: + type: string + localities: + description: Cities to be used on the Certificate. + type: array + items: + type: string + organizationalUnits: + description: Organizational Units to be used on the Certificate. + type: array + items: + type: string + organizations: + description: Organizations to be used on the Certificate. + type: array + items: + type: string + postalCodes: + description: Postal codes to be used on the Certificate. + type: array + items: + type: string + provinces: + description: State/Provinces to be used on the Certificate. + type: array + items: + type: string + serialNumber: + description: Serial number to be used on the Certificate. + type: string + streetAddresses: + description: Street addresses to be used on the Certificate. + type: array + items: + type: string + uris: + description: Requested URI subject alternative names. + type: array + items: + type: string + usages: + description: "Requested key usages and extended key usages. These usages are used to set the `usages` field on the created CertificateRequest resources. If `encodeUsagesInRequest` is unset or set to `true`, the usages will additionally be encoded in the `request` field which contains the CSR blob. \n If unset, defaults to `digital signature` and `key encipherment`." + type: array + items: + description: "KeyUsage specifies valid usage contexts for keys. 
See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"" + type: string + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + status: + description: 'Status of the Certificate. This is set and managed automatically. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + type: object + properties: + conditions: + description: List of status conditions to indicate the status of certificates. Known condition types are `Ready` and `Issuing`. + type: array + items: + description: CertificateCondition contains condition information for an Certificate. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. 
For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Certificate. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`, `Issuing`). + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + failedIssuanceAttempts: + description: The number of continuous failed issuance attempts up till now. This field gets removed (if set) on a successful issuance and gets set to 1 if unset and an issuance has failed. If an issuance has failed, the delay till the next issuance will be calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - 1). + type: integer + lastFailureTime: + description: LastFailureTime is set only if the lastest issuance for this Certificate failed and contains the time of the failure. If an issuance has failed, the delay till the next issuance will be calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - 1). If the latest issuance has succeeded this field will be unset. + type: string + format: date-time + nextPrivateKeySecretName: + description: The name of the Secret resource containing the private key to be used for the next certificate iteration. The keymanager controller will automatically set this field if the `Issuing` condition is set to `True`. It will automatically unset this field when the Issuing condition is not set or False. + type: string + notAfter: + description: The expiration time of the certificate stored in the secret named by this resource in `spec.secretName`. 
+ type: string + format: date-time + notBefore: + description: The time after which the certificate stored in the secret named by this resource in `spec.secretName` is valid. + type: string + format: date-time + renewalTime: + description: RenewalTime is the time at which the certificate will be next renewed. If not set, no upcoming renewal is scheduled. + type: string + format: date-time + revision: + description: "The current 'revision' of the certificate as issued. \n When a CertificateRequest resource is created, it will have the `cert-manager.io/certificate-revision` set to one greater than the current value of this field. \n Upon issuance, this field will be set to the value of the annotation on the CertificateRequest resource used to issue the certificate. \n Persisting the value on the CertificateRequest resource allows the certificates controller to know whether a request is part of an old issuance or if it is part of the ongoing revision's issuance by checking if the revision value in the annotation is greater than this field." + type: integer + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: challenges.acme.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "v1.13.1" +spec: + group: acme.cert-manager.io + names: + kind: Challenge + listKind: ChallengeList + plural: challenges + singular: challenge + categories: + - cert-manager + - cert-manager-acme + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.dnsName + name: Domain + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. 
It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: Challenge is a type to represent a Challenge request with an ACME server + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + required: + - authorizationURL + - dnsName + - issuerRef + - key + - solver + - token + - type + - url + properties: + authorizationURL: + description: The URL to the ACME Authorization resource that this challenge is a part of. + type: string + dnsName: + description: dnsName is the identifier that this challenge is for, e.g. example.com. If the requested DNSName is a 'wildcard', this field MUST be set to the non-wildcard domain, e.g. for `*.example.com`, it must be `example.com`. + type: string + issuerRef: + description: References a properly configured ACME-type Issuer which should be used to create this Challenge. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Challenge will be marked as failed. 
+ type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + key: + description: 'The ACME challenge key for this challenge For HTTP01 challenges, this is the value that must be responded with to complete the HTTP01 challenge in the format: `.`. For DNS01 challenges, this is the base64 encoded SHA256 sum of the `.` text that must be set as the TXT record content.' + type: string + solver: + description: Contains the domain solving configuration that should be used to solve this challenge resource. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. 
+ type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. 
+ type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. 
If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + email: + description: Email of the account, only required when using API key based authentication. 
+ type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. 
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. + type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do a lookup using the route53:ListHostedZonesByName api call. 
+ type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. 
This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n This API may be extended in the future to support additional kinds of parent resources. 
\n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n Support for other resources is Implementation-Specific." + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. 
\n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n When the parent resource is a Service, this targets a specific port in the Service spec. When both Port (experimental) and SectionName are specified, the name and port of the selected port must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. 
\n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. * Service: Port Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. Note that attaching Routes to Services as Parents is part of experimental Mesh support and is not supported for any other purpose. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. 
+ type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: This field configures the annotation `kubernetes.io/ingress.class` when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of `class`, `name` or `ingressClassName` may be specified. + type: string + ingressClassName: + description: This field configures the field `ingressClassName` on the created Ingress resources used to solve ACME challenges that use this challenge solver. This is the recommended way of configuring the ingress class. Only one of `class`, `name` or `ingressClassName` may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. 
This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. Only one of `class`, `name` or `ingressClassName` may be specified. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Check ACMEChallengeSolverHTTP01IngressPodSpec to find out currently supported fields. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). 
+ type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + imagePullSecrets: + description: If specified, the pod's imagePullSecrets + type: array + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + type: object + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + x-kubernetes-map-type: atomic + nodeSelector: + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. 
+ type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. 
+ type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + token: + description: The ACME challenge token for this challenge. This is the raw value returned from the ACME server. + type: string + type: + description: The type of ACME challenge this resource represents. One of "HTTP-01" or "DNS-01". + type: string + enum: + - HTTP-01 + - DNS-01 + url: + description: The URL of the ACME Challenge resource for this challenge. This can be used to lookup details about the status of this challenge. + type: string + wildcard: + description: wildcard will be true if this challenge is for a wildcard identifier, for example '*.example.com'. + type: boolean + status: + type: object + properties: + presented: + description: presented will be set to true if the challenge values for this challenge are currently 'presented'. 
This *does not* imply the self check is passing. Only that the values have been 'submitted' for the appropriate challenge mechanism (i.e. the DNS01 TXT record has been presented, or the HTTP01 configuration has been configured). + type: boolean + processing: + description: Used to denote whether this challenge should be processed or not. This field will only be set to true by the 'scheduling' component. It will only be set to false by the 'challenges' controller, after the challenge has reached a final state or timed out. If this field is set to false, the challenge controller will not take any more action. + type: boolean + reason: + description: Contains human readable information on why the Challenge is in the current state. + type: string + state: + description: Contains the current 'state' of the challenge. If not set, the state of the challenge is unknown. + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + served: true + storage: true + subresources: + status: {} +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterissuers.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: "cert-manager" + # Generated labels + app.kubernetes.io/version: "v1.13.1" +spec: + group: cert-manager.io + names: + kind: ClusterIssuer + listKind: ClusterIssuerList + plural: clusterissuers + singular: clusterissuer + categories: + - cert-manager + scope: Cluster + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was 
created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: A ClusterIssuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is similar to an Issuer, however it is cluster-scoped and therefore can be referenced by resources that exist in *any* namespace, not just the same namespace as the referent. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the ClusterIssuer resource. + type: object + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + type: object + required: + - privateKeySecretRef + - server + properties: + caBundle: + description: Base64-encoded bundle of PEM CAs which can be used to validate the certificate chain presented by the ACME server. Mutually exclusive with SkipTLSVerify; prefer using CABundle to prevent various kinds of security vulnerabilities. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. 
+ type: string + format: byte + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + type: object + required: + - keyID + - keySecretRef + properties: + keyAlgorithm: + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' + type: string + enum: + - HS256 + - HS384 + - HS512 + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. 
The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + type: string + maxLength: 64 + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: 'INSECURE: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have the TLS certificate chain validated. Mutually exclusive with CABundle; prefer using CABundle to prevent various kinds of security vulnerabilities. Only enable this option in development environments. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. Defaults to false.' + type: boolean + solvers: + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/' + type: array + items: + description: An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. A selector may be provided to use different solving strategies for different DNS names. Only one of HTTP01 or DNS01 must be provided. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. 
In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. 
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + email: + description: Email of the account, only required when using API key based authentication. + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. 
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. 
+ type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: 'The SecretAccessKey is used for authentication. 
If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. 
`*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n This API may be extended in the future to support additional kinds of parent resources. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). 
\n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n Support for other resources is Implementation-Specific." + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. 
\n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n When the parent resource is a Service, this targets a specific port in the Service spec. When both Port (experimental) and SectionName are specified, the name and port of the selected port must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. * Service: Port Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. 
Note that attaching Routes to Services as Parents is part of experimental Mesh support and is not supported for any other purpose. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: This field configures the annotation `kubernetes.io/ingress.class` when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of `class`, `name` or `ingressClassName` may be specified. + type: string + ingressClassName: + description: This field configures the field `ingressClassName` on the created Ingress resources used to solve ACME challenges that use this challenge solver. This is the recommended way of configuring the ingress class. 
Only one of `class`, `name` or `ingressClassName` may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. Only one of `class`, `name` or `ingressClassName` may be specified. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. 
+ type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Check ACMEChallengeSolverHTTP01IngressPodSpec to find out currently supported fields. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. 
A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + imagePullSecrets: + description: If specified, the pod's imagePullSecrets + type: array + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + type: object + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + x-kubernetes-map-type: atomic + nodeSelector: + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. 
+ type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. 
+ type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager. + type: object + required: + - secretName + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + type: array + items: + type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. 
If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + type: object + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + type: array + items: + type: string + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + type: object + required: + - auth + - path + - server + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + type: object + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. + type: object + required: + - path + - roleId + - secretRef + properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. 
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. + type: object + required: + - role + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + serviceAccountRef: + description: A reference to a service account that will be used to request a bound token (also known as "projected token"). Compared to using "secretRef", using this field means that you don't rely on statically bound tokens. 
To use this field, you must configure an RBAC rule to let cert-manager request a token. + type: object + required: + - name + properties: + name: + description: Name of the ServiceAccount used to request a token. + type: string + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + caBundle: + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by Vault. Only used if using HTTPS to connect to Vault and ignored for HTTP connections. Mutually exclusive with CABundleSecretRef. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. + type: string + format: byte + caBundleSecretRef: + description: Reference to a Secret containing a bundle of PEM-encoded CAs to use when verifying the certificate chain presented by Vault when using HTTPS. Mutually exclusive with CABundle. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. If no key for the Secret is specified, cert-manager will default to 'ca.crt'. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' + type: string + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. + type: object + required: + - zone + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - apiTokenSecretRef + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". + type: string + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. 
+ type: object + required: + - credentialsRef + - url + properties: + caBundle: + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by the TPP server. Only used if using HTTPS; ignored for HTTP. If undefined, the certificate bundle in the cert-manager controller container is used to validate the chain. + type: string + format: byte + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + type: object + required: + - name + properties: + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' + type: string + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + status: + description: Status of the ClusterIssuer. This is set and managed automatically. + type: object + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. 
+ type: object + properties: + lastPrivateKeyHash: + description: LastPrivateKeyHash is a hash of the private key associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + type: array + items: + description: IssuerCondition contains condition information for an Issuer. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`). 
+ type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: issuers.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: "cert-manager" + # Generated labels + app.kubernetes.io/version: "v1.13.1" +spec: + group: cert-manager.io + names: + kind: Issuer + listKind: IssuerList + plural: issuers + singular: issuer + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: An Issuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is scoped to a single namespace and can therefore only be referenced by resources within the same namespace. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Desired state of the Issuer resource. + type: object + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + type: object + required: + - privateKeySecretRef + - server + properties: + caBundle: + description: Base64-encoded bundle of PEM CAs which can be used to validate the certificate chain presented by the ACME server. Mutually exclusive with SkipTLSVerify; prefer using CABundle to prevent various kinds of security vulnerabilities. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. + type: string + format: byte + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. 
Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + type: object + required: + - keyID + - keySecretRef + properties: + keyAlgorithm: + description: 'Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme.' + type: string + enum: + - HS256 + - HS384 + - HS512 + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. 
This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + type: string + maxLength: 64 + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: 'INSECURE: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have the TLS certificate chain validated. Mutually exclusive with CABundle; prefer using CABundle to prevent various kinds of security vulnerabilities. Only enable this option in development environments. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. Defaults to false.' + type: boolean + solvers: + description: 'Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. 
For more information, see: https://cert-manager.io/docs/configuration/acme/' + type: array + items: + description: An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. A selector may be provided to use different solving strategies for different DNS names. Only one of HTTP01 or DNS01 must be provided. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: 'API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions.' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + email: + description: Email of the account, only required when using API key based authentication. + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. 
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: 'The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``.' + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. 
+ type: object + required: + - region + properties: + accessKeyID: + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: 'The SecretAccessKey is used for authentication. 
If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. + type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. 
`*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n This API may be extended in the future to support additional kinds of parent resources. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). 
\n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n Support for other resources is Implementation-Specific." + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. 
\n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n When the parent resource is a Service, this targets a specific port in the Service spec. When both Port (experimental) and SectionName are specified, the name and port of the selected port must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. * Service: Port Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. 
Note that attaching Routes to Services as Parents is part of experimental Mesh support and is not supported for any other purpose. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: This field configures the annotation `kubernetes.io/ingress.class` when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of `class`, `name` or `ingressClassName` may be specified. + type: string + ingressClassName: + description: This field configures the field `ingressClassName` on the created Ingress resources used to solve ACME challenges that use this challenge solver. This is the recommended way of configuring the ingress class. 
Only one of `class`, `name` or `ingressClassName` may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. Only one of `class`, `name` or `ingressClassName` may be specified. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. 
+ type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Check ACMEChallengeSolverHTTP01IngressPodSpec to find out currently supported fields. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. 
A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + imagePullSecrets: + description: If specified, the pod's imagePullSecrets + type: array + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + type: object + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + x-kubernetes-map-type: atomic + nodeSelector: + description: 'NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node''s labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. 
+ type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. 
+ type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager. + type: object + required: + - secretName + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + type: array + items: + type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. 
If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + type: object + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + type: array + items: + type: string + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + type: object + required: + - auth + - path + - server + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + type: object + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. + type: object + required: + - path + - roleId + - secretRef + properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. 
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. + type: object + required: + - role + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + serviceAccountRef: + description: A reference to a service account that will be used to request a bound token (also known as "projected token"). Compared to using "secretRef", using this field means that you don't rely on statically bound tokens. 
To use this field, you must configure an RBAC rule to let cert-manager request a token. + type: object + required: + - name + properties: + name: + description: Name of the ServiceAccount used to request a token. + type: string + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + caBundle: + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by Vault. Only used if using HTTPS to connect to Vault and ignored for HTTP connections. Mutually exclusive with CABundleSecretRef. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. + type: string + format: byte + caBundleSecretRef: + description: Reference to a Secret containing a bundle of PEM-encoded CAs to use when verifying the certificate chain presented by Vault when using HTTPS. Mutually exclusive with CABundle. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. If no key for the Secret is specified, cert-manager will default to 'ca.crt'. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' + type: string + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. + type: object + required: + - zone + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - apiTokenSecretRef + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". + type: string + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. 
+ type: object + required: + - credentialsRef + - url + properties: + caBundle: + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by the TPP server. Only used if using HTTPS; ignored for HTTP. If undefined, the certificate bundle in the cert-manager controller container is used to validate the chain. + type: string + format: byte + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + type: object + required: + - name + properties: + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' + type: string + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + status: + description: Status of the Issuer. This is set and managed automatically. + type: object + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. 
+ type: object + properties: + lastPrivateKeyHash: + description: LastPrivateKeyHash is a hash of the private key associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + type: array + items: + description: IssuerCondition contains condition information for an Issuer. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`). 
+ type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: orders.acme.cert-manager.io + labels: + app: 'cert-manager' + app.kubernetes.io/name: 'cert-manager' + app.kubernetes.io/instance: 'cert-manager' + # Generated labels + app.kubernetes.io/version: "v1.13.1" +spec: + group: acme.cert-manager.io + names: + kind: Order + listKind: OrderList + plural: orders + singular: order + categories: + - cert-manager + - cert-manager-acme + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: Order is a type to represent an Order with an ACME server + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + required: + - issuerRef + - request + properties: + commonName: + description: CommonName is the common name as specified on the DER encoded CSR. If specified, this value must also be present in `dnsNames` or `ipAddresses`. This field must match the corresponding field on the DER encoded CSR. + type: string + dnsNames: + description: DNSNames is a list of DNS names that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. + type: array + items: + type: string + duration: + description: Duration is the duration for the not after date for the requested certificate. this is set on order creation as pe the ACME spec. + type: string + ipAddresses: + description: IPAddresses is a list of IP addresses that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. + type: array + items: + type: string + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer which should be used to create this Order. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Order will be marked as failed. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + request: + description: Certificate signing request bytes in DER encoding. This will be used when finalizing the order. This field must be set on the order. 
+ type: string + format: byte + status: + type: object + properties: + authorizations: + description: Authorizations contains data returned from the ACME server on what authorizations must be completed in order to validate the DNS names specified on the Order. + type: array + items: + description: ACMEAuthorization contains data returned from the ACME server on an authorization that must be completed in order validate a DNS name on an ACME Order resource. + type: object + required: + - url + properties: + challenges: + description: Challenges specifies the challenge types offered by the ACME server. One of these challenge types will be selected when validating the DNS name and an appropriate Challenge resource will be created to perform the ACME challenge process. + type: array + items: + description: Challenge specifies a challenge offered by the ACME server for an Order. An appropriate Challenge resource can be created to perform the ACME challenge process. + type: object + required: + - token + - type + - url + properties: + token: + description: Token is the token that must be presented for this challenge. This is used to compute the 'key' that must also be presented. + type: string + type: + description: Type is the type of challenge being offered, e.g. 'http-01', 'dns-01', 'tls-sni-01', etc. This is the raw value retrieved from the ACME server. Only 'http-01' and 'dns-01' are supported by cert-manager, other values will be ignored. + type: string + url: + description: URL is the URL of this challenge. It can be used to retrieve additional metadata about the Challenge from the ACME server. + type: string + identifier: + description: Identifier is the DNS name to be validated as part of this authorization + type: string + initialState: + description: InitialState is the initial state of the ACME authorization when first fetched from the ACME server. 
If an Authorization is already 'valid', the Order controller will not create a Challenge resource for the authorization. This will occur when working with an ACME server that enables 'authz reuse' (such as Let's Encrypt's production endpoint). If not set and 'identifier' is set, the state is assumed to be pending and a Challenge will be created. + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + url: + description: URL is the URL of the Authorization that must be completed + type: string + wildcard: + description: Wildcard will be true if this authorization is for a wildcard DNS name. If this is true, the identifier will be the *non-wildcard* version of the DNS name. For example, if '*.example.com' is the DNS name being validated, this field will be 'true' and the 'identifier' field will be 'example.com'. + type: boolean + certificate: + description: Certificate is a copy of the PEM encoded certificate for this Order. This field will be populated after the order has been successfully finalized with the ACME server, and the order has transitioned to the 'valid' state. + type: string + format: byte + failureTime: + description: FailureTime stores the time that this order failed. This is used to influence garbage collection and back-off. + type: string + format: date-time + finalizeURL: + description: FinalizeURL of the Order. This is used to obtain certificates for this order once it has been completed. + type: string + reason: + description: Reason optionally provides more information about a why the order is in the current state. + type: string + state: + description: State contains the current state of this Order resource. States 'success' and 'expired' are 'final' + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + url: + description: URL of the Order. This will initially be empty when the resource is first created. 
The Order controller will populate this field when the Order is first processed. This field will be immutable after it is initially set. + type: string + served: true + storage: true +--- +# Source: cert-manager/templates/cainjector-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager-cainjector + namespace: cert-manager + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.13.1" +--- +# Source: cert-manager/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +--- +# Source: cert-manager/templates/webhook-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.13.1" +--- +# Source: cert-manager/templates/controller-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +data: +--- +# Source: cert-manager/templates/webhook-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + 
app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.13.1" +data: +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.13.1" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "create", "update", "patch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "update", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Issuer controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "issuers/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# ClusterIssuer 
controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "clusterissuers/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Certificates controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificates/status", "certificaterequests", "certificaterequests/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["cert-manager.io"] + resources: ["certificates/finalizers", "certificaterequests/finalizers"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders"] + verbs: ["create", "delete", "get", "list", "watch"] + - apiGroups: [""] + resources: 
["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Orders controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +rules: + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "orders/status"] + verbs: ["update", "patch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "challenges"] + verbs: ["get", "list", "watch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["create", "delete"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Challenges controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +rules: + # Use to update challenge resource status + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "challenges/status"] 
+ verbs: ["update", "patch"] + # Used to watch challenge resources + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["get", "list", "watch"] + # Used to watch challenges, issuer and clusterissuer resources + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + # Need to be able to retrieve ACME account private key to complete challenges + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + # Used to create events + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + # HTTP01 rules + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: [ "gateway.networking.k8s.io" ] + resources: [ "httproutes" ] + verbs: ["get", "list", "watch", "create", "delete", "update"] + # We require the ability to specify a custom hostname when we are creating + # new ingress resources. 
+ # See: https://github.com/openshift/origin/blob/21f191775636f9acadb44fa42beeb4f75b255532/pkg/route/apiserver/admission/ingress_admission.go#L84-L148 + - apiGroups: ["route.openshift.io"] + resources: ["routes/custom-host"] + verbs: ["create"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges/finalizers"] + verbs: ["update"] + # DNS01 rules (duplicated above) + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] +--- +# Source: cert-manager/templates/rbac.yaml +# ingress-shim controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests"] + verbs: ["create", "update", "delete"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses/finalizers"] + verbs: ["update"] + - apiGroups: ["gateway.networking.k8s.io"] + resources: ["gateways", "httproutes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["gateway.networking.k8s.io"] + resources: 
["gateways/finalizers", "httproutes/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-cluster-view + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" + rbac.authorization.k8s.io/aggregate-to-cluster-reader: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers"] + verbs: ["get", "list", "watch"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-view + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-cluster-reader: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "orders"] + verbs: ["get", "list", "watch"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-edit + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: 
["certificates", "certificaterequests", "issuers"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates/status"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "orders"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] +--- +# Source: cert-manager/templates/rbac.yaml +# Permission to approve CertificateRequests referencing cert-manager.io Issuers and ClusterIssuers +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-approve:cert-manager-io + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.13.1" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["signers"] + verbs: ["approve"] + resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] +--- +# Source: cert-manager/templates/rbac.yaml +# Permission to: +# - Update and sign CertificatSigningeRequests referencing cert-manager.io Issuers and ClusterIssuers +# - Perform SubjectAccessReviews to test whether users are able to reference Namespaced Issuers +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificatesigningrequests + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.13.1" +rules: + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests/status"] + verbs: ["update", "patch"] + - apiGroups: ["certificates.k8s.io"] + resources: ["signers"] + resourceNames: ["issuers.cert-manager.io/*", 
"clusterissuers.cert-manager.io/*"] + verbs: ["sign"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-webhook:subjectaccessreviews + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.13.1" +rules: +- apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.13.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-cainjector +subjects: + - name: cert-manager-cainjector + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-issuers +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + 
app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-clusterissuers +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificates +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-orders +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-challenges +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount 
+--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-ingress-shim +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-approve:cert-manager-io + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.13.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-approve:cert-manager-io +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificatesigningrequests + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.13.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificatesigningrequests +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-webhook:subjectaccessreviews + labels: + app: webhook + app.kubernetes.io/name: webhook + 
app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.13.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-webhook:subjectaccessreviews +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +# leader election rules +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager-cainjector:leaderelection + namespace: kube-system + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.13.1" +rules: + # Used for leader election by the controller + # cert-manager-cainjector-leader-election is used by the CertificateBased injector controller + # see cmd/cainjector/start.go#L113 + # cert-manager-cainjector-leader-election-core is used by the SecretBased injector controller + # see cmd/cainjector/start.go#L137 + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: ["cert-manager-cainjector-leader-election", "cert-manager-cainjector-leader-election-core"] + verbs: ["get", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager:leaderelection + namespace: kube-system + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: ["cert-manager-controller"] + verbs: ["get", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] +--- +# Source: 
cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager-webhook:dynamic-serving + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.13.1" +rules: +- apiGroups: [""] + resources: ["secrets"] + resourceNames: + - 'cert-manager-webhook-ca' + verbs: ["get", "list", "watch", "update"] +# It's not possible to grant CREATE permission on a single resourceName. +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +# grant cert-manager permission to manage the leaderelection configmap in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager-cainjector:leaderelection + namespace: kube-system + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.13.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-cainjector:leaderelection +subjects: + - kind: ServiceAccount + name: cert-manager-cainjector + namespace: cert-manager +--- +# Source: cert-manager/templates/rbac.yaml +# grant cert-manager permission to manage the leaderelection configmap in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager:leaderelection + namespace: kube-system + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager:leaderelection +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager 
+ namespace: cert-manager +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager-webhook:dynamic-serving + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.13.1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-webhook:dynamic-serving +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager +--- +# Source: cert-manager/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +spec: + type: ClusterIP + ports: + - protocol: TCP + port: 9402 + name: tcp-prometheus-servicemonitor + targetPort: 9402 + selector: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" +--- +# Source: cert-manager/templates/webhook-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.13.1" +spec: + type: ClusterIP + ports: + - name: https + port: 443 + protocol: TCP + targetPort: "https" + selector: + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" +--- +# Source: cert-manager/templates/cainjector-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-cainjector + namespace: cert-manager + labels: + app: cainjector + 
app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.13.1" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + template: + metadata: + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.13.1" + spec: + serviceAccountName: cert-manager-cainjector + enableServiceLinks: false + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-cainjector + image: "quay.io/jetstack/cert-manager-cainjector:v1.13.1" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --leader-election-namespace=kube-system + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + template: + metadata: + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.13.1" + annotations: + prometheus.io/path: "/metrics" + prometheus.io/scrape: 'true' + prometheus.io/port: '9402' + spec: + 
serviceAccountName: cert-manager + enableServiceLinks: false + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-controller + image: "quay.io/jetstack/cert-manager-controller:v1.13.1" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --cluster-resource-namespace=$(POD_NAMESPACE) + - --leader-election-namespace=kube-system + - --acme-http01-solver-image=quay.io/jetstack/cert-manager-acmesolver:v1.13.1 + - --max-concurrent-challenges=60 + ports: + - containerPort: 9402 + name: http-metrics + protocol: TCP + - containerPort: 9403 + name: http-healthz + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/webhook-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.13.1" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + template: + metadata: + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.13.1" + spec: + serviceAccountName: cert-manager-webhook + enableServiceLinks: false + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-webhook + image: "quay.io/jetstack/cert-manager-webhook:v1.13.1" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --secure-port=10250 + - --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE) + - 
--dynamic-serving-ca-secret-name=cert-manager-webhook-ca + - --dynamic-serving-dns-names=cert-manager-webhook + - --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE) + - --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE).svc + + ports: + - name: https + protocol: TCP + containerPort: 10250 + - name: healthcheck + protocol: TCP + containerPort: 6080 + livenessProbe: + httpGet: + path: /livez + port: 6080 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /healthz + port: 6080 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/webhook-mutating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.13.1" + annotations: + cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-ca" +webhooks: + - name: webhook.cert-manager.io + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - "v1" + operations: + - CREATE + - UPDATE + resources: + - "*/*" + admissionReviewVersions: ["v1"] + # This webhook only accepts v1 cert-manager resources. + # Equivalent matchPolicy ensures that non-v1 resource requests are sent to + # this webhook (after the resources have been converted to v1). 
+ matchPolicy: Equivalent + timeoutSeconds: 10 + failurePolicy: Fail + # Only include 'sideEffects' field in Kubernetes 1.12+ + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /mutate +--- +# Source: cert-manager/templates/webhook-validating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.13.1" + annotations: + cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-ca" +webhooks: + - name: webhook.cert-manager.io + namespaceSelector: + matchExpressions: + - key: "cert-manager.io/disable-validation" + operator: "NotIn" + values: + - "true" + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - "v1" + operations: + - CREATE + - UPDATE + resources: + - "*/*" + admissionReviewVersions: ["v1"] + # This webhook only accepts v1 cert-manager resources. + # Equivalent matchPolicy ensures that non-v1 resource requests are sent to + # this webhook (after the resources have been converted to v1). 
+ matchPolicy: Equivalent + timeoutSeconds: 10 + failurePolicy: Fail + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /validate diff --git a/setup/cluster/cert-manager/internal-wildcard-certificate.yaml b/setup/cluster/cert-manager/kustomize/internal-wildcard-certificate.yaml similarity index 84% rename from setup/cluster/cert-manager/internal-wildcard-certificate.yaml rename to setup/cluster/cert-manager/kustomize/internal-wildcard-certificate.yaml index bb25c54..2177e1a 100644 --- a/setup/cluster/cert-manager/internal-wildcard-certificate.yaml +++ b/setup/cluster/cert-manager/kustomize/internal-wildcard-certificate.yaml @@ -7,8 +7,8 @@ metadata: spec: secretName: wildcard-internal-wild-cloud-tls dnsNames: - - "*.internal.${DOMAIN}" - - "internal.${DOMAIN}" + - "*.internal.cloud2.payne.io" + - "internal.cloud2.payne.io" issuerRef: name: letsencrypt-prod kind: ClusterIssuer diff --git a/setup/cluster/cert-manager/kustomize/kustomization.yaml b/setup/cluster/cert-manager/kustomize/kustomization.yaml new file mode 100644 index 0000000..563aa00 --- /dev/null +++ b/setup/cluster/cert-manager/kustomize/kustomization.yaml @@ -0,0 +1,12 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +- namespace.yaml +- letsencrypt-staging-dns01.yaml +- letsencrypt-prod-dns01.yaml +- internal-wildcard-certificate.yaml +- wildcard-certificate.yaml + +# Note: cert-manager.yaml contains the main installation manifests +# but is applied separately via URL in the install script \ No newline at end of file diff --git a/setup/cluster/cert-manager/letsencrypt-prod-dns01.yaml b/setup/cluster/cert-manager/kustomize/letsencrypt-prod-dns01.yaml similarity index 87% rename from setup/cluster/cert-manager/letsencrypt-prod-dns01.yaml rename to setup/cluster/cert-manager/kustomize/letsencrypt-prod-dns01.yaml index e00c0e5..3cdd7f8 100644 --- a/setup/cluster/cert-manager/letsencrypt-prod-dns01.yaml +++ 
b/setup/cluster/cert-manager/kustomize/letsencrypt-prod-dns01.yaml @@ -5,7 +5,7 @@ metadata: name: letsencrypt-prod spec: acme: - email: ${EMAIL} + email: paul@payne.io privateKeySecretRef: name: letsencrypt-prod server: https://acme-v02.api.letsencrypt.org/directory @@ -13,13 +13,13 @@ spec: # DNS-01 solver for wildcard certificates - dns01: cloudflare: - email: ${EMAIL} + email: paul@payne.io apiTokenSecretRef: name: cloudflare-api-token key: api-token selector: dnsZones: - - "${CLOUDFLARE_DOMAIN}" + - "payne.io" # Keep the HTTP-01 solver for non-wildcard certificates - http01: ingress: diff --git a/setup/cluster/cert-manager/letsencrypt-staging-dns01.yaml b/setup/cluster/cert-manager/kustomize/letsencrypt-staging-dns01.yaml similarity index 87% rename from setup/cluster/cert-manager/letsencrypt-staging-dns01.yaml rename to setup/cluster/cert-manager/kustomize/letsencrypt-staging-dns01.yaml index 7d53ee4..b584d10 100644 --- a/setup/cluster/cert-manager/letsencrypt-staging-dns01.yaml +++ b/setup/cluster/cert-manager/kustomize/letsencrypt-staging-dns01.yaml @@ -5,7 +5,7 @@ metadata: name: letsencrypt-staging spec: acme: - email: ${EMAIL} + email: paul@payne.io privateKeySecretRef: name: letsencrypt-staging server: https://acme-staging-v02.api.letsencrypt.org/directory @@ -13,13 +13,13 @@ spec: # DNS-01 solver for wildcard certificates - dns01: cloudflare: - email: ${EMAIL} + email: paul@payne.io apiTokenSecretRef: name: cloudflare-api-token key: api-token selector: dnsZones: - - "${CLOUDFLARE_DOMAIN}" + - "payne.io" # Keep the HTTP-01 solver for non-wildcard certificates - http01: ingress: diff --git a/setup/cluster/cert-manager/kustomize/namespace.yaml b/setup/cluster/cert-manager/kustomize/namespace.yaml new file mode 100644 index 0000000..661039b --- /dev/null +++ b/setup/cluster/cert-manager/kustomize/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager \ No newline at end of file diff --git 
a/setup/cluster/cert-manager/wildcard-certificate.yaml b/setup/cluster/cert-manager/kustomize/wildcard-certificate.yaml similarity index 87% rename from setup/cluster/cert-manager/wildcard-certificate.yaml rename to setup/cluster/cert-manager/kustomize/wildcard-certificate.yaml index 87d2dbf..2d08016 100644 --- a/setup/cluster/cert-manager/wildcard-certificate.yaml +++ b/setup/cluster/cert-manager/kustomize/wildcard-certificate.yaml @@ -7,8 +7,8 @@ metadata: spec: secretName: wildcard-wild-cloud-tls dnsNames: - - "*.${DOMAIN}" - - "${DOMAIN}" + - "*.cloud2.payne.io" + - "cloud2.payne.io" issuerRef: name: letsencrypt-prod kind: ClusterIssuer diff --git a/setup/cluster/coredns/README.md b/setup/cluster/coredns/README.md index 99cfd2b..80116de 100644 --- a/setup/cluster/coredns/README.md +++ b/setup/cluster/coredns/README.md @@ -19,31 +19,27 @@ Any query for a resource in the `internal.$DOMAIN` domain will be given the IP o ## Default CoreDNS Configuration -Found at: https://github.com/k3s-io/k3s/blob/master/manifests/coredns.yaml - -This is k3s default CoreDNS configuration, for reference: +This is the default CoreDNS configuration, for reference: ```txt .:53 { errors - health + health { lameduck 5s } ready - kubernetes %{CLUSTER_DOMAIN}% in-addr.arpa ip6.arpa { - pods insecure - fallthrough in-addr.arpa ip6.arpa - } - hosts /etc/coredns/NodeHosts { - ttl 60 - reload 15s - fallthrough - } + log . { class error } prometheus :9153 - forward . /etc/resolv.conf - cache 30 + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + forward . 
/etc/resolv.conf { max_concurrent 1000 } + cache 30 { + disable success cluster.local + disable denial cluster.local + } loop reload loadbalance - import /etc/coredns/custom/*.override } -import /etc/coredns/custom/*.server ``` diff --git a/setup/cluster/coredns/install.sh b/setup/cluster/coredns/install.sh new file mode 100755 index 0000000..72b5a3d --- /dev/null +++ b/setup/cluster/coredns/install.sh @@ -0,0 +1,37 @@ +#!/bin/bash +set -e + +if [ -z "${WC_HOME}" ]; then + echo "Please source the wildcloud environment first. (e.g., \`source ./env.sh\`)" + exit 1 +fi + +CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster" +COREDNS_DIR="${CLUSTER_SETUP_DIR}/coredns" + +echo "Setting up CoreDNS for k3s..." + +# Process templates with wild-compile-template-dir +echo "Processing CoreDNS templates..." +wild-compile-template-dir --clean ${COREDNS_DIR}/kustomize.template ${COREDNS_DIR}/kustomize + +# Apply the k3s-compatible custom DNS override (k3s will preserve this) +echo "Applying CoreDNS custom override configuration..." +kubectl apply -f "${COREDNS_DIR}/kustomize/coredns-custom-config.yaml" + +# Apply the LoadBalancer service for external access to CoreDNS +echo "Applying CoreDNS service configuration..." +kubectl apply -f "${COREDNS_DIR}/kustomize/coredns-lb-service.yaml" + +# Restart CoreDNS pods to apply the changes +echo "Restarting CoreDNS pods to apply changes..." +kubectl rollout restart deployment/coredns -n kube-system +kubectl rollout status deployment/coredns -n kube-system + +echo "CoreDNS setup complete!" 
+echo +echo "To verify the installation:" +echo " kubectl get pods -n kube-system" +echo " kubectl get svc -n kube-system coredns" +echo " kubectl describe svc -n kube-system coredns" +echo " kubectl logs -n kube-system -l k8s-app=kube-dns -f" diff --git a/setup/cluster/coredns/kustomize.template/coredns-custom-config.yaml b/setup/cluster/coredns/kustomize.template/coredns-custom-config.yaml new file mode 100644 index 0000000..4a50e98 --- /dev/null +++ b/setup/cluster/coredns/kustomize.template/coredns-custom-config.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns-custom + namespace: kube-system +data: + # Custom server block for internal domains. All internal domains should + # resolve to the cluster proxy. + internal.server: | + {{ .cloud.internalDomain }} { + errors + cache 30 + reload + template IN A { + match (.*)\.{{ .cloud.internalDomain | strings.ReplaceAll "." "\\." }}\. + answer "{{`{{ .Name }}`}} 60 IN A {{ .cluster.loadBalancerIp }}" + } + template IN AAAA { + match (.*)\.{{ .cloud.internalDomain | strings.ReplaceAll "." "\\." }}\. + rcode NXDOMAIN + } + } + # Custom override to set external resolvers. + external.override: | + forward . 
{{ .cloud.dns.externalResolver }} { + max_concurrent 1000 + } diff --git a/setup/cluster/coredns/kustomize.template/coredns-lb-service.yaml b/setup/cluster/coredns/kustomize.template/coredns-lb-service.yaml new file mode 100644 index 0000000..18d47e2 --- /dev/null +++ b/setup/cluster/coredns/kustomize.template/coredns-lb-service.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: coredns-lb + namespace: kube-system + annotations: + metallb.universe.tf/loadBalancerIPs: "{{ .cluster.loadBalancerIp }}" +spec: + type: LoadBalancer + ports: + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + - name: dns-tcp + port: 53 + protocol: TCP + targetPort: 53 + - name: metrics + port: 9153 + protocol: TCP + targetPort: 9153 + selector: + k8s-app: kube-dns \ No newline at end of file diff --git a/setup/cluster/coredns/coredns-custom-config.yaml b/setup/cluster/coredns/kustomize/coredns-custom-config.yaml similarity index 68% rename from setup/cluster/coredns/coredns-custom-config.yaml rename to setup/cluster/coredns/kustomize/coredns-custom-config.yaml index 3bd7f43..369aaae 100644 --- a/setup/cluster/coredns/coredns-custom-config.yaml +++ b/setup/cluster/coredns/kustomize/coredns-custom-config.yaml @@ -8,21 +8,21 @@ data: # Custom server block for internal domains. All internal domains should # resolve to the cluster proxy. internal.server: | - internal.cloud.payne.io { + internal.cloud2.payne.io { errors cache 30 reload template IN A { - match (.*)\.internal\.cloud\.payne\.io\. - answer "{{ .Name }} 60 IN A 192.168.8.240" + match (.*)\.internal\.cloud2\.payne\.io\. + answer "{{ .Name }} 60 IN A 192.168.8.20" } template IN AAAA { - match (.*)\.internal\.cloud\.payne\.io\. + match (.*)\.internal\.cloud2\.payne\.io\. rcode NXDOMAIN } } # Custom override to set external resolvers. external.override: | - forward . 1.1.1.1 8.8.8.8 { + forward . 
1.1.1.1 { max_concurrent 1000 } diff --git a/setup/cluster/coredns/coredns-lb-service.yaml b/setup/cluster/coredns/kustomize/coredns-lb-service.yaml similarity index 86% rename from setup/cluster/coredns/coredns-lb-service.yaml rename to setup/cluster/coredns/kustomize/coredns-lb-service.yaml index b9d4504..100bcf7 100644 --- a/setup/cluster/coredns/coredns-lb-service.yaml +++ b/setup/cluster/coredns/kustomize/coredns-lb-service.yaml @@ -5,7 +5,7 @@ metadata: name: coredns-lb namespace: kube-system annotations: - metallb.universe.tf/loadBalancerIPs: "192.168.8.241" + metallb.universe.tf/loadBalancerIPs: "192.168.8.20" spec: type: LoadBalancer ports: diff --git a/setup/cluster/docker-registry/README.md b/setup/cluster/docker-registry/README.md new file mode 100644 index 0000000..e69de29 diff --git a/setup/cluster/docker-registry/config/example.env b/setup/cluster/docker-registry/config/example.env deleted file mode 100644 index 4996d40..0000000 --- a/setup/cluster/docker-registry/config/example.env +++ /dev/null @@ -1,2 +0,0 @@ -DOCKER_REGISTRY_STORAGE=10Gi -DOCKER_REGISTRY_HOST=docker-registry.$INTERNAL_DOMAIN diff --git a/setup/cluster/docker-registry/install.sh b/setup/cluster/docker-registry/install.sh new file mode 100755 index 0000000..d9a3794 --- /dev/null +++ b/setup/cluster/docker-registry/install.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -e + +if [ -z "${WC_HOME}" ]; then + echo "Please source the wildcloud environment first. (e.g., \`source ./env.sh\`)" + exit 1 +fi + +CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster" +DOCKER_REGISTRY_DIR="${CLUSTER_SETUP_DIR}/docker-registry" + +echo "Setting up Docker Registry..." + +# Process templates with wild-compile-template-dir +echo "Processing Docker Registry templates..." 
+wild-compile-template-dir --clean ${DOCKER_REGISTRY_DIR}/kustomize.template ${DOCKER_REGISTRY_DIR}/kustomize + +# Apply the docker registry manifests using kustomize +kubectl apply -k "${DOCKER_REGISTRY_DIR}/kustomize" + +echo "Waiting for Docker Registry to be ready..." +kubectl wait --for=condition=available --timeout=300s deployment/docker-registry -n docker-registry + +echo "Docker Registry setup complete!" + +# Show deployment status +kubectl get pods -n docker-registry +kubectl get services -n docker-registry \ No newline at end of file diff --git a/setup/cluster/docker-registry/kustomization.yaml b/setup/cluster/docker-registry/kustomization.yaml deleted file mode 100644 index adc14f4..0000000 --- a/setup/cluster/docker-registry/kustomization.yaml +++ /dev/null @@ -1,40 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: docker-registry -labels: - - includeSelectors: true - pairs: - app: docker-registry - managedBy: wild-cloud -resources: - - deployment.yaml - - ingress.yaml - - service.yaml - - namespace.yaml - - pvc.yaml -configMapGenerator: - - name: docker-registry-config - envs: - - config/config.env -replacements: - - source: - kind: ConfigMap - name: docker-registry-config - fieldPath: data.DOCKER_REGISTRY_STORAGE - targets: - - select: - kind: PersistentVolumeClaim - name: docker-registry-pvc - fieldPaths: - - spec.resources.requests.storage - - source: - kind: ConfigMap - name: docker-registry-config - fieldPath: data.DOCKER_REGISTRY_HOST - targets: - - select: - kind: Ingress - name: docker-registry - fieldPaths: - - spec.rules.0.host - - spec.tls.0.hosts.0 diff --git a/setup/cluster/docker-registry/deployment.yaml b/setup/cluster/docker-registry/kustomize.template/deployment.yaml similarity index 100% rename from setup/cluster/docker-registry/deployment.yaml rename to setup/cluster/docker-registry/kustomize.template/deployment.yaml diff --git a/setup/cluster/docker-registry/ingress.yaml 
b/setup/cluster/docker-registry/kustomize.template/ingress.yaml similarity index 80% rename from setup/cluster/docker-registry/ingress.yaml rename to setup/cluster/docker-registry/kustomize.template/ingress.yaml index f1087fd..7982050 100644 --- a/setup/cluster/docker-registry/ingress.yaml +++ b/setup/cluster/docker-registry/kustomize.template/ingress.yaml @@ -4,7 +4,7 @@ metadata: name: docker-registry spec: rules: - - host: docker-registry.internal.${DOMAIN} + - host: {{ .cloud.dockerRegistryHost }} http: paths: - path: / @@ -16,5 +16,5 @@ spec: number: 5000 tls: - hosts: - - docker-registry.internal.${DOMAIN} + - {{ .cloud.dockerRegistryHost }} secretName: wildcard-internal-wild-cloud-tls diff --git a/setup/cluster/docker-registry/kustomize.template/kustomization.yaml b/setup/cluster/docker-registry/kustomize.template/kustomization.yaml new file mode 100644 index 0000000..2271c5a --- /dev/null +++ b/setup/cluster/docker-registry/kustomize.template/kustomization.yaml @@ -0,0 +1,14 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: docker-registry +labels: + - includeSelectors: true + pairs: + app: docker-registry + managedBy: wild-cloud +resources: + - deployment.yaml + - ingress.yaml + - service.yaml + - namespace.yaml + - pvc.yaml diff --git a/setup/cluster/docker-registry/namespace.yaml b/setup/cluster/docker-registry/kustomize.template/namespace.yaml similarity index 100% rename from setup/cluster/docker-registry/namespace.yaml rename to setup/cluster/docker-registry/kustomize.template/namespace.yaml diff --git a/setup/cluster/docker-registry/kustomize.template/pvc.yaml b/setup/cluster/docker-registry/kustomize.template/pvc.yaml new file mode 100644 index 0000000..41b427d --- /dev/null +++ b/setup/cluster/docker-registry/kustomize.template/pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: docker-registry-pvc +spec: + storageClassName: longhorn + accessModes: + - ReadWriteOnce + volumeMode: 
Filesystem + resources: + requests: + storage: {{ .cluster.dockerRegistry.storage }} diff --git a/setup/cluster/docker-registry/service.yaml b/setup/cluster/docker-registry/kustomize.template/service.yaml similarity index 100% rename from setup/cluster/docker-registry/service.yaml rename to setup/cluster/docker-registry/kustomize.template/service.yaml diff --git a/setup/cluster/docker-registry/kustomize/deployment.yaml b/setup/cluster/docker-registry/kustomize/deployment.yaml new file mode 100644 index 0000000..dc8cc08 --- /dev/null +++ b/setup/cluster/docker-registry/kustomize/deployment.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: docker-registry + labels: + app: docker-registry +spec: + replicas: 1 + selector: + matchLabels: + app: docker-registry + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: docker-registry + spec: + containers: + - image: registry:3.0.0 + name: docker-registry + ports: + - containerPort: 5000 + protocol: TCP + volumeMounts: + - mountPath: /var/lib/registry + name: docker-registry-storage + readOnly: false + volumes: + - name: docker-registry-storage + persistentVolumeClaim: + claimName: docker-registry-pvc diff --git a/setup/cluster/docker-registry/kustomize/ingress.yaml b/setup/cluster/docker-registry/kustomize/ingress.yaml new file mode 100644 index 0000000..1614959 --- /dev/null +++ b/setup/cluster/docker-registry/kustomize/ingress.yaml @@ -0,0 +1,20 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: docker-registry +spec: + rules: + - host: docker-registry.internal.cloud2.payne.io + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: docker-registry + port: + number: 5000 + tls: + - hosts: + - docker-registry.internal.cloud2.payne.io + secretName: wildcard-internal-wild-cloud-tls diff --git a/setup/cluster/docker-registry/kustomize/kustomization.yaml 
b/setup/cluster/docker-registry/kustomize/kustomization.yaml new file mode 100644 index 0000000..2271c5a --- /dev/null +++ b/setup/cluster/docker-registry/kustomize/kustomization.yaml @@ -0,0 +1,14 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: docker-registry +labels: + - includeSelectors: true + pairs: + app: docker-registry + managedBy: wild-cloud +resources: + - deployment.yaml + - ingress.yaml + - service.yaml + - namespace.yaml + - pvc.yaml diff --git a/setup/cluster/docker-registry/kustomize/namespace.yaml b/setup/cluster/docker-registry/kustomize/namespace.yaml new file mode 100644 index 0000000..4cd3252 --- /dev/null +++ b/setup/cluster/docker-registry/kustomize/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: docker-registry diff --git a/setup/cluster/docker-registry/pvc.yaml b/setup/cluster/docker-registry/kustomize/pvc.yaml similarity index 100% rename from setup/cluster/docker-registry/pvc.yaml rename to setup/cluster/docker-registry/kustomize/pvc.yaml diff --git a/setup/cluster/docker-registry/kustomize/service.yaml b/setup/cluster/docker-registry/kustomize/service.yaml new file mode 100644 index 0000000..b040967 --- /dev/null +++ b/setup/cluster/docker-registry/kustomize/service.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: docker-registry + labels: + app: docker-registry +spec: + ports: + - port: 5000 + targetPort: 5000 + selector: + app: docker-registry diff --git a/setup/cluster/externaldns/install.sh b/setup/cluster/externaldns/install.sh new file mode 100755 index 0000000..c7a65bf --- /dev/null +++ b/setup/cluster/externaldns/install.sh @@ -0,0 +1,42 @@ +#!/bin/bash +set -e + +if [ -z "${WC_HOME}" ]; then + echo "Please source the wildcloud environment first. 
(e.g., \`source ./env.sh\`)" + exit 1 +fi + +CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster" +EXTERNALDNS_DIR="${CLUSTER_SETUP_DIR}/externaldns" + +# Process templates with wild-compile-template-dir +echo "Processing ExternalDNS templates..." +wild-compile-template-dir --clean ${EXTERNALDNS_DIR}/kustomize.template ${EXTERNALDNS_DIR}/kustomize + +echo "Setting up ExternalDNS..." + +# Apply ExternalDNS manifests using kustomize +echo "Deploying ExternalDNS..." +kubectl apply -k ${EXTERNALDNS_DIR}/kustomize + +# Setup Cloudflare API token secret +echo "Creating Cloudflare API token secret..." +CLOUDFLARE_API_TOKEN=$(wild-secret cluster.certManager.cloudflare.apiToken) || exit 1 +kubectl create secret generic cloudflare-api-token \ + --namespace externaldns \ + --from-literal=api-token="${CLOUDFLARE_API_TOKEN}" \ + --dry-run=client -o yaml | kubectl apply -f - + +# Wait for ExternalDNS to be ready +echo "Waiting for Cloudflare ExternalDNS to be ready..." +kubectl rollout status deployment/external-dns -n externaldns --timeout=60s + +# echo "Waiting for CoreDNS ExternalDNS to be ready..." +# kubectl rollout status deployment/external-dns-coredns -n externaldns --timeout=60s + +echo "ExternalDNS setup complete!" 
+echo "" +echo "To verify the installation:" +echo " kubectl get pods -n externaldns" +echo " kubectl logs -n externaldns -l app=external-dns -f" +echo " kubectl logs -n externaldns -l app=external-dns-coredns -f" diff --git a/setup/cluster/externaldns/kustomize.template/externaldns-cloudflare.yaml b/setup/cluster/externaldns/kustomize.template/externaldns-cloudflare.yaml new file mode 100644 index 0000000..7d95efe --- /dev/null +++ b/setup/cluster/externaldns/kustomize.template/externaldns-cloudflare.yaml @@ -0,0 +1,39 @@ +--- +# CloudFlare provider for ExternalDNS +apiVersion: apps/v1 +kind: Deployment +metadata: + name: external-dns + namespace: externaldns +spec: + selector: + matchLabels: + app: external-dns + strategy: + type: Recreate + template: + metadata: + labels: + app: external-dns + spec: + serviceAccountName: external-dns + containers: + - name: external-dns + image: registry.k8s.io/external-dns/external-dns:v0.13.4 + args: + - --source=service + - --source=ingress + - --txt-owner-id={{ .cluster.externalDns.ownerId }} + - --provider=cloudflare + - --domain-filter=payne.io + #- --exclude-domains=internal.${DOMAIN} + - --cloudflare-dns-records-per-page=5000 + - --publish-internal-services + - --no-cloudflare-proxied + - --log-level=debug + env: + - name: CF_API_TOKEN + valueFrom: + secretKeyRef: + name: cloudflare-api-token + key: api-token \ No newline at end of file diff --git a/setup/cluster/externaldns/externaldns-rbac.yaml b/setup/cluster/externaldns/kustomize.template/externaldns-rbac.yaml similarity index 100% rename from setup/cluster/externaldns/externaldns-rbac.yaml rename to setup/cluster/externaldns/kustomize.template/externaldns-rbac.yaml diff --git a/setup/cluster/externaldns/kustomize.template/kustomization.yaml b/setup/cluster/externaldns/kustomize.template/kustomization.yaml new file mode 100644 index 0000000..784dc2a --- /dev/null +++ b/setup/cluster/externaldns/kustomize.template/kustomization.yaml @@ -0,0 +1,7 @@ +apiVersion: 
kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +- namespace.yaml +- externaldns-rbac.yaml +- externaldns-cloudflare.yaml \ No newline at end of file diff --git a/setup/cluster/externaldns/kustomize.template/namespace.yaml b/setup/cluster/externaldns/kustomize.template/namespace.yaml new file mode 100644 index 0000000..d16030e --- /dev/null +++ b/setup/cluster/externaldns/kustomize.template/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: externaldns \ No newline at end of file diff --git a/setup/cluster/externaldns/externaldns-cloudflare.yaml b/setup/cluster/externaldns/kustomize/externaldns-cloudflare.yaml similarity index 95% rename from setup/cluster/externaldns/externaldns-cloudflare.yaml rename to setup/cluster/externaldns/kustomize/externaldns-cloudflare.yaml index acc6ac5..d7234b9 100644 --- a/setup/cluster/externaldns/externaldns-cloudflare.yaml +++ b/setup/cluster/externaldns/kustomize/externaldns-cloudflare.yaml @@ -23,7 +23,7 @@ spec: args: - --source=service - --source=ingress - - --txt-owner-id=${OWNER_ID} + - --txt-owner-id=cloud-payne-io-cluster - --provider=cloudflare - --domain-filter=payne.io #- --exclude-domains=internal.${DOMAIN} diff --git a/setup/cluster/externaldns/kustomize/externaldns-rbac.yaml b/setup/cluster/externaldns/kustomize/externaldns-rbac.yaml new file mode 100644 index 0000000..22854eb --- /dev/null +++ b/setup/cluster/externaldns/kustomize/externaldns-rbac.yaml @@ -0,0 +1,35 @@ +--- +# Common RBAC resources for all ExternalDNS deployments +apiVersion: v1 +kind: ServiceAccount +metadata: + name: external-dns + namespace: externaldns +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: external-dns +rules: + - apiGroups: [""] + resources: ["services", "endpoints", "pods"] + verbs: ["get", "watch", "list"] + - apiGroups: ["extensions", "networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "watch", "list"] + - apiGroups: [""] + 
resources: ["nodes"] + verbs: ["list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: external-dns-viewer +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: external-dns +subjects: + - kind: ServiceAccount + name: external-dns + namespace: externaldns \ No newline at end of file diff --git a/setup/cluster/externaldns/kustomize/kustomization.yaml b/setup/cluster/externaldns/kustomize/kustomization.yaml new file mode 100644 index 0000000..784dc2a --- /dev/null +++ b/setup/cluster/externaldns/kustomize/kustomization.yaml @@ -0,0 +1,7 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +- namespace.yaml +- externaldns-rbac.yaml +- externaldns-cloudflare.yaml \ No newline at end of file diff --git a/setup/cluster/externaldns/kustomize/namespace.yaml b/setup/cluster/externaldns/kustomize/namespace.yaml new file mode 100644 index 0000000..d16030e --- /dev/null +++ b/setup/cluster/externaldns/kustomize/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: externaldns \ No newline at end of file diff --git a/setup/cluster/install-all.sh b/setup/cluster/install-all.sh new file mode 100755 index 0000000..a469258 --- /dev/null +++ b/setup/cluster/install-all.sh @@ -0,0 +1,34 @@ +#!/bin/bash +set -e + +# Navigate to script directory +SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" +SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" +cd "$SCRIPT_DIR" + +echo "Setting up your wild-cloud cluster services..." +echo + +./metallb/install.sh +./longhorn/install.sh +./traefik/install.sh +./coredns/install.sh +./cert-manager/install.sh +./externaldns/install.sh +./kubernetes-dashboard/install.sh +./nfs/install.sh +./docker-registry/install.sh + +echo "Infrastructure setup complete!" +echo +echo "Next steps:" +echo "1. Install Helm charts for non-infrastructure components" +INTERNAL_DOMAIN=$(wild-config cloud.internalDomain) +echo "2. 
Access the dashboard at: https://dashboard.${INTERNAL_DOMAIN}" +echo "3. Get the dashboard token with: ./bin/dashboard-token" +echo +echo "To verify components, run:" +echo "- kubectl get pods -n cert-manager" +echo "- kubectl get pods -n externaldns" +echo "- kubectl get pods -n kubernetes-dashboard" +echo "- kubectl get clusterissuers" \ No newline at end of file diff --git a/setup/cluster/kubernetes-dashboard/README.md b/setup/cluster/kubernetes-dashboard/README.md new file mode 100644 index 0000000..e69de29 diff --git a/setup/cluster/kubernetes-dashboard/install.sh b/setup/cluster/kubernetes-dashboard/install.sh new file mode 100755 index 0000000..27c70a4 --- /dev/null +++ b/setup/cluster/kubernetes-dashboard/install.sh @@ -0,0 +1,60 @@ +#!/bin/bash +set -e + +if [ -z "${WC_HOME}" ]; then + echo "Please source the wildcloud environment first. (e.g., \`source ./env.sh\`)" + exit 1 +fi + +CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster" +KUBERNETES_DASHBOARD_DIR="${CLUSTER_SETUP_DIR}/kubernetes-dashboard" + +echo "Setting up Kubernetes Dashboard..." + +# Process templates with wild-compile-template-dir +echo "Processing Dashboard templates..." +wild-compile-template-dir --clean ${KUBERNETES_DASHBOARD_DIR}/kustomize.template ${KUBERNETES_DASHBOARD_DIR}/kustomize + +NAMESPACE="kubernetes-dashboard" + +# Apply the official dashboard installation +echo "Installing Kubernetes Dashboard core components..." +kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml + +# Wait for cert-manager certificates to be ready +echo "Waiting for cert-manager certificates to be ready..." 
+kubectl wait --for=condition=Ready certificate wildcard-internal-wild-cloud -n cert-manager --timeout=300s || echo "Warning: Internal wildcard certificate not ready yet" +kubectl wait --for=condition=Ready certificate wildcard-wild-cloud -n cert-manager --timeout=300s || echo "Warning: Wildcard certificate not ready yet" + +# Copying cert-manager secrets to the dashboard namespace (if available) +echo "Copying cert-manager secrets to dashboard namespace..." +if kubectl get secret wildcard-internal-wild-cloud-tls -n cert-manager >/dev/null 2>&1; then + copy-secret cert-manager:wildcard-internal-wild-cloud-tls $NAMESPACE +else + echo "Warning: wildcard-internal-wild-cloud-tls secret not yet available" +fi + +if kubectl get secret wildcard-wild-cloud-tls -n cert-manager >/dev/null 2>&1; then + copy-secret cert-manager:wildcard-wild-cloud-tls $NAMESPACE +else + echo "Warning: wildcard-wild-cloud-tls secret not yet available" +fi + +# Apply dashboard customizations using kustomize +echo "Applying dashboard customizations..." +kubectl apply -k "${KUBERNETES_DASHBOARD_DIR}/kustomize" + +# Restart CoreDNS to pick up the changes +kubectl delete pods -n kube-system -l k8s-app=kube-dns +echo "Restarted CoreDNS to pick up DNS changes" + +# Wait for dashboard to be ready +echo "Waiting for Kubernetes Dashboard to be ready..." +kubectl rollout status deployment/kubernetes-dashboard -n $NAMESPACE --timeout=60s + +echo "Kubernetes Dashboard setup complete!" 
+INTERNAL_DOMAIN=$(wild-config cloud.internalDomain) || exit 1 +echo "Access the dashboard at: https://dashboard.${INTERNAL_DOMAIN}" +echo "" +echo "To get the authentication token, run:" +echo "wild-dashboard-token" diff --git a/setup/cluster/kubernetes-dashboard/dashboard-admin-rbac.yaml b/setup/cluster/kubernetes-dashboard/kustomize.template/dashboard-admin-rbac.yaml similarity index 100% rename from setup/cluster/kubernetes-dashboard/dashboard-admin-rbac.yaml rename to setup/cluster/kubernetes-dashboard/kustomize.template/dashboard-admin-rbac.yaml diff --git a/setup/cluster/kubernetes-dashboard/dashboard-kube-system.yaml b/setup/cluster/kubernetes-dashboard/kustomize.template/dashboard-kube-system.yaml similarity index 82% rename from setup/cluster/kubernetes-dashboard/dashboard-kube-system.yaml rename to setup/cluster/kubernetes-dashboard/kustomize.template/dashboard-kube-system.yaml index a404e51..b4e9479 100644 --- a/setup/cluster/kubernetes-dashboard/dashboard-kube-system.yaml +++ b/setup/cluster/kubernetes-dashboard/kustomize.template/dashboard-kube-system.yaml @@ -1,6 +1,6 @@ --- # Internal-only middleware -apiVersion: traefik.containo.us/v1alpha1 +apiVersion: traefik.io/v1alpha1 kind: Middleware metadata: name: internal-only @@ -16,7 +16,7 @@ spec: --- # HTTPS redirect middleware -apiVersion: traefik.containo.us/v1alpha1 +apiVersion: traefik.io/v1alpha1 kind: Middleware metadata: name: dashboard-redirect-scheme @@ -28,7 +28,7 @@ spec: --- # IngressRoute for Dashboard -apiVersion: traefik.containo.us/v1alpha1 +apiVersion: traefik.io/v1alpha1 kind: IngressRoute metadata: name: kubernetes-dashboard-https @@ -37,7 +37,7 @@ spec: entryPoints: - websecure routes: - - match: Host(`dashboard.internal.${DOMAIN}`) + - match: Host(`dashboard.{{ .cloud.internalDomain }}`) kind: Rule middlewares: - name: internal-only @@ -52,7 +52,7 @@ spec: --- # HTTP to HTTPS redirect. # FIXME: Is this needed? 
-apiVersion: traefik.containo.us/v1alpha1 +apiVersion: traefik.io/v1alpha1 kind: IngressRoute metadata: name: kubernetes-dashboard-http @@ -61,7 +61,7 @@ spec: entryPoints: - web routes: - - match: Host(`dashboard.internal.${DOMAIN}`) + - match: Host(`dashboard.{{ .cloud.internalDomain }}`) kind: Rule middlewares: - name: dashboard-redirect-scheme @@ -74,11 +74,11 @@ spec: --- # ServersTransport for HTTPS backend with skip verify. # FIXME: Is this needed? -apiVersion: traefik.containo.us/v1alpha1 +apiVersion: traefik.io/v1alpha1 kind: ServersTransport metadata: name: dashboard-transport namespace: kubernetes-dashboard spec: insecureSkipVerify: true - serverName: dashboard.internal.${DOMAIN} + serverName: dashboard.{{ .cloud.internalDomain }} diff --git a/setup/cluster/kubernetes-dashboard/kustomize.template/kustomization.yaml b/setup/cluster/kubernetes-dashboard/kustomize.template/kustomization.yaml new file mode 100644 index 0000000..d7b8227 --- /dev/null +++ b/setup/cluster/kubernetes-dashboard/kustomize.template/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +- dashboard-admin-rbac.yaml +- dashboard-kube-system.yaml \ No newline at end of file diff --git a/setup/cluster/kubernetes-dashboard/kustomize/dashboard-admin-rbac.yaml b/setup/cluster/kubernetes-dashboard/kustomize/dashboard-admin-rbac.yaml new file mode 100644 index 0000000..8316d74 --- /dev/null +++ b/setup/cluster/kubernetes-dashboard/kustomize/dashboard-admin-rbac.yaml @@ -0,0 +1,32 @@ +--- +# Service Account and RBAC for Dashboard admin access +apiVersion: v1 +kind: ServiceAccount +metadata: + name: dashboard-admin + namespace: kubernetes-dashboard + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: dashboard-admin +subjects: + - kind: ServiceAccount + name: dashboard-admin + namespace: kubernetes-dashboard +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: 
rbac.authorization.k8s.io + +--- +# Token for dashboard-admin +apiVersion: v1 +kind: Secret +metadata: + name: dashboard-admin-token + namespace: kubernetes-dashboard + annotations: + kubernetes.io/service-account.name: dashboard-admin +type: kubernetes.io/service-account-token \ No newline at end of file diff --git a/setup/cluster/kubernetes-dashboard/kustomize/dashboard-kube-system.yaml b/setup/cluster/kubernetes-dashboard/kustomize/dashboard-kube-system.yaml new file mode 100644 index 0000000..3e7f1a4 --- /dev/null +++ b/setup/cluster/kubernetes-dashboard/kustomize/dashboard-kube-system.yaml @@ -0,0 +1,84 @@ +--- +# Internal-only middleware +apiVersion: traefik.io/v1alpha1 +kind: Middleware +metadata: + name: internal-only + namespace: kubernetes-dashboard +spec: + ipWhiteList: + # Restrict to local private network ranges + sourceRange: + - 127.0.0.1/32 # localhost + - 10.0.0.0/8 # Private network + - 172.16.0.0/12 # Private network + - 192.168.0.0/16 # Private network + +--- +# HTTPS redirect middleware +apiVersion: traefik.io/v1alpha1 +kind: Middleware +metadata: + name: dashboard-redirect-scheme + namespace: kubernetes-dashboard +spec: + redirectScheme: + scheme: https + permanent: true + +--- +# IngressRoute for Dashboard +apiVersion: traefik.io/v1alpha1 +kind: IngressRoute +metadata: + name: kubernetes-dashboard-https + namespace: kubernetes-dashboard +spec: + entryPoints: + - websecure + routes: + - match: Host(`dashboard.internal.cloud2.payne.io`) + kind: Rule + middlewares: + - name: internal-only + namespace: kubernetes-dashboard + services: + - name: kubernetes-dashboard + port: 443 + serversTransport: dashboard-transport + tls: + secretName: wildcard-internal-wild-cloud-tls + +--- +# HTTP to HTTPS redirect. +# FIXME: Is this needed? 
+apiVersion: traefik.io/v1alpha1 +kind: IngressRoute +metadata: + name: kubernetes-dashboard-http + namespace: kubernetes-dashboard +spec: + entryPoints: + - web + routes: + - match: Host(`dashboard.internal.cloud2.payne.io`) + kind: Rule + middlewares: + - name: dashboard-redirect-scheme + namespace: kubernetes-dashboard + services: + - name: kubernetes-dashboard + port: 443 + serversTransport: dashboard-transport + +--- +# ServersTransport for HTTPS backend with skip verify. +# FIXME: Is this needed? +apiVersion: traefik.io/v1alpha1 +kind: ServersTransport +metadata: + name: dashboard-transport + namespace: kubernetes-dashboard +spec: + insecureSkipVerify: true + serverName: dashboard.internal.cloud2.payne.io diff --git a/setup/cluster/kubernetes-dashboard/kustomize/kustomization.yaml b/setup/cluster/kubernetes-dashboard/kustomize/kustomization.yaml new file mode 100644 index 0000000..d7b8227 --- /dev/null +++ b/setup/cluster/kubernetes-dashboard/kustomize/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +- dashboard-admin-rbac.yaml +- dashboard-kube-system.yaml \ No newline at end of file diff --git a/setup/cluster/longhorn/install.sh b/setup/cluster/longhorn/install.sh new file mode 100755 index 0000000..9e15237 --- /dev/null +++ b/setup/cluster/longhorn/install.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -e + +if [ -z "${WC_HOME}" ]; then + echo "Please source the wildcloud environment first. (e.g., \`source ./env.sh\`)" + exit 1 +fi + +CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster" +LONGHORN_DIR="${CLUSTER_SETUP_DIR}/longhorn" + +echo "Setting up Longhorn..." + +# Process templates with wild-compile-template-dir +echo "Processing Longhorn templates..." +wild-compile-template-dir --clean ${LONGHORN_DIR}/kustomize.template ${LONGHORN_DIR}/kustomize + +# Apply Longhorn with kustomize to apply our customizations +kubectl apply -k ${LONGHORN_DIR}/kustomize/ + +echo "Longhorn setup complete!" 
diff --git a/setup/cluster/longhorn/kustomization.yaml b/setup/cluster/longhorn/kustomize.template/kustomization.yaml similarity index 100% rename from setup/cluster/longhorn/kustomization.yaml rename to setup/cluster/longhorn/kustomize.template/kustomization.yaml diff --git a/setup/cluster/longhorn/longhorn.yaml b/setup/cluster/longhorn/kustomize.template/longhorn.yaml similarity index 99% rename from setup/cluster/longhorn/longhorn.yaml rename to setup/cluster/longhorn/kustomize.template/longhorn.yaml index 162a7e2..13e231c 100644 --- a/setup/cluster/longhorn/longhorn.yaml +++ b/setup/cluster/longhorn/kustomize.template/longhorn.yaml @@ -4,6 +4,10 @@ apiVersion: v1 kind: Namespace metadata: name: longhorn-system + labels: + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/warn: privileged --- # Source: longhorn/templates/priorityclass.yaml apiVersion: scheduling.k8s.io/v1 diff --git a/setup/cluster/longhorn/kustomize/kustomization.yaml b/setup/cluster/longhorn/kustomize/kustomization.yaml new file mode 100644 index 0000000..a2b8088 --- /dev/null +++ b/setup/cluster/longhorn/kustomize/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - longhorn.yaml diff --git a/setup/cluster/longhorn/kustomize/longhorn.yaml b/setup/cluster/longhorn/kustomize/longhorn.yaml new file mode 100644 index 0000000..13e231c --- /dev/null +++ b/setup/cluster/longhorn/kustomize/longhorn.yaml @@ -0,0 +1,5189 @@ +--- +# Builtin: "helm template" does not respect --create-namespace +apiVersion: v1 +kind: Namespace +metadata: + name: longhorn-system + labels: + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/warn: privileged +--- +# Source: longhorn/templates/priorityclass.yaml +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: "longhorn-critical" + labels: + 
app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 +description: "Ensure Longhorn pods have the highest priority to prevent any unexpected eviction by the Kubernetes scheduler under node pressure" +globalDefault: false +preemptionPolicy: PreemptLowerPriority +value: 1000000000 +--- +# Source: longhorn/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: longhorn-service-account + namespace: longhorn-system + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 +--- +# Source: longhorn/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: longhorn-ui-service-account + namespace: longhorn-system + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 +--- +# Source: longhorn/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: longhorn-support-bundle + namespace: longhorn-system + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 +--- +# Source: longhorn/templates/default-resource.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: longhorn-default-resource + namespace: longhorn-system + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 +data: + default-resource.yaml: |- +--- +# Source: longhorn/templates/default-setting.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: longhorn-default-setting + namespace: longhorn-system + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 +data: + default-setting.yaml: |- + priority-class: longhorn-critical + disable-revision-counter: true +--- +# Source: longhorn/templates/storageclass.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + 
name: longhorn-storageclass + namespace: longhorn-system + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 +data: + storageclass.yaml: | + kind: StorageClass + apiVersion: storage.k8s.io/v1 + metadata: + name: longhorn + annotations: + storageclass.kubernetes.io/is-default-class: "true" + provisioner: driver.longhorn.io + allowVolumeExpansion: true + reclaimPolicy: "Delete" + volumeBindingMode: Immediate + parameters: + numberOfReplicas: "3" + staleReplicaTimeout: "30" + fromBackup: "" + fsType: "ext4" + dataLocality: "disabled" + unmapMarkSnapChainRemoved: "ignored" + disableRevisionCounter: "true" + dataEngine: "v1" +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: backingimagedatasources.longhorn.io +spec: + group: longhorn.io + names: + kind: BackingImageDataSource + listKind: BackingImageDataSourceList + plural: backingimagedatasources + shortNames: + - lhbids + singular: backingimagedatasource + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The current state of the pod used to provision the backing image + file from source + jsonPath: .status.currentState + name: State + type: string + - description: The data source type + jsonPath: .spec.sourceType + name: SourceType + type: string + - description: The node the backing image file will be prepared on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The disk the backing image file will be prepared on + jsonPath: .spec.diskUUID + name: DiskUUID + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: BackingImageDataSource 
is where Longhorn stores backing image + data source object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The system generated UUID of the provisioned backing image file + jsonPath: .spec.uuid + name: UUID + type: string + - description: The current state of the pod used to provision the backing image + file from source + jsonPath: .status.currentState + name: State + type: string + - description: The data source type + jsonPath: .spec.sourceType + name: SourceType + type: string + - description: The backing image file size + jsonPath: .status.size + name: Size + type: string + - description: The node the backing image file will be prepared on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The disk the backing image file will be prepared on + jsonPath: .spec.diskUUID + name: DiskUUID + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BackingImageDataSource is where Longhorn stores backing image + data source object. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackingImageDataSourceSpec defines the desired state of the + Longhorn backing image data source + properties: + checksum: + type: string + diskPath: + type: string + diskUUID: + type: string + fileTransferred: + type: boolean + nodeID: + type: string + parameters: + additionalProperties: + type: string + type: object + sourceType: + enum: + - download + - upload + - export-from-volume + - restore + - clone + type: string + uuid: + type: string + type: object + status: + description: BackingImageDataSourceStatus defines the observed state of + the Longhorn backing image data source + properties: + checksum: + type: string + currentState: + type: string + ip: + type: string + message: + type: string + ownerID: + type: string + progress: + type: integer + runningParameters: + additionalProperties: + type: string + nullable: true + type: object + size: + format: int64 + type: integer + storageIP: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + 
app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: backingimagemanagers.longhorn.io +spec: + group: longhorn.io + names: + kind: BackingImageManager + listKind: BackingImageManagerList + plural: backingimagemanagers + shortNames: + - lhbim + singular: backingimagemanager + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The current state of the manager + jsonPath: .status.currentState + name: State + type: string + - description: The image the manager pod will use + jsonPath: .spec.image + name: Image + type: string + - description: The node the manager is on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The disk the manager is responsible for + jsonPath: .spec.diskUUID + name: DiskUUID + type: string + - description: The disk path the manager is using + jsonPath: .spec.diskPath + name: DiskPath + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: BackingImageManager is where Longhorn stores backing image manager + object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The current state of the manager + jsonPath: .status.currentState + name: State + type: string + - description: The image the manager pod will use + jsonPath: .spec.image + name: Image + type: string + - description: The node the manager is on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The disk the manager is responsible for + jsonPath: .spec.diskUUID + name: DiskUUID + type: string + - description: The disk path the manager is using + jsonPath: .spec.diskPath + name: DiskPath + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BackingImageManager is where Longhorn stores backing image manager + object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackingImageManagerSpec defines the desired state of the + Longhorn backing image manager + properties: + backingImages: + additionalProperties: + type: string + type: object + diskPath: + type: string + diskUUID: + type: string + image: + type: string + nodeID: + type: string + type: object + status: + description: BackingImageManagerStatus defines the observed state of the + Longhorn backing image manager + properties: + apiMinVersion: + type: integer + apiVersion: + type: integer + backingImageFileMap: + additionalProperties: + properties: + currentChecksum: + type: string + message: + type: string + name: + type: string + progress: + type: integer + realSize: + format: int64 + type: integer + senderManagerAddress: + type: string + sendingReference: + type: integer + size: + format: int64 + type: integer + state: + type: string + uuid: + type: string + virtualSize: + format: int64 + type: integer + type: object + nullable: true + type: object + currentState: + type: string + ip: + type: string + ownerID: + type: string + storageIP: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: backingimages.longhorn.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: longhorn-conversion-webhook + namespace: longhorn-system + path: /v1/webhook/conversion + port: 9501 + conversionReviewVersions: + - v1beta2 + - v1beta1 + group: longhorn.io + names: + kind: BackingImage + listKind: 
BackingImageList + plural: backingimages + shortNames: + - lhbi + singular: backingimage + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The backing image name + jsonPath: .spec.image + name: Image + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: BackingImage is where Longhorn stores backing image object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The system generated UUID + jsonPath: .status.uuid + name: UUID + type: string + - description: The source of the backing image file data + jsonPath: .spec.sourceType + name: SourceType + type: string + - description: The backing image file size in each disk + jsonPath: .status.size + name: Size + type: string + - description: The virtual size of the image (may be larger than file size) + jsonPath: .status.virtualSize + name: VirtualSize + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BackingImage is where Longhorn stores backing image object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackingImageSpec defines the desired state of the Longhorn + backing image + properties: + checksum: + type: string + dataEngine: + default: v1 + enum: + - v1 + - v2 + type: string + diskFileSpecMap: + additionalProperties: + properties: + dataEngine: + enum: + - v1 + - v2 + type: string + evictionRequested: + type: boolean + type: object + type: object + diskSelector: + items: + type: string + type: array + disks: + additionalProperties: + type: string + description: Deprecated. We are now using DiskFileSpecMap to assign + different spec to the file on different disks. + type: object + minNumberOfCopies: + type: integer + nodeSelector: + items: + type: string + type: array + secret: + type: string + secretNamespace: + type: string + sourceParameters: + additionalProperties: + type: string + type: object + sourceType: + enum: + - download + - upload + - export-from-volume + - restore + - clone + type: string + type: object + status: + description: BackingImageStatus defines the observed state of the Longhorn + backing image status + properties: + checksum: + type: string + diskFileStatusMap: + additionalProperties: + properties: + dataEngine: + enum: + - v1 + - v2 + type: string + lastStateTransitionTime: + type: string + message: + type: string + progress: + type: integer + state: + type: string + type: object + nullable: true + type: object + diskLastRefAtMap: + additionalProperties: + type: string + nullable: true + type: object + ownerID: + type: string + realSize: + description: Real size of image in bytes, which may be smaller than + the size when the file is a sparse file. Will be zero until known + (e.g. 
while a backing image is uploading) + format: int64 + type: integer + size: + format: int64 + type: integer + uuid: + type: string + v2FirstCopyDisk: + type: string + v2FirstCopyStatus: + description: It is pending -> in-progress -> ready/failed + type: string + virtualSize: + description: Virtual size of image in bytes, which may be larger than + physical size. Will be zero until known (e.g. while a backing image + is uploading) + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: backupbackingimages.longhorn.io +spec: + group: longhorn.io + names: + kind: BackupBackingImage + listKind: BackupBackingImageList + plural: backupbackingimages + shortNames: + - lhbbi + singular: backupbackingimage + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The backing image name + jsonPath: .status.backingImage + name: BackingImage + type: string + - description: The backing image size + jsonPath: .status.size + name: Size + type: string + - description: The backing image backup upload finished time + jsonPath: .status.backupCreatedAt + name: BackupCreatedAt + type: string + - description: The backing image backup state + jsonPath: .status.state + name: State + type: string + - description: The last synced time + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: BackupBackingImage is where Longhorn stores backing image backup + object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackupBackingImageSpec defines the desired state of the Longhorn + backing image backup + properties: + backingImage: + description: |- + The backing image name. + type: string + backupTargetName: + description: The backup target name. + nullable: true + type: string + labels: + additionalProperties: + type: string + description: The labels of backing image backup. + type: object + syncRequestedAt: + description: The time to request run sync the remote backing image + backup. + format: date-time + nullable: true + type: string + userCreated: + description: |- + Is this CR created by user through API or UI. + type: boolean + required: + - backingImage + - userCreated + type: object + status: + description: BackupBackingImageStatus defines the observed state of the + Longhorn backing image backup + properties: + backingImage: + description: The backing image name. + type: string + backupCreatedAt: + description: The backing image backup upload finished time. + type: string + checksum: + description: The checksum of the backing image. + type: string + compressionMethod: + description: Compression method + type: string + error: + description: The error message when taking the backing image backup. + type: string + labels: + additionalProperties: + type: string + description: The labels of backing image backup. 
+ nullable: true + type: object + lastSyncedAt: + description: The last time that the backing image backup was synced + with the remote backup target. + format: date-time + nullable: true + type: string + managerAddress: + description: The address of the backing image manager that runs backing + image backup. + type: string + messages: + additionalProperties: + type: string + description: The error messages when listing or inspecting backing + image backup. + nullable: true + type: object + ownerID: + description: The node ID on which the controller is responsible to + reconcile this CR. + type: string + progress: + description: The backing image backup progress. + type: integer + secret: + description: Record the secret if this backup backing image is encrypted + type: string + secretNamespace: + description: Record the secret namespace if this backup backing image + is encrypted + type: string + size: + description: The backing image size. + format: int64 + type: integer + state: + description: |- + The backing image backup creation state. + Can be "", "InProgress", "Completed", "Error", "Unknown". + type: string + url: + description: The backing image backup URL. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: backups.longhorn.io +spec: + group: longhorn.io + names: + kind: Backup + listKind: BackupList + plural: backups + shortNames: + - lhb + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The snapshot name + jsonPath: .status.snapshotName + name: SnapshotName + type: string + - description: The snapshot size + jsonPath: .status.size + name: SnapshotSize + type: string + - description: The snapshot creation time + jsonPath: .status.snapshotCreatedAt + name: SnapshotCreatedAt + type: string + - description: The backup state + jsonPath: .status.state + name: State + type: string + - description: The backup last synced time + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: Backup is where Longhorn stores backup object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The snapshot name + jsonPath: .status.snapshotName + name: SnapshotName + type: string + - description: The snapshot size + jsonPath: .status.size + name: SnapshotSize + type: string + - description: The snapshot creation time + jsonPath: .status.snapshotCreatedAt + name: SnapshotCreatedAt + type: string + - description: The backup target name + jsonPath: .status.backupTargetName + name: BackupTarget + type: string + - description: The backup state + jsonPath: .status.state + name: State + type: string + - description: The backup last synced time + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: Backup is where Longhorn stores backup object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackupSpec defines the desired state of the Longhorn backup + properties: + backupMode: + description: |- + The backup mode of this backup. + Can be "full" or "incremental" + enum: + - full + - incremental + - "" + type: string + labels: + additionalProperties: + type: string + description: The labels of snapshot backup. + type: object + snapshotName: + description: The snapshot name. + type: string + syncRequestedAt: + description: The time to request run sync the remote backup. + format: date-time + nullable: true + type: string + type: object + status: + description: BackupStatus defines the observed state of the Longhorn backup + properties: + backupCreatedAt: + description: The snapshot backup upload finished time. + type: string + backupTargetName: + description: The backup target name. + type: string + compressionMethod: + description: Compression method + type: string + error: + description: The error message when taking the snapshot backup. + type: string + labels: + additionalProperties: + type: string + description: The labels of snapshot backup. + nullable: true + type: object + lastSyncedAt: + description: The last time that the backup was synced with the remote + backup target. + format: date-time + nullable: true + type: string + messages: + additionalProperties: + type: string + description: The error messages when calling longhorn engine on listing + or inspecting backups. + nullable: true + type: object + newlyUploadDataSize: + description: Size in bytes of newly uploaded data + type: string + ownerID: + description: The node ID on which the controller is responsible to + reconcile this backup CR. + type: string + progress: + description: The snapshot backup progress. 
+ type: integer + reUploadedDataSize: + description: Size in bytes of reuploaded data + type: string + replicaAddress: + description: The address of the replica that runs snapshot backup. + type: string + size: + description: The snapshot size. + type: string + snapshotCreatedAt: + description: The snapshot creation time. + type: string + snapshotName: + description: The snapshot name. + type: string + state: + description: |- + The backup creation state. + Can be "", "InProgress", "Completed", "Error", "Unknown". + type: string + url: + description: The snapshot backup URL. + type: string + volumeBackingImageName: + description: The volume's backing image name. + type: string + volumeCreated: + description: The volume creation time. + type: string + volumeName: + description: The volume name. + type: string + volumeSize: + description: The volume size. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: backuptargets.longhorn.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: longhorn-conversion-webhook + namespace: longhorn-system + path: /v1/webhook/conversion + port: 9501 + conversionReviewVersions: + - v1beta2 + - v1beta1 + group: longhorn.io + names: + kind: BackupTarget + listKind: BackupTargetList + plural: backuptargets + shortNames: + - lhbt + singular: backuptarget + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The backup target URL + jsonPath: .spec.backupTargetURL + name: URL + type: string + - description: The backup target credential secret + jsonPath: .spec.credentialSecret + name: Credential + 
type: string + - description: The backup target poll interval + jsonPath: .spec.pollInterval + name: LastBackupAt + type: string + - description: Indicate whether the backup target is available or not + jsonPath: .status.available + name: Available + type: boolean + - description: The backup target last synced time + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: BackupTarget is where Longhorn stores backup target object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The backup target URL + jsonPath: .spec.backupTargetURL + name: URL + type: string + - description: The backup target credential secret + jsonPath: .spec.credentialSecret + name: Credential + type: string + - description: The backup target poll interval + jsonPath: .spec.pollInterval + name: LastBackupAt + type: string + - description: Indicate whether the backup target is available or not + jsonPath: .status.available + name: Available + type: boolean + - description: The backup target last synced time + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: BackupTarget is where Longhorn stores backup target object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackupTargetSpec defines the desired state of the Longhorn + backup target + properties: + backupTargetURL: + description: The backup target URL. 
+ type: string + credentialSecret: + description: The backup target credential secret. + type: string + pollInterval: + description: The interval that the cluster needs to run sync with + the backup target. + type: string + syncRequestedAt: + description: The time to request run sync the remote backup target. + format: date-time + nullable: true + type: string + type: object + status: + description: BackupTargetStatus defines the observed state of the Longhorn + backup target + properties: + available: + description: Available indicates if the remote backup target is available + or not. + type: boolean + conditions: + description: Records the reason on why the backup target is unavailable. + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + type: string + message: + description: Human-readable message indicating details about + last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's + last transition. + type: string + status: + description: |- + Status is the status of the condition. + Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + lastSyncedAt: + description: The last time that the controller synced with the remote + backup target. + format: date-time + nullable: true + type: string + ownerID: + description: The node ID on which the controller is responsible to + reconcile this backup target CR. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: backupvolumes.longhorn.io +spec: + group: longhorn.io + names: + kind: BackupVolume + listKind: BackupVolumeList + plural: backupvolumes + shortNames: + - lhbv + singular: backupvolume + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The backup volume creation time + jsonPath: .status.createdAt + name: CreatedAt + type: string + - description: The backup volume last backup name + jsonPath: .status.lastBackupName + name: LastBackupName + type: string + - description: The backup volume last backup time + jsonPath: .status.lastBackupAt + name: LastBackupAt + type: string + - description: The backup volume last synced time + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: BackupVolume is where Longhorn stores backup volume object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The backup target name + jsonPath: .spec.backupTargetName + name: BackupTarget + type: string + - description: The backup volume creation time + jsonPath: .status.createdAt + name: CreatedAt + type: string + - description: The backup volume last backup name + jsonPath: .status.lastBackupName + name: LastBackupName + type: string + - description: The backup volume last backup time + jsonPath: .status.lastBackupAt + name: LastBackupAt + type: string + - description: The backup volume last synced time + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: BackupVolume is where Longhorn stores backup volume object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BackupVolumeSpec defines the desired state of the Longhorn + backup volume + properties: + backupTargetName: + description: The backup target name that the backup volume was synced. + nullable: true + type: string + syncRequestedAt: + description: The time to request run sync the remote backup volume. + format: date-time + nullable: true + type: string + volumeName: + description: The volume name that the backup volume was used to backup. + type: string + type: object + status: + description: BackupVolumeStatus defines the observed state of the Longhorn + backup volume + properties: + backingImageChecksum: + description: the backing image checksum. + type: string + backingImageName: + description: The backing image name. + type: string + createdAt: + description: The backup volume creation time. + type: string + dataStored: + description: The backup volume block count. + type: string + labels: + additionalProperties: + type: string + description: The backup volume labels. + nullable: true + type: object + lastBackupAt: + description: The latest volume backup time. + type: string + lastBackupName: + description: The latest volume backup name. + type: string + lastModificationTime: + description: The backup volume config last modification time. + format: date-time + nullable: true + type: string + lastSyncedAt: + description: The last time that the backup volume was synced into + the cluster. + format: date-time + nullable: true + type: string + messages: + additionalProperties: + type: string + description: The error messages when call longhorn engine on list + or inspect backup volumes. + nullable: true + type: object + ownerID: + description: The node ID on which the controller is responsible to + reconcile this backup volume CR. + type: string + size: + description: The backup volume size. 
+ type: string + storageClassName: + description: the storage class name of pv/pvc binding with the volume. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: engineimages.longhorn.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: longhorn-conversion-webhook + namespace: longhorn-system + path: /v1/webhook/conversion + port: 9501 + conversionReviewVersions: + - v1beta2 + - v1beta1 + group: longhorn.io + names: + kind: EngineImage + listKind: EngineImageList + plural: engineimages + shortNames: + - lhei + singular: engineimage + preserveUnknownFields: false + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: State of the engine image + jsonPath: .status.state + name: State + type: string + - description: The Longhorn engine image + jsonPath: .spec.image + name: Image + type: string + - description: Number of resources using the engine image + jsonPath: .status.refCount + name: RefCount + type: integer + - description: The build date of the engine image + jsonPath: .status.buildDate + name: BuildDate + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: EngineImage is where Longhorn stores engine image object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Compatibility of the engine image + jsonPath: .status.incompatible + name: Incompatible + type: boolean + - description: State of the engine image + jsonPath: .status.state + name: State + type: string + - description: The Longhorn engine image + jsonPath: .spec.image + name: Image + type: string + - description: Number of resources using the engine image + jsonPath: .status.refCount + name: RefCount + type: integer + - description: The build date of the engine image + jsonPath: .status.buildDate + name: BuildDate + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: EngineImage is where Longhorn stores engine image object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EngineImageSpec defines the desired state of the Longhorn + engine image + properties: + image: + minLength: 1 + type: string + required: + - image + type: object + status: + description: EngineImageStatus defines the observed state of the Longhorn + engine image + properties: + buildDate: + type: string + cliAPIMinVersion: + type: integer + cliAPIVersion: + type: integer + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + type: string + message: + description: Human-readable message indicating details about + last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's + last transition. + type: string + status: + description: |- + Status is the status of the condition. + Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. 
+ type: string + type: object + nullable: true + type: array + controllerAPIMinVersion: + type: integer + controllerAPIVersion: + type: integer + dataFormatMinVersion: + type: integer + dataFormatVersion: + type: integer + gitCommit: + type: string + incompatible: + type: boolean + noRefSince: + type: string + nodeDeploymentMap: + additionalProperties: + type: boolean + nullable: true + type: object + ownerID: + type: string + refCount: + type: integer + state: + type: string + version: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: engines.longhorn.io +spec: + group: longhorn.io + names: + kind: Engine + listKind: EngineList + plural: engines + shortNames: + - lhe + singular: engine + preserveUnknownFields: false + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The current state of the engine + jsonPath: .status.currentState + name: State + type: string + - description: The node that the engine is on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The instance manager of the engine + jsonPath: .status.instanceManagerName + name: InstanceManager + type: string + - description: The current image of the engine + jsonPath: .status.currentImage + name: Image + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Engine is where Longhorn stores engine object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The data engine of the engine + jsonPath: .spec.dataEngine + name: Data Engine + type: string + - description: The current state of the engine + jsonPath: .status.currentState + name: State + type: string + - description: The node that the engine is on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The instance manager of the engine + jsonPath: .status.instanceManagerName + name: InstanceManager + type: string + - description: The current image of the engine + jsonPath: .status.currentImage + name: Image + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Engine is where Longhorn stores engine object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: EngineSpec defines the desired state of the Longhorn engine + properties: + active: + type: boolean + backendStoreDriver: + description: Deprecated:Replaced by field `dataEngine`. + type: string + backupVolume: + type: string + dataEngine: + enum: + - v1 + - v2 + type: string + desireState: + type: string + disableFrontend: + type: boolean + engineImage: + description: 'Deprecated: Replaced by field `image`.' + type: string + frontend: + enum: + - blockdev + - iscsi + - nvmf + - "" + type: string + image: + type: string + logRequested: + type: boolean + nodeID: + type: string + replicaAddressMap: + additionalProperties: + type: string + type: object + requestedBackupRestore: + type: string + requestedDataSource: + type: string + revisionCounterDisabled: + type: boolean + salvageRequested: + type: boolean + snapshotMaxCount: + type: integer + snapshotMaxSize: + format: int64 + type: string + unmapMarkSnapChainRemovedEnabled: + type: boolean + upgradedReplicaAddressMap: + additionalProperties: + type: string + type: object + volumeName: + type: string + volumeSize: + format: int64 + type: string + type: object + status: + description: EngineStatus defines the observed state of the Longhorn engine + properties: + backupStatus: + additionalProperties: + properties: + backupURL: + type: string + error: + type: string + progress: + type: integer + replicaAddress: + type: string + snapshotName: + type: string + state: + type: string + type: object + nullable: true + 
type: object + cloneStatus: + additionalProperties: + properties: + error: + type: string + fromReplicaAddress: + type: string + isCloning: + type: boolean + progress: + type: integer + snapshotName: + type: string + state: + type: string + type: object + nullable: true + type: object + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + type: string + message: + description: Human-readable message indicating details about + last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's + last transition. + type: string + status: + description: |- + Status is the status of the condition. + Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + currentImage: + type: string + currentReplicaAddressMap: + additionalProperties: + type: string + nullable: true + type: object + currentSize: + format: int64 + type: string + currentState: + type: string + endpoint: + type: string + instanceManagerName: + type: string + ip: + type: string + isExpanding: + type: boolean + lastExpansionError: + type: string + lastExpansionFailedAt: + type: string + lastRestoredBackup: + type: string + logFetched: + type: boolean + ownerID: + type: string + port: + type: integer + purgeStatus: + additionalProperties: + properties: + error: + type: string + isPurging: + type: boolean + progress: + type: integer + state: + type: string + type: object + nullable: true + type: object + rebuildStatus: + additionalProperties: + properties: + error: + type: string + fromReplicaAddress: + type: string + isRebuilding: + type: boolean + progress: + type: integer + state: + type: string + type: object + nullable: true + type: object + replicaModeMap: + additionalProperties: + type: 
string + nullable: true + type: object + replicaTransitionTimeMap: + additionalProperties: + type: string + description: |- + ReplicaTransitionTimeMap records the time a replica in ReplicaModeMap transitions from one mode to another (or + from not being in the ReplicaModeMap to being in it). This information is sometimes required by other controllers + (e.g. the volume controller uses it to determine the correct value for replica.Spec.lastHealthyAt). + type: object + restoreStatus: + additionalProperties: + properties: + backupURL: + type: string + currentRestoringBackup: + type: string + error: + type: string + filename: + type: string + isRestoring: + type: boolean + lastRestored: + type: string + progress: + type: integer + state: + type: string + type: object + nullable: true + type: object + salvageExecuted: + type: boolean + snapshotMaxCount: + type: integer + snapshotMaxSize: + format: int64 + type: string + snapshots: + additionalProperties: + properties: + children: + additionalProperties: + type: boolean + nullable: true + type: object + created: + type: string + labels: + additionalProperties: + type: string + nullable: true + type: object + name: + type: string + parent: + type: string + removed: + type: boolean + size: + type: string + usercreated: + type: boolean + type: object + nullable: true + type: object + snapshotsError: + type: string + started: + type: boolean + storageIP: + type: string + unmapMarkSnapChainRemovedEnabled: + type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: instancemanagers.longhorn.io +spec: + group: longhorn.io + names: + kind: 
InstanceManager + listKind: InstanceManagerList + plural: instancemanagers + shortNames: + - lhim + singular: instancemanager + preserveUnknownFields: false + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The state of the instance manager + jsonPath: .status.currentState + name: State + type: string + - description: The type of the instance manager (engine or replica) + jsonPath: .spec.type + name: Type + type: string + - description: The node that the instance manager is running on + jsonPath: .spec.nodeID + name: Node + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: InstanceManager is where Longhorn stores instance manager object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The data engine of the instance manager + jsonPath: .spec.dataEngine + name: Data Engine + type: string + - description: The state of the instance manager + jsonPath: .status.currentState + name: State + type: string + - description: The type of the instance manager (engine or replica) + jsonPath: .spec.type + name: Type + type: string + - description: The node that the instance manager is running on + jsonPath: .spec.nodeID + name: Node + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: InstanceManager is where Longhorn stores instance manager object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: InstanceManagerSpec defines the desired state of the Longhorn + instance manager + properties: + dataEngine: + type: string + dataEngineSpec: + properties: + v2: + properties: + cpuMask: + type: string + type: object + type: object + image: + type: string + nodeID: + type: string + type: + enum: + - aio + - engine + - replica + type: string + type: object + status: + description: InstanceManagerStatus defines the observed state of the Longhorn + instance manager + properties: + apiMinVersion: + type: integer + apiVersion: + type: integer + backingImages: + additionalProperties: + properties: + currentChecksum: + type: string + diskUUID: + type: string + message: + type: string + name: + type: string + progress: + type: integer + size: + format: int64 + type: integer + state: + type: string + uuid: + type: string + type: object + nullable: true + type: object + currentState: + type: string + dataEngineStatus: + properties: + v2: + properties: + cpuMask: + type: string + type: object + type: object + instanceEngines: + additionalProperties: + properties: + spec: + properties: + backendStoreDriver: + description: Deprecated:Replaced by field `dataEngine`. 
+ type: string + dataEngine: + type: string + name: + type: string + type: object + status: + properties: + conditions: + additionalProperties: + type: boolean + nullable: true + type: object + endpoint: + type: string + errorMsg: + type: string + listen: + type: string + portEnd: + format: int32 + type: integer + portStart: + format: int32 + type: integer + resourceVersion: + format: int64 + type: integer + state: + type: string + targetPortEnd: + format: int32 + type: integer + targetPortStart: + format: int32 + type: integer + type: + type: string + type: object + type: object + nullable: true + type: object + instanceReplicas: + additionalProperties: + properties: + spec: + properties: + backendStoreDriver: + description: Deprecated:Replaced by field `dataEngine`. + type: string + dataEngine: + type: string + name: + type: string + type: object + status: + properties: + conditions: + additionalProperties: + type: boolean + nullable: true + type: object + endpoint: + type: string + errorMsg: + type: string + listen: + type: string + portEnd: + format: int32 + type: integer + portStart: + format: int32 + type: integer + resourceVersion: + format: int64 + type: integer + state: + type: string + targetPortEnd: + format: int32 + type: integer + targetPortStart: + format: int32 + type: integer + type: + type: string + type: object + type: object + nullable: true + type: object + instances: + additionalProperties: + properties: + spec: + properties: + backendStoreDriver: + description: Deprecated:Replaced by field `dataEngine`. 
+ type: string + dataEngine: + type: string + name: + type: string + type: object + status: + properties: + conditions: + additionalProperties: + type: boolean + nullable: true + type: object + endpoint: + type: string + errorMsg: + type: string + listen: + type: string + portEnd: + format: int32 + type: integer + portStart: + format: int32 + type: integer + resourceVersion: + format: int64 + type: integer + state: + type: string + targetPortEnd: + format: int32 + type: integer + targetPortStart: + format: int32 + type: integer + type: + type: string + type: object + type: object + description: 'Deprecated: Replaced by InstanceEngines and InstanceReplicas' + nullable: true + type: object + ip: + type: string + ownerID: + type: string + proxyApiMinVersion: + type: integer + proxyApiVersion: + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: nodes.longhorn.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: longhorn-conversion-webhook + namespace: longhorn-system + path: /v1/webhook/conversion + port: 9501 + conversionReviewVersions: + - v1beta2 + - v1beta1 + group: longhorn.io + names: + kind: Node + listKind: NodeList + plural: nodes + shortNames: + - lhn + singular: node + preserveUnknownFields: false + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicate whether the node is ready + jsonPath: .status.conditions['Ready']['status'] + name: Ready + type: string + - description: Indicate whether the user disabled/enabled replica scheduling for + the node + jsonPath: .spec.allowScheduling + name: AllowScheduling 
+ type: boolean + - description: Indicate whether Longhorn can schedule replicas on the node + jsonPath: .status.conditions['Schedulable']['status'] + name: Schedulable + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Node is where Longhorn stores Longhorn node object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicate whether the node is ready + jsonPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - description: Indicate whether the user disabled/enabled replica scheduling for + the node + jsonPath: .spec.allowScheduling + name: AllowScheduling + type: boolean + - description: Indicate whether Longhorn can schedule replicas on the node + jsonPath: .status.conditions[?(@.type=='Schedulable')].status + name: Schedulable + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Node is where Longhorn stores Longhorn node object. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: NodeSpec defines the desired state of the Longhorn node + properties: + allowScheduling: + type: boolean + disks: + additionalProperties: + properties: + allowScheduling: + type: boolean + diskDriver: + enum: + - "" + - auto + - aio + type: string + diskType: + enum: + - filesystem + - block + type: string + evictionRequested: + type: boolean + path: + type: string + storageReserved: + format: int64 + type: integer + tags: + items: + type: string + type: array + type: object + type: object + evictionRequested: + type: boolean + instanceManagerCPURequest: + type: integer + name: + type: string + tags: + items: + type: string + type: array + type: object + status: + description: NodeStatus defines the observed state of the Longhorn node + properties: + autoEvicting: + type: boolean + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + type: string + message: + description: Human-readable message indicating details about + last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's + last transition. 
+ type: string + status: + description: |- + Status is the status of the condition. + Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + diskStatus: + additionalProperties: + properties: + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from + one status to another. + type: string + message: + description: Human-readable message indicating details + about last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the + condition's last transition. + type: string + status: + description: |- + Status is the status of the condition. + Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + diskDriver: + type: string + diskName: + type: string + diskPath: + type: string + diskType: + type: string + diskUUID: + type: string + filesystemType: + type: string + instanceManagerName: + type: string + scheduledBackingImage: + additionalProperties: + format: int64 + type: integer + nullable: true + type: object + scheduledReplica: + additionalProperties: + format: int64 + type: integer + nullable: true + type: object + storageAvailable: + format: int64 + type: integer + storageMaximum: + format: int64 + type: integer + storageScheduled: + format: int64 + type: integer + type: object + nullable: true + type: object + region: + type: string + snapshotCheckStatus: + properties: + lastPeriodicCheckedAt: + format: date-time + type: string + type: object + zone: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: 
+ annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: orphans.longhorn.io +spec: + group: longhorn.io + names: + kind: Orphan + listKind: OrphanList + plural: orphans + shortNames: + - lho + singular: orphan + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The type of the orphan + jsonPath: .spec.orphanType + name: Type + type: string + - description: The node that the orphan is on + jsonPath: .spec.nodeID + name: Node + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: Orphan is where Longhorn stores orphan object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OrphanSpec defines the desired state of the Longhorn orphaned + data + properties: + nodeID: + description: The node ID on which the controller is responsible to + reconcile this orphan CR. + type: string + orphanType: + description: |- + The type of the orphaned data. + Can be "replica". 
+ type: string + parameters: + additionalProperties: + type: string + description: The parameters of the orphaned data + type: object + type: object + status: + description: OrphanStatus defines the observed state of the Longhorn orphaned + data + properties: + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + type: string + message: + description: Human-readable message indicating details about + last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's + last transition. + type: string + status: + description: |- + Status is the status of the condition. + Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + ownerID: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: recurringjobs.longhorn.io +spec: + group: longhorn.io + names: + kind: RecurringJob + listKind: RecurringJobList + plural: recurringjobs + shortNames: + - lhrj + singular: recurringjob + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Sets groupings to the jobs. 
When set to "default" group will be + added to the volume label when no other job label exist in volume + jsonPath: .spec.groups + name: Groups + type: string + - description: Should be one of "backup" or "snapshot" + jsonPath: .spec.task + name: Task + type: string + - description: The cron expression represents recurring job scheduling + jsonPath: .spec.cron + name: Cron + type: string + - description: The number of snapshots/backups to keep for the volume + jsonPath: .spec.retain + name: Retain + type: integer + - description: The concurrent job to run by each cron job + jsonPath: .spec.concurrency + name: Concurrency + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Specify the labels + jsonPath: .spec.labels + name: Labels + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: RecurringJob is where Longhorn stores recurring job object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Sets groupings to the jobs. 
When set to "default" group will be + added to the volume label when no other job label exist in volume + jsonPath: .spec.groups + name: Groups + type: string + - description: Should be one of "snapshot", "snapshot-force-create", "snapshot-cleanup", + "snapshot-delete", "backup", "backup-force-create" or "filesystem-trim" + jsonPath: .spec.task + name: Task + type: string + - description: The cron expression represents recurring job scheduling + jsonPath: .spec.cron + name: Cron + type: string + - description: The number of snapshots/backups to keep for the volume + jsonPath: .spec.retain + name: Retain + type: integer + - description: The concurrent job to run by each cron job + jsonPath: .spec.concurrency + name: Concurrency + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Specify the labels + jsonPath: .spec.labels + name: Labels + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: RecurringJob is where Longhorn stores recurring job object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RecurringJobSpec defines the desired state of the Longhorn + recurring job + properties: + concurrency: + description: The concurrency of taking the snapshot/backup. 
+ type: integer + cron: + description: The cron setting. + type: string + groups: + description: The recurring job group. + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: The label of the snapshot/backup. + type: object + name: + description: The recurring job name. + type: string + parameters: + additionalProperties: + type: string + description: |- + The parameters of the snapshot/backup. + Support parameters: "full-backup-interval". + type: object + retain: + description: The retain count of the snapshot/backup. + type: integer + task: + description: |- + The recurring job task. + Can be "snapshot", "snapshot-force-create", "snapshot-cleanup", "snapshot-delete", "backup", "backup-force-create" or "filesystem-trim" + enum: + - snapshot + - snapshot-force-create + - snapshot-cleanup + - snapshot-delete + - backup + - backup-force-create + - filesystem-trim + type: string + type: object + status: + description: RecurringJobStatus defines the observed state of the Longhorn + recurring job + properties: + executionCount: + description: The number of jobs that have been triggered. + type: integer + ownerID: + description: The owner ID which is responsible to reconcile this recurring + job CR. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: replicas.longhorn.io +spec: + group: longhorn.io + names: + kind: Replica + listKind: ReplicaList + plural: replicas + shortNames: + - lhr + singular: replica + preserveUnknownFields: false + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The current state of the replica + jsonPath: .status.currentState + name: State + type: string + - description: The node that the replica is on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The disk that the replica is on + jsonPath: .spec.diskID + name: Disk + type: string + - description: The instance manager of the replica + jsonPath: .status.instanceManagerName + name: InstanceManager + type: string + - description: The current image of the replica + jsonPath: .status.currentImage + name: Image + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Replica is where Longhorn stores replica object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. 
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The data engine of the replica + jsonPath: .spec.dataEngine + name: Data Engine + type: string + - description: The current state of the replica + jsonPath: .status.currentState + name: State + type: string + - description: The node that the replica is on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The disk that the replica is on + jsonPath: .spec.diskID + name: Disk + type: string + - description: The instance manager of the replica + jsonPath: .status.instanceManagerName + name: InstanceManager + type: string + - description: The current image of the replica + jsonPath: .status.currentImage + name: Image + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Replica is where Longhorn stores replica object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ReplicaSpec defines the desired state of the Longhorn replica + properties: + active: + type: boolean + backendStoreDriver: + description: Deprecated:Replaced by field `dataEngine`. + type: string + backingImage: + type: string + dataDirectoryName: + type: string + dataEngine: + enum: + - v1 + - v2 + type: string + desireState: + type: string + diskID: + type: string + diskPath: + type: string + engineImage: + description: 'Deprecated: Replaced by field `image`.' + type: string + engineName: + type: string + evictionRequested: + type: boolean + failedAt: + description: |- + FailedAt is set when a running replica fails or when a running engine is unable to use a replica for any reason. + FailedAt indicates the time the failure occurred. When FailedAt is set, a replica is likely to have useful + (though possibly stale) data. A replica with FailedAt set must be rebuilt from a non-failed replica (or it can + be used in a salvage if all replicas are failed). FailedAt is cleared before a rebuild or salvage. FailedAt may + be later than the corresponding entry in an engine's replicaTransitionTimeMap because it is set when the volume + controller acknowledges the change. + type: string + hardNodeAffinity: + type: string + healthyAt: + description: |- + HealthyAt is set the first time a replica becomes read/write in an engine after creation or rebuild. HealthyAt + indicates the time the last successful rebuild occurred. When HealthyAt is set, a replica is likely to have + useful (though possibly stale) data. HealthyAt is cleared before a rebuild. HealthyAt may be later than the + corresponding entry in an engine's replicaTransitionTimeMap because it is set when the volume controller + acknowledges the change. 
+ type: string + image: + type: string + lastFailedAt: + description: |- + LastFailedAt is always set at the same time as FailedAt. Unlike FailedAt, LastFailedAt is never cleared. + LastFailedAt is not a reliable indicator of the state of a replica's data. For example, a replica with + LastFailedAt may already be healthy and in use again. However, because it is never cleared, it can be compared to + LastHealthyAt to help prevent dangerous replica deletion in some corner cases. LastFailedAt may be later than the + corresponding entry in an engine's replicaTransitionTimeMap because it is set when the volume controller + acknowledges the change. + type: string + lastHealthyAt: + description: |- + LastHealthyAt is set every time a replica becomes read/write in an engine. Unlike HealthyAt, LastHealthyAt is + never cleared. LastHealthyAt is not a reliable indicator of the state of a replica's data. For example, a + replica with LastHealthyAt set may be in the middle of a rebuild. However, because it is never cleared, it can be + compared to LastFailedAt to help prevent dangerous replica deletion in some corner cases. LastHealthyAt may be + later than the corresponding entry in an engine's replicaTransitionTimeMap because it is set when the volume + controller acknowledges the change. + type: string + logRequested: + type: boolean + migrationEngineName: + description: |- + MigrationEngineName is indicating the migrating engine which current connected to this replica. 
This is only + used for live migration of v2 data engine + type: string + nodeID: + type: string + rebuildRetryCount: + type: integer + revisionCounterDisabled: + type: boolean + salvageRequested: + type: boolean + snapshotMaxCount: + type: integer + snapshotMaxSize: + format: int64 + type: string + unmapMarkDiskChainRemovedEnabled: + type: boolean + volumeName: + type: string + volumeSize: + format: int64 + type: string + type: object + status: + description: ReplicaStatus defines the observed state of the Longhorn + replica + properties: + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + type: string + message: + description: Human-readable message indicating details about + last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's + last transition. + type: string + status: + description: |- + Status is the status of the condition. + Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + currentImage: + type: string + currentState: + type: string + evictionRequested: + description: 'Deprecated: Replaced by field `spec.evictionRequested`.' 
+ type: boolean + instanceManagerName: + type: string + ip: + type: string + logFetched: + type: boolean + ownerID: + type: string + port: + type: integer + salvageExecuted: + type: boolean + started: + type: boolean + storageIP: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: settings.longhorn.io +spec: + group: longhorn.io + names: + kind: Setting + listKind: SettingList + plural: settings + shortNames: + - lhs + singular: setting + preserveUnknownFields: false + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The value of the setting + jsonPath: .value + name: Value + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Setting is where Longhorn stores setting object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + value: + type: string + required: + - value + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The value of the setting + jsonPath: .value + name: Value + type: string + - description: The setting is applied + jsonPath: .status.applied + name: Applied + type: boolean + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Setting is where Longhorn stores setting object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + status: + description: The status of the setting. + properties: + applied: + description: The setting is applied. + type: boolean + required: + - applied + type: object + value: + description: The value of the setting. 
+ type: string + required: + - value + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: sharemanagers.longhorn.io +spec: + group: longhorn.io + names: + kind: ShareManager + listKind: ShareManagerList + plural: sharemanagers + shortNames: + - lhsm + singular: sharemanager + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The state of the share manager + jsonPath: .status.state + name: State + type: string + - description: The node that the share manager is owned by + jsonPath: .status.ownerID + name: Node + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: ShareManager is where Longhorn stores share manager object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The state of the share manager + jsonPath: .status.state + name: State + type: string + - description: The node that the share manager is owned by + jsonPath: .status.ownerID + name: Node + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ShareManager is where Longhorn stores share manager object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ShareManagerSpec defines the desired state of the Longhorn + share manager + properties: + image: + description: Share manager image used for creating a share manager + pod + type: string + type: object + status: + description: ShareManagerStatus defines the observed state of the Longhorn + share manager + properties: + endpoint: + description: NFS endpoint that can access the mounted filesystem of + the volume + type: string + ownerID: + description: The node ID on which the controller is responsible to + reconcile this share manager resource + type: string + state: + description: The state of the share manager resource + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: snapshots.longhorn.io +spec: + group: longhorn.io + names: + kind: Snapshot + listKind: SnapshotList + plural: snapshots + shortNames: + - lhsnap + singular: snapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The volume that this snapshot belongs to + jsonPath: .spec.volume + name: Volume + type: string + - description: Timestamp when the point-in-time snapshot was taken + jsonPath: .status.creationTime + name: CreationTime + type: string + - description: Indicates if the snapshot is ready to be used to restore/backup + a volume + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the minimum size of volume required to rehydrate from + this snapshot + jsonPath: 
.status.restoreSize + name: RestoreSize + type: string + - description: The actual size of the snapshot + jsonPath: .status.size + name: Size + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Snapshot is the Schema for the snapshots API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SnapshotSpec defines the desired state of Longhorn Snapshot + properties: + createSnapshot: + description: require creating a new snapshot + type: boolean + labels: + additionalProperties: + type: string + description: The labels of snapshot + nullable: true + type: object + volume: + description: |- + the volume that this snapshot belongs to. + This field is immutable after creation. 
+ type: string + required: + - volume + type: object + status: + description: SnapshotStatus defines the observed state of Longhorn Snapshot + properties: + checksum: + type: string + children: + additionalProperties: + type: boolean + nullable: true + type: object + creationTime: + type: string + error: + type: string + labels: + additionalProperties: + type: string + nullable: true + type: object + markRemoved: + type: boolean + ownerID: + type: string + parent: + type: string + readyToUse: + type: boolean + restoreSize: + format: int64 + type: integer + size: + format: int64 + type: integer + userCreated: + type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: supportbundles.longhorn.io +spec: + group: longhorn.io + names: + kind: SupportBundle + listKind: SupportBundleList + plural: supportbundles + shortNames: + - lhbundle + singular: supportbundle + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The state of the support bundle + jsonPath: .status.state + name: State + type: string + - description: The issue URL + jsonPath: .spec.issueURL + name: Issue + type: string + - description: A brief description of the issue + jsonPath: .spec.description + name: Description + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SupportBundle is where Longhorn stores support bundle object + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SupportBundleSpec defines the desired state of the Longhorn + SupportBundle + properties: + description: + description: A brief description of the issue + type: string + issueURL: + description: The issue URL + nullable: true + type: string + nodeID: + description: The preferred responsible controller node ID. + type: string + required: + - description + type: object + status: + description: SupportBundleStatus defines the observed state of the Longhorn + SupportBundle + properties: + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + type: string + message: + description: Human-readable message indicating details about + last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's + last transition. + type: string + status: + description: |- + Status is the status of the condition. + Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. 
+ type: string + type: object + type: array + filename: + type: string + filesize: + format: int64 + type: integer + image: + description: The support bundle manager image + type: string + managerIP: + description: The support bundle manager IP + type: string + ownerID: + description: The current responsible controller node ID + type: string + progress: + type: integer + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: systembackups.longhorn.io +spec: + group: longhorn.io + names: + kind: SystemBackup + listKind: SystemBackupList + plural: systembackups + shortNames: + - lhsb + singular: systembackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The system backup Longhorn version + jsonPath: .status.version + name: Version + type: string + - description: The system backup state + jsonPath: .status.state + name: State + type: string + - description: The system backup creation time + jsonPath: .status.createdAt + name: Created + type: string + - description: The last time that the system backup was synced into the cluster + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: SystemBackup is where Longhorn stores system backup object + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SystemBackupSpec defines the desired state of the Longhorn + SystemBackup + properties: + volumeBackupPolicy: + description: |- + The create volume backup policy + Can be "if-not-present", "always" or "disabled" + nullable: true + type: string + type: object + status: + description: SystemBackupStatus defines the observed state of the Longhorn + SystemBackup + properties: + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + type: string + message: + description: Human-readable message indicating details about + last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's + last transition. + type: string + status: + description: |- + Status is the status of the condition. + Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + createdAt: + description: The system backup creation time. + format: date-time + type: string + gitCommit: + description: The saved Longhorn manager git commit. + nullable: true + type: string + lastSyncedAt: + description: The last time that the system backup was synced into + the cluster. + format: date-time + nullable: true + type: string + managerImage: + description: The saved manager image. 
+ type: string + ownerID: + description: The node ID of the responsible controller to reconcile + this SystemBackup. + type: string + state: + description: The system backup state. + type: string + version: + description: The saved Longhorn version. + nullable: true + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: systemrestores.longhorn.io +spec: + group: longhorn.io + names: + kind: SystemRestore + listKind: SystemRestoreList + plural: systemrestores + shortNames: + - lhsr + singular: systemrestore + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The system restore state + jsonPath: .status.state + name: State + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SystemRestore is where Longhorn stores system restore object + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SystemRestoreSpec defines the desired state of the Longhorn + SystemRestore + properties: + systemBackup: + description: The system backup name in the object store. + type: string + required: + - systemBackup + type: object + status: + description: SystemRestoreStatus defines the observed state of the Longhorn + SystemRestore + properties: + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + type: string + message: + description: Human-readable message indicating details about + last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's + last transition. + type: string + status: + description: |- + Status is the status of the condition. + Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + ownerID: + description: The node ID of the responsible controller to reconcile + this SystemRestore. + type: string + sourceURL: + description: The source system backup URL. + type: string + state: + description: The system restore state. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: volumeattachments.longhorn.io +spec: + group: longhorn.io + names: + kind: VolumeAttachment + listKind: VolumeAttachmentList + plural: volumeattachments + shortNames: + - lhva + singular: volumeattachment + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VolumeAttachment stores attachment information of a Longhorn + volume + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VolumeAttachmentSpec defines the desired state of Longhorn + VolumeAttachment + properties: + attachmentTickets: + additionalProperties: + properties: + generation: + description: |- + A sequence number representing a specific generation of the desired state. + Populated by the system. Read-only. 
+ format: int64 + type: integer + id: + description: The unique ID of this attachment. Used to differentiate + different attachments of the same volume. + type: string + nodeID: + description: The node that this attachment is requesting + type: string + parameters: + additionalProperties: + type: string + description: Optional additional parameter for this attachment + type: object + type: + type: string + type: object + type: object + volume: + description: The name of Longhorn volume of this VolumeAttachment + type: string + required: + - volume + type: object + status: + description: VolumeAttachmentStatus defines the observed state of Longhorn + VolumeAttachment + properties: + attachmentTicketStatuses: + additionalProperties: + properties: + conditions: + description: Record any error when trying to fulfill this attachment + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from + one status to another. + type: string + message: + description: Human-readable message indicating details + about last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the + condition's last transition. + type: string + status: + description: |- + Status is the status of the condition. + Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + generation: + description: |- + A sequence number representing a specific generation of the desired state. + Populated by the system. Read-only. + format: int64 + type: integer + id: + description: The unique ID of this attachment. Used to differentiate + different attachments of the same volume. 
+ type: string + satisfied: + description: Indicate whether this attachment ticket has been + satisfied + type: boolean + required: + - conditions + - satisfied + type: object + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + longhorn-manager: "" + name: volumes.longhorn.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: longhorn-conversion-webhook + namespace: longhorn-system + path: /v1/webhook/conversion + port: 9501 + conversionReviewVersions: + - v1beta2 + - v1beta1 + group: longhorn.io + names: + kind: Volume + listKind: VolumeList + plural: volumes + shortNames: + - lhv + singular: volume + preserveUnknownFields: false + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The state of the volume + jsonPath: .status.state + name: State + type: string + - description: The robustness of the volume + jsonPath: .status.robustness + name: Robustness + type: string + - description: The scheduled condition of the volume + jsonPath: .status.conditions['scheduled']['status'] + name: Scheduled + type: string + - description: The size of the volume + jsonPath: .spec.size + name: Size + type: string + - description: The node that the volume is currently attaching to + jsonPath: .status.currentNodeID + name: Node + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Volume is where Longhorn stores volume object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The data engine of the volume + jsonPath: .spec.dataEngine + name: Data Engine + type: string + - description: The state of the volume + jsonPath: .status.state + name: State + type: string + - description: The robustness of the volume + jsonPath: .status.robustness + name: Robustness + type: string + - description: The scheduled condition of the volume + jsonPath: .status.conditions[?(@.type=='Schedulable')].status + name: Scheduled + type: string + - description: The size of the volume + jsonPath: .spec.size + name: Size + type: string + - description: The node that the volume is currently attaching to + jsonPath: .status.currentNodeID + name: Node + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Volume is where Longhorn stores volume object. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: VolumeSpec defines the desired state of the Longhorn volume + properties: + Standby: + type: boolean + accessMode: + enum: + - rwo + - rwx + type: string + backendStoreDriver: + description: Deprecated:Replaced by field `dataEngine`.' + type: string + backingImage: + type: string + backupCompressionMethod: + enum: + - none + - lz4 + - gzip + type: string + backupTargetName: + description: The backup target name that the volume will be backed + up to or is synced. + type: string + dataEngine: + enum: + - v1 + - v2 + type: string + dataLocality: + enum: + - disabled + - best-effort + - strict-local + type: string + dataSource: + type: string + disableFrontend: + type: boolean + diskSelector: + items: + type: string + type: array + encrypted: + type: boolean + engineImage: + description: 'Deprecated: Replaced by field `image`.' + type: string + freezeFilesystemForSnapshot: + description: Setting that freezes the filesystem on the root partition + before a snapshot is created. 
+ enum: + - ignored + - enabled + - disabled + type: string + fromBackup: + type: string + frontend: + enum: + - blockdev + - iscsi + - nvmf + - "" + type: string + image: + type: string + lastAttachedBy: + type: string + migratable: + type: boolean + migrationNodeID: + type: string + nodeID: + type: string + nodeSelector: + items: + type: string + type: array + numberOfReplicas: + type: integer + replicaAutoBalance: + enum: + - ignored + - disabled + - least-effort + - best-effort + type: string + replicaDiskSoftAntiAffinity: + description: Replica disk soft anti affinity of the volume. Set enabled + to allow replicas to be scheduled in the same disk. + enum: + - ignored + - enabled + - disabled + type: string + replicaSoftAntiAffinity: + description: Replica soft anti affinity of the volume. Set enabled + to allow replicas to be scheduled on the same node. + enum: + - ignored + - enabled + - disabled + type: string + replicaZoneSoftAntiAffinity: + description: Replica zone soft anti affinity of the volume. Set enabled + to allow replicas to be scheduled in the same zone. 
+ enum: + - ignored + - enabled + - disabled + type: string + restoreVolumeRecurringJob: + enum: + - ignored + - enabled + - disabled + type: string + revisionCounterDisabled: + type: boolean + size: + format: int64 + type: string + snapshotDataIntegrity: + enum: + - ignored + - disabled + - enabled + - fast-check + type: string + snapshotMaxCount: + type: integer + snapshotMaxSize: + format: int64 + type: string + staleReplicaTimeout: + type: integer + unmapMarkSnapChainRemoved: + enum: + - ignored + - disabled + - enabled + type: string + type: object + status: + description: VolumeStatus defines the observed state of the Longhorn volume + properties: + actualSize: + format: int64 + type: integer + cloneStatus: + properties: + attemptCount: + type: integer + nextAllowedAttemptAt: + type: string + snapshot: + type: string + sourceVolume: + type: string + state: + type: string + type: object + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + type: string + message: + description: Human-readable message indicating details about + last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's + last transition. + type: string + status: + description: |- + Status is the status of the condition. + Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. 
+ type: string + type: object + nullable: true + type: array + currentImage: + type: string + currentMigrationNodeID: + description: the node that this volume is currently migrating to + type: string + currentNodeID: + type: string + expansionRequired: + type: boolean + frontendDisabled: + type: boolean + isStandby: + type: boolean + kubernetesStatus: + properties: + lastPVCRefAt: + type: string + lastPodRefAt: + type: string + namespace: + description: determine if PVC/Namespace is history or not + type: string + pvName: + type: string + pvStatus: + type: string + pvcName: + type: string + workloadsStatus: + description: determine if Pod/Workload is history or not + items: + properties: + podName: + type: string + podStatus: + type: string + workloadName: + type: string + workloadType: + type: string + type: object + nullable: true + type: array + type: object + lastBackup: + type: string + lastBackupAt: + type: string + lastDegradedAt: + type: string + ownerID: + type: string + pendingNodeID: + description: Deprecated. 
+ type: string + remountRequestedAt: + type: string + restoreInitiated: + type: boolean + restoreRequired: + type: boolean + robustness: + type: string + shareEndpoint: + type: string + shareState: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +# Source: longhorn/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: longhorn-role + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - "*" +- apiGroups: [""] + resources: ["pods", "events", "persistentvolumes", "persistentvolumeclaims","persistentvolumeclaims/status", "nodes", "proxy/nodes", "pods/log", "secrets", "services", "endpoints", "configmaps", "serviceaccounts"] + verbs: ["*"] +- apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] +- apiGroups: ["apps"] + resources: ["daemonsets", "statefulsets", "deployments"] + verbs: ["*"] +- apiGroups: ["batch"] + resources: ["jobs", "cronjobs"] + verbs: ["*"] +- apiGroups: ["policy"] + resources: ["poddisruptionbudgets", "podsecuritypolicies"] + verbs: ["*"] +- apiGroups: ["scheduling.k8s.io"] + resources: ["priorityclasses"] + verbs: ["watch", "list"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "volumeattachments", "volumeattachments/status", "csinodes", "csidrivers"] + verbs: ["*"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses", "volumesnapshots", "volumesnapshotcontents", "volumesnapshotcontents/status"] + verbs: ["*"] +- apiGroups: ["longhorn.io"] + resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", "settings/status", + "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status", + 
"sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", + "backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status", + "backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status", + "recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status", + "supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status", + "volumeattachments", "volumeattachments/status", "backupbackingimages", "backupbackingimages/status"] + verbs: ["*"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["*"] +- apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list"] +- apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["list", "watch"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] + verbs: ["get", "list", "create", "patch", "delete"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings", "clusterrolebindings", "clusterroles"] + verbs: ["*"] +--- +# Source: longhorn/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: longhorn-bind + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: longhorn-role +subjects: +- kind: ServiceAccount + name: longhorn-service-account + namespace: longhorn-system +--- +# Source: longhorn/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: longhorn-support-bundle + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: 
v1.8.1 +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: longhorn-support-bundle + namespace: longhorn-system +--- +# Source: longhorn/templates/daemonset-sa.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + app: longhorn-manager + name: longhorn-backend + namespace: longhorn-system +spec: + type: ClusterIP + selector: + app: longhorn-manager + ports: + - name: manager + port: 9500 + targetPort: manager +--- +# Source: longhorn/templates/deployment-ui.yaml +kind: Service +apiVersion: v1 +metadata: + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + app: longhorn-ui + name: longhorn-frontend + namespace: longhorn-system +spec: + type: ClusterIP + selector: + app: longhorn-ui + ports: + - name: http + port: 80 + targetPort: http + nodePort: null +--- +# Source: longhorn/templates/services.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + app: longhorn-conversion-webhook + name: longhorn-conversion-webhook + namespace: longhorn-system +spec: + type: ClusterIP + selector: + longhorn.io/conversion-webhook: longhorn-conversion-webhook + ports: + - name: conversion-webhook + port: 9501 + targetPort: conversion-wh +--- +# Source: longhorn/templates/services.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + app: longhorn-admission-webhook + name: longhorn-admission-webhook + namespace: longhorn-system +spec: + type: ClusterIP + selector: + longhorn.io/admission-webhook: longhorn-admission-webhook + ports: + - name: admission-webhook + port: 9502 + targetPort: admission-wh 
+--- +# Source: longhorn/templates/services.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + app: longhorn-recovery-backend + name: longhorn-recovery-backend + namespace: longhorn-system +spec: + type: ClusterIP + selector: + longhorn.io/recovery-backend: longhorn-recovery-backend + ports: + - name: recovery-backend + port: 9503 + targetPort: recov-backend +--- +# Source: longhorn/templates/daemonset-sa.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + app: longhorn-manager + name: longhorn-manager + namespace: longhorn-system +spec: + selector: + matchLabels: + app: longhorn-manager + template: + metadata: + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + app: longhorn-manager + spec: + containers: + - name: longhorn-manager + image: longhornio/longhorn-manager:v1.8.1 + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + command: + - longhorn-manager + - -d + - daemon + - --engine-image + - "longhornio/longhorn-engine:v1.8.1" + - --instance-manager-image + - "longhornio/longhorn-instance-manager:v1.8.1" + - --share-manager-image + - "longhornio/longhorn-share-manager:v1.8.1" + - --backing-image-manager-image + - "longhornio/backing-image-manager:v1.8.1" + - --support-bundle-manager-image + - "longhornio/support-bundle-kit:v0.0.52" + - --manager-image + - "longhornio/longhorn-manager:v1.8.1" + - --service-account + - longhorn-service-account + - --upgrade-version-check + ports: + - containerPort: 9500 + name: manager + - containerPort: 9501 + name: conversion-wh + - containerPort: 9502 + name: admission-wh + - containerPort: 9503 + name: recov-backend + readinessProbe: + httpGet: + path: /v1/healthz + port: 9501 + scheme: HTTPS + 
volumeMounts: + - name: boot + mountPath: /host/boot/ + readOnly: true + - name: dev + mountPath: /host/dev/ + - name: proc + mountPath: /host/proc/ + readOnly: true + - name: etc + mountPath: /host/etc/ + readOnly: true + - name: longhorn + mountPath: /var/lib/longhorn/ + mountPropagation: Bidirectional + - name: longhorn-grpc-tls + mountPath: /tls-files/ + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: pre-pull-share-manager-image + imagePullPolicy: IfNotPresent + image: longhornio/longhorn-share-manager:v1.8.1 + command: ["sh", "-c", "echo share-manager image pulled && sleep infinity"] + volumes: + - name: boot + hostPath: + path: /boot/ + - name: dev + hostPath: + path: /dev/ + - name: proc + hostPath: + path: /proc/ + - name: etc + hostPath: + path: /etc/ + - name: longhorn + hostPath: + path: /var/lib/longhorn/ + - name: longhorn-grpc-tls + secret: + secretName: longhorn-grpc-tls + optional: true + priorityClassName: "longhorn-critical" + serviceAccountName: longhorn-service-account + updateStrategy: + rollingUpdate: + maxUnavailable: "100%" +--- +# Source: longhorn/templates/deployment-driver.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: longhorn-driver-deployer + namespace: longhorn-system + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 +spec: + replicas: 1 + selector: + matchLabels: + app: longhorn-driver-deployer + template: + metadata: + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + app: longhorn-driver-deployer + spec: + initContainers: + - name: wait-longhorn-manager + image: longhornio/longhorn-manager:v1.8.1 + command: ['sh', '-c', 
'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] + containers: + - name: longhorn-driver-deployer + image: longhornio/longhorn-manager:v1.8.1 + imagePullPolicy: IfNotPresent + command: + - longhorn-manager + - -d + - deploy-driver + - --manager-image + - "longhornio/longhorn-manager:v1.8.1" + - --manager-url + - http://longhorn-backend:9500/v1 + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: CSI_ATTACHER_IMAGE + value: "longhornio/csi-attacher:v4.8.1" + - name: CSI_PROVISIONER_IMAGE + value: "longhornio/csi-provisioner:v5.2.0" + - name: CSI_NODE_DRIVER_REGISTRAR_IMAGE + value: "longhornio/csi-node-driver-registrar:v2.13.0" + - name: CSI_RESIZER_IMAGE + value: "longhornio/csi-resizer:v1.13.2" + - name: CSI_SNAPSHOTTER_IMAGE + value: "longhornio/csi-snapshotter:v8.2.0" + - name: CSI_LIVENESS_PROBE_IMAGE + value: "longhornio/livenessprobe:v2.15.0" + priorityClassName: "longhorn-critical" + serviceAccountName: longhorn-service-account + securityContext: + runAsUser: 0 +--- +# Source: longhorn/templates/deployment-ui.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + app: longhorn-ui + name: longhorn-ui + namespace: longhorn-system +spec: + replicas: 2 + selector: + matchLabels: + app: longhorn-ui + template: + metadata: + labels: + app.kubernetes.io/name: longhorn + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.8.1 + app: longhorn-ui + spec: + serviceAccountName: longhorn-ui-service-account + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + 
matchExpressions: + - key: app + operator: In + values: + - longhorn-ui + topologyKey: kubernetes.io/hostname + containers: + - name: longhorn-ui + image: longhornio/longhorn-ui:v1.8.1 + imagePullPolicy: IfNotPresent + volumeMounts: + - name : nginx-cache + mountPath: /var/cache/nginx/ + - name : nginx-config + mountPath: /var/config/nginx/ + - name: var-run + mountPath: /var/run/ + ports: + - containerPort: 8000 + name: http + env: + - name: LONGHORN_MANAGER_IP + value: "http://longhorn-backend:9500" + - name: LONGHORN_UI_PORT + value: "8000" + volumes: + - emptyDir: {} + name: nginx-cache + - emptyDir: {} + name: nginx-config + - emptyDir: {} + name: var-run + priorityClassName: "longhorn-critical" +--- +# Source: longhorn/templates/validate-psp-install.yaml +# diff --git a/setup/cluster/metallb/README.md b/setup/cluster/metallb/README.md new file mode 100644 index 0000000..e69de29 diff --git a/setup/cluster/metallb/configuration/kustomization.yaml b/setup/cluster/metallb/configuration/kustomization.yaml deleted file mode 100644 index dafe7d8..0000000 --- a/setup/cluster/metallb/configuration/kustomization.yaml +++ /dev/null @@ -1,18 +0,0 @@ -namespace: metallb-system -resources: - - pool.yaml -configMapGenerator: - - name: metallb-config - envs: - - config/config.env -replacements: - - source: - kind: ConfigMap - name: metallb-config - fieldPath: data.CLUSTER_LOAD_BALANCER_RANGE - targets: - - select: - kind: IPAddressPool - name: first-pool - fieldPaths: - - spec.addresses.0 diff --git a/setup/cluster/setup-metallb.sh b/setup/cluster/metallb/install.sh similarity index 50% rename from setup/cluster/setup-metallb.sh rename to setup/cluster/metallb/install.sh index c600ef3..5d969f3 100755 --- a/setup/cluster/setup-metallb.sh +++ b/setup/cluster/metallb/install.sh @@ -1,27 +1,29 @@ #!/bin/bash set -e -SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" -SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" -cd "$SCRIPT_DIR" - -# Source environment variables -if [[ -f "../load-env.sh" 
]]; then - source ../load-env.sh +if [ -z "${WC_HOME}" ]; then + echo "Please source the wildcloud environment first. (e.g., \`source ./env.sh\`)" + exit 1 fi +CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster" +METALLB_DIR="${CLUSTER_SETUP_DIR}/metallb" + echo "Setting up MetalLB..." +# Process templates with gomplate +echo "Processing MetalLB templates..." +wild-compile-template-dir --clean ${METALLB_DIR}/kustomize.template ${METALLB_DIR}/kustomize + echo "Deploying MetalLB..." -# cat ${SCRIPT_DIR}/metallb/metallb-helm-config.yaml | envsubst | kubectl apply -f - -kubectl apply -k metallb/installation +kubectl apply -k ${METALLB_DIR}/kustomize/installation echo "Waiting for MetalLB to be deployed..." kubectl wait --for=condition=Available deployment/controller -n metallb-system --timeout=60s sleep 10 # Extra buffer for webhook initialization echo "Customizing MetalLB..." -kubectl apply -k metallb/configuration +kubectl apply -k ${METALLB_DIR}/kustomize/configuration echo "✅ MetalLB installed and configured" echo "" diff --git a/setup/cluster/metallb/kustomize.template/configuration/kustomization.yaml b/setup/cluster/metallb/kustomize.template/configuration/kustomization.yaml new file mode 100644 index 0000000..35b3ac2 --- /dev/null +++ b/setup/cluster/metallb/kustomize.template/configuration/kustomization.yaml @@ -0,0 +1,3 @@ +namespace: metallb-system +resources: + - pool.yaml diff --git a/setup/cluster/metallb/configuration/pool.yaml b/setup/cluster/metallb/kustomize.template/configuration/pool.yaml similarity index 86% rename from setup/cluster/metallb/configuration/pool.yaml rename to setup/cluster/metallb/kustomize.template/configuration/pool.yaml index 5d40b3d..38ffaa7 100644 --- a/setup/cluster/metallb/configuration/pool.yaml +++ b/setup/cluster/metallb/kustomize.template/configuration/pool.yaml @@ -6,7 +6,7 @@ metadata: namespace: metallb-system spec: addresses: - - PLACEHOLDER_CLUSTER_LOAD_BALANCER_RANGE + - {{ .cluster.ipAddressPool }} --- apiVersion: 
metallb.io/v1beta1 diff --git a/setup/cluster/metallb/installation/kustomization.yaml b/setup/cluster/metallb/kustomize.template/installation/kustomization.yaml similarity index 100% rename from setup/cluster/metallb/installation/kustomization.yaml rename to setup/cluster/metallb/kustomize.template/installation/kustomization.yaml diff --git a/setup/cluster/metallb/kustomize/configuration/kustomization.yaml b/setup/cluster/metallb/kustomize/configuration/kustomization.yaml new file mode 100644 index 0000000..35b3ac2 --- /dev/null +++ b/setup/cluster/metallb/kustomize/configuration/kustomization.yaml @@ -0,0 +1,3 @@ +namespace: metallb-system +resources: + - pool.yaml diff --git a/setup/cluster/metallb/kustomize/configuration/pool.yaml b/setup/cluster/metallb/kustomize/configuration/pool.yaml new file mode 100644 index 0000000..d295395 --- /dev/null +++ b/setup/cluster/metallb/kustomize/configuration/pool.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - 192.168.8.20-192.168.8.29 + +--- +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: l2-advertisement + namespace: metallb-system +spec: + ipAddressPools: + - first-pool diff --git a/setup/cluster/metallb/kustomize/installation/kustomization.yaml b/setup/cluster/metallb/kustomize/installation/kustomization.yaml new file mode 100644 index 0000000..e6e0425 --- /dev/null +++ b/setup/cluster/metallb/kustomize/installation/kustomization.yaml @@ -0,0 +1,3 @@ +namespace: metallb-system +resources: + - github.com/metallb/metallb/config/native?ref=v0.15.0 diff --git a/setup/cluster/nfs/README.md b/setup/cluster/nfs/README.md new file mode 100644 index 0000000..cf119dc --- /dev/null +++ b/setup/cluster/nfs/README.md @@ -0,0 +1,54 @@ +# NFS Setup (Optional) + +The infrastructure supports optional NFS (Network File System) for shared media storage across the cluster: + +## Host Setup + +First, set 
up the NFS server on your chosen host. + +```bash +./setup-nfs-host.sh +``` + +## Cluster Integration + +Add to your `config.yaml`: + +```yaml +cloud: + nfs: + host: box-01 + mediaPath: /data/media + storageCapacity: 250Gi +``` + +And now you can run the nfs cluster setup: + +```bash +setup/setup-nfs-host.sh +``` + +## Features + +- Automatic IP detection - Uses network IP even when hostname resolves to localhost +- Cluster-wide access - Any pod can mount the NFS share regardless of node placement +- Configurable capacity - Set PersistentVolume size via `NFS_STORAGE_CAPACITY` +- ReadWriteMany - Multiple pods can simultaneously access the same storage + +## Usage + +Applications can use NFS storage by setting `storageClassName: nfs` in their PVCs: + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: media-pvc +spec: + accessModes: + - ReadWriteMany + storageClassName: nfs + resources: + requests: + storage: 100Gi +``` diff --git a/setup/cluster/setup-nfs.sh b/setup/cluster/nfs/install.sh similarity index 83% rename from setup/cluster/setup-nfs.sh rename to setup/cluster/nfs/install.sh index 5ea7e42..7d9146f 100755 --- a/setup/cluster/setup-nfs.sh +++ b/setup/cluster/nfs/install.sh @@ -2,31 +2,24 @@ set -e set -o pipefail -# Navigate to script directory -SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" -SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" -PROJECT_DIR="$(dirname "$SCRIPT_DIR")" +if [ -z "${WC_HOME}" ]; then + echo "Please source the wildcloud environment first. (e.g., \`source ./env.sh\`)" + exit 1 +fi -# Source environment variables -source "${PROJECT_DIR}/load-env.sh" +CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster" +NFS_DIR="${CLUSTER_SETUP_DIR}/nfs" echo "Registering NFS server with Kubernetes cluster..." -# Check if NFS_HOST is configured -if [[ -z "${NFS_HOST}" ]]; then - echo "NFS_HOST not set. Skipping NFS Kubernetes setup." - echo "To enable NFS media sharing:" - echo "1. Set NFS_HOST= in your environment" - echo "2. 
Run setup-nfs-host.sh on the NFS host" - echo "3. Re-run this script" - exit 0 -fi +# Process templates with wild-compile-template-dir +echo "Processing NFS templates..." +wild-compile-template-dir --clean ${NFS_DIR}/kustomize.template ${NFS_DIR}/kustomize -# Set default for NFS_STORAGE_CAPACITY if not already set -if [[ -z "${NFS_STORAGE_CAPACITY}" ]]; then - export NFS_STORAGE_CAPACITY="250Gi" - echo "Using default NFS_STORAGE_CAPACITY: ${NFS_STORAGE_CAPACITY}" -fi +# Get NFS configuration from config.yaml +NFS_HOST=$(wild-config cloud.nfs.host) || exit 1 +NFS_MEDIA_PATH=$(wild-config cloud.nfs.mediaPath) || exit 1 +NFS_STORAGE_CAPACITY=$(wild-config cloud.nfs.storageCapacity) || exit 1 echo "NFS host: ${NFS_HOST}" echo "Media path: ${NFS_MEDIA_PATH}" @@ -151,20 +144,9 @@ test_nfs_mount() { create_k8s_resources() { echo "Creating Kubernetes NFS resources..." - # Generate config file with resolved variables - local nfs_dir="${SCRIPT_DIR}/nfs" - local env_file="${nfs_dir}/config/.env" - local config_file="${nfs_dir}/config/config.env" - - echo "Generating NFS configuration..." - export NFS_HOST_IP - export NFS_MEDIA_PATH - export NFS_STORAGE_CAPACITY - envsubst < "${env_file}" > "${config_file}" - - # Apply the NFS Kubernetes manifests using kustomize - echo "Applying NFS manifests from: ${nfs_dir}" - kubectl apply -k "${nfs_dir}" + # Apply the NFS Kubernetes manifests using kustomize (templates already processed) + echo "Applying NFS manifests..." 
+ kubectl apply -k "${NFS_DIR}/kustomize" echo "✓ NFS PersistentVolume and StorageClass created" diff --git a/setup/cluster/nfs/kustomization.yaml b/setup/cluster/nfs/kustomization.yaml deleted file mode 100644 index 4513254..0000000 --- a/setup/cluster/nfs/kustomization.yaml +++ /dev/null @@ -1,53 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: - - persistent-volume.yaml - - storage-class.yaml - -replacements: - - source: - kind: ConfigMap - name: nfs-config - fieldPath: data.NFS_HOST_IP - targets: - - select: - kind: PersistentVolume - name: nfs-media-pv - fieldPaths: - - spec.nfs.server - - select: - kind: StorageClass - name: nfs - fieldPaths: - - parameters.server - - source: - kind: ConfigMap - name: nfs-config - fieldPath: data.NFS_MEDIA_PATH - targets: - - select: - kind: PersistentVolume - name: nfs-media-pv - fieldPaths: - - spec.nfs.path - - select: - kind: StorageClass - name: nfs - fieldPaths: - - parameters.path - - source: - kind: ConfigMap - name: nfs-config - fieldPath: data.NFS_STORAGE_CAPACITY - targets: - - select: - kind: PersistentVolume - name: nfs-media-pv - fieldPaths: - - spec.capacity.storage - -configMapGenerator: - - name: nfs-config - envs: - - config/config.env \ No newline at end of file diff --git a/setup/cluster/nfs/kustomize.template/kustomization.yaml b/setup/cluster/nfs/kustomize.template/kustomization.yaml new file mode 100644 index 0000000..929a91a --- /dev/null +++ b/setup/cluster/nfs/kustomize.template/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - persistent-volume.yaml + - storage-class.yaml \ No newline at end of file diff --git a/setup/cluster/nfs/kustomize.template/persistent-volume.yaml b/setup/cluster/nfs/kustomize.template/persistent-volume.yaml new file mode 100644 index 0000000..5773bd3 --- /dev/null +++ b/setup/cluster/nfs/kustomize.template/persistent-volume.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: 
PersistentVolume +metadata: + name: nfs-media-pv + labels: + storage: nfs-media +spec: + capacity: + storage: {{ .cloud.nfs.storageCapacity }} + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + storageClassName: nfs + nfs: + server: {{ .cloud.nfs.host }} + path: {{ .cloud.nfs.mediaPath }} + mountOptions: + - nfsvers=4.1 + - rsize=1048576 + - wsize=1048576 + - hard + - intr + - timeo=600 \ No newline at end of file diff --git a/setup/cluster/nfs/kustomize.template/storage-class.yaml b/setup/cluster/nfs/kustomize.template/storage-class.yaml new file mode 100644 index 0000000..183b2f1 --- /dev/null +++ b/setup/cluster/nfs/kustomize.template/storage-class.yaml @@ -0,0 +1,10 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: nfs +provisioner: nfs +parameters: + server: {{ .cloud.nfs.host }} + path: {{ .cloud.nfs.mediaPath }} +reclaimPolicy: Retain +allowVolumeExpansion: true diff --git a/setup/cluster/nfs/kustomize/kustomization.yaml b/setup/cluster/nfs/kustomize/kustomization.yaml new file mode 100644 index 0000000..929a91a --- /dev/null +++ b/setup/cluster/nfs/kustomize/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - persistent-volume.yaml + - storage-class.yaml \ No newline at end of file diff --git a/setup/cluster/nfs/persistent-volume.yaml b/setup/cluster/nfs/kustomize/persistent-volume.yaml similarity index 83% rename from setup/cluster/nfs/persistent-volume.yaml rename to setup/cluster/nfs/kustomize/persistent-volume.yaml index 2991e54..8516370 100644 --- a/setup/cluster/nfs/persistent-volume.yaml +++ b/setup/cluster/nfs/kustomize/persistent-volume.yaml @@ -6,14 +6,14 @@ metadata: storage: nfs-media spec: capacity: - storage: REPLACE_ME + storage: 50Gi accessModes: - ReadWriteMany persistentVolumeReclaimPolicy: Retain storageClassName: nfs nfs: - server: REPLACE_ME - path: REPLACE_ME + server: box-01 + path: /data/media mountOptions: - nfsvers=4.1 
- rsize=1048576 diff --git a/setup/cluster/nfs/storage-class.yaml b/setup/cluster/nfs/kustomize/storage-class.yaml similarity index 78% rename from setup/cluster/nfs/storage-class.yaml rename to setup/cluster/nfs/kustomize/storage-class.yaml index 2647393..b21bf3a 100644 --- a/setup/cluster/nfs/storage-class.yaml +++ b/setup/cluster/nfs/kustomize/storage-class.yaml @@ -4,7 +4,7 @@ metadata: name: nfs provisioner: nfs parameters: - server: REPLACE_ME - path: REPLACE_ME + server: box-01 + path: /data/media reclaimPolicy: Retain allowVolumeExpansion: true diff --git a/setup/cluster/setup-nfs-host.sh b/setup/cluster/nfs/setup-nfs-host.sh similarity index 100% rename from setup/cluster/setup-nfs-host.sh rename to setup/cluster/nfs/setup-nfs-host.sh diff --git a/setup/cluster/setup-all.sh b/setup/cluster/setup-all.sh deleted file mode 100755 index 0309573..0000000 --- a/setup/cluster/setup-all.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash -set -e - -# Navigate to script directory -SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" -SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" -cd "$SCRIPT_DIR" - -echo "Setting up infrastructure components for k3s..." - -# Make all script files executable -chmod +x *.sh - -# Utils -./setup-utils.sh - -# Setup MetalLB (must be first for IP allocation) -./setup-metallb.sh - -# Setup Longhorn -./setup-longhorn.sh - -# Setup Traefik -./setup-traefik.sh - -# Setup CoreDNS -./setup-coredns.sh - -# Setup cert-manager -./setup-cert-manager.sh - -# Setup ExternalDNS -./setup-externaldns.sh - -# Setup Kubernetes Dashboard -./setup-dashboard.sh - -# Setup NFS Kubernetes integration (optional) -./setup-nfs.sh - -# Setup Docker Registry -./setup-registry.sh - -echo "Infrastructure setup complete!" -echo -echo "Next steps:" -echo "1. Install Helm charts for non-infrastructure components" -echo "2. Access the dashboard at: https://dashboard.internal.${DOMAIN}" -echo "3. 
Get the dashboard token with: ./bin/dashboard-token" -echo -echo "To verify components, run:" -echo "- kubectl get pods -n cert-manager" -echo "- kubectl get pods -n externaldns" -echo "- kubectl get pods -n kubernetes-dashboard" -echo "- kubectl get clusterissuers" \ No newline at end of file diff --git a/setup/cluster/setup-coredns.sh b/setup/cluster/setup-coredns.sh deleted file mode 100755 index 129b27c..0000000 --- a/setup/cluster/setup-coredns.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -set -e - -SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" -SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" -cd "$SCRIPT_DIR" - -# Source environment variables -if [[ -f "../load-env.sh" ]]; then - source ../load-env.sh -fi - -echo "Setting up CoreDNS for k3s..." -echo "Script directory: ${SCRIPT_DIR}" -echo "Current directory: $(pwd)" - -# Apply the k3s-compatible custom DNS override (k3s will preserve this) -echo "Applying CoreDNS custom override configuration..." -cat "${SCRIPT_DIR}/coredns/coredns-custom-config.yaml" | envsubst | kubectl apply -f - - -# Apply the LoadBalancer service for external access to CoreDNS -echo "Applying CoreDNS service configuration..." -cat "${SCRIPT_DIR}/coredns/coredns-lb-service.yaml" | envsubst | kubectl apply -f - - -# Restart CoreDNS pods to apply the changes -echo "Restarting CoreDNS pods to apply changes..." -kubectl rollout restart deployment/coredns -n kube-system -kubectl rollout status deployment/coredns -n kube-system - -echo "CoreDNS setup complete!" 
diff --git a/setup/cluster/setup-dashboard.sh b/setup/cluster/setup-dashboard.sh deleted file mode 100755 index 52fa3e8..0000000 --- a/setup/cluster/setup-dashboard.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -set -e - -# Store the script directory path for later use -SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" -SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" -cd "$SCRIPT_DIR" - -# Source environment variables -if [[ -f "../load-env.sh" ]]; then - source ../load-env.sh -fi - -echo "Setting up Kubernetes Dashboard..." - -NAMESPACE="kubernetes-dashboard" - -# Apply the official dashboard installation -echo "Installing Kubernetes Dashboard core components..." -kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml - -# Copying cert-manager secrets to the dashboard namespace -copy-secret cert-manager:wildcard-internal-wild-cloud-tls $NAMESPACE -copy-secret cert-manager:wildcard-wild-cloud-tls $NAMESPACE - -# Create admin service account and token -echo "Creating dashboard admin service account and token..." -cat "${SCRIPT_DIR}/kubernetes-dashboard/dashboard-admin-rbac.yaml" | kubectl apply -f - - -# Apply the dashboard configuration -echo "Applying dashboard configuration..." -cat "${SCRIPT_DIR}/kubernetes-dashboard/dashboard-kube-system.yaml" | envsubst | kubectl apply -f - - -# Restart CoreDNS to pick up the changes -kubectl delete pods -n kube-system -l k8s-app=kube-dns -echo "Restarted CoreDNS to pick up DNS changes" - -# Wait for dashboard to be ready -echo "Waiting for Kubernetes Dashboard to be ready..." -kubectl rollout status deployment/kubernetes-dashboard -n $NAMESPACE --timeout=60s - -echo "Kubernetes Dashboard setup complete!" 
-echo "Access the dashboard at: https://dashboard.internal.${DOMAIN}" -echo "" -echo "To get the authentication token, run:" -echo "./bin/dashboard-token" diff --git a/setup/cluster/setup-externaldns.sh b/setup/cluster/setup-externaldns.sh deleted file mode 100755 index 53656ac..0000000 --- a/setup/cluster/setup-externaldns.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash -set -e - -# Navigate to script directory -SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" -SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" -cd "$SCRIPT_DIR" - -# Source environment variables -if [[ -f "../load-env.sh" ]]; then - source ../load-env.sh -fi - -echo "Setting up ExternalDNS..." - -# Create externaldns namespace -kubectl create namespace externaldns --dry-run=client -o yaml | kubectl apply -f - - -# Setup Cloudflare API token secret for ExternalDNS -if [[ -n "${CLOUDFLARE_API_TOKEN}" ]]; then - echo "Creating Cloudflare API token secret..." - kubectl create secret generic cloudflare-api-token \ - --namespace externaldns \ - --from-literal=api-token="${CLOUDFLARE_API_TOKEN}" \ - --dry-run=client -o yaml | kubectl apply -f - -else - echo "Error: CLOUDFLARE_API_TOKEN not set. ExternalDNS will not work correctly." - exit 1 -fi - -# Apply common RBAC resources -echo "Deploying ExternalDNS RBAC resources..." -cat ${SCRIPT_DIR}/externaldns/externaldns-rbac.yaml | envsubst | kubectl apply -f - - -# Apply ExternalDNS manifests with environment variables -echo "Deploying ExternalDNS for external DNS (Cloudflare)..." -cat ${SCRIPT_DIR}/externaldns/externaldns-cloudflare.yaml | envsubst | kubectl apply -f - - -# Wait for ExternalDNS to be ready -echo "Waiting for Cloudflare ExternalDNS to be ready..." -kubectl rollout status deployment/external-dns -n externaldns --timeout=60s - -# echo "Waiting for CoreDNS ExternalDNS to be ready..." -# kubectl rollout status deployment/external-dns-coredns -n externaldns --timeout=60s - -echo "ExternalDNS setup complete!" 
-echo "" -echo "To verify the installation:" -echo " kubectl get pods -n externaldns" -echo " kubectl logs -n externaldns -l app=external-dns -f" -echo " kubectl logs -n externaldns -l app=external-dns-coredns -f" diff --git a/setup/cluster/setup-longhorn.sh b/setup/cluster/setup-longhorn.sh deleted file mode 100755 index b69a386..0000000 --- a/setup/cluster/setup-longhorn.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -e - -SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" -SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" -cd "$SCRIPT_DIR" -if [[ -f "../load-env.sh" ]]; then - source ../load-env.sh -fi - -echo "Setting up Longhorn..." - -# Apply Longhorn with kustomize to apply our customizations -kubectl apply -k ${SCRIPT_DIR}/longhorn/ - -echo "Longhorn setup complete!" diff --git a/setup/cluster/setup-registry.sh b/setup/cluster/setup-registry.sh deleted file mode 100755 index 14c9e0c..0000000 --- a/setup/cluster/setup-registry.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -set -e - -# Navigate to script directory -SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" -SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" - -echo "Setting up Docker Registry..." - -# Apply the docker registry manifests using kustomize -kubectl apply -k "${SCRIPT_DIR}/docker-registry" - -echo "Waiting for Docker Registry to be ready..." -kubectl wait --for=condition=available --timeout=300s deployment/docker-registry -n docker-registry - -echo "Docker Registry setup complete!" 
- -# Show deployment status -kubectl get pods -n docker-registry -kubectl get services -n docker-registry \ No newline at end of file diff --git a/setup/cluster/setup-traefik.sh b/setup/cluster/setup-traefik.sh deleted file mode 100755 index 17e4c1b..0000000 --- a/setup/cluster/setup-traefik.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e - -SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" -SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" -cd "$SCRIPT_DIR" - -# Source environment variables -if [[ -f "../load-env.sh" ]]; then - source ../load-env.sh -fi - -echo "Setting up Traefik service and middleware for k3s..." - -cat ${SCRIPT_DIR}/traefik/traefik-service.yaml | envsubst | kubectl apply -f - -cat ${SCRIPT_DIR}/traefik/internal-middleware.yaml | envsubst | kubectl apply -f - - -echo "Traefik setup complete!" diff --git a/setup/cluster/traefik/README.md b/setup/cluster/traefik/README.md index bd599b6..85fed8a 100644 --- a/setup/cluster/traefik/README.md +++ b/setup/cluster/traefik/README.md @@ -5,3 +5,27 @@ Ingress RDs can be create for any service. The routes specificed in the Ingress are added automatically to the Traefik proxy. Traefik serves all incoming network traffic on ports 80 and 443 to their appropriate services based on the route. + +## Notes + +These kustomize templates were created with: + +```bash +helm-chart-to-kustomize traefik/traefik traefik traefik values.yaml +``` + +With values.yaml being: + +```yaml +ingressRoute: + dashboard: + enabled: true + matchRule: Host(`dashboard.localhost`) + entryPoints: + - web +providers: + kubernetesGateway: + enabled: true +gateway: + namespacePolicy: All +``` diff --git a/setup/cluster/traefik/install.sh b/setup/cluster/traefik/install.sh new file mode 100755 index 0000000..8416b81 --- /dev/null +++ b/setup/cluster/traefik/install.sh @@ -0,0 +1,44 @@ +#!/bin/bash +set -e + +if [ -z "${WC_HOME}" ]; then + echo "Please source the wildcloud environment first. 
(e.g., \`source ./env.sh\`)" + exit 1 +fi + +CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster" +TRAEFIK_DIR="${CLUSTER_SETUP_DIR}/traefik" + +echo "Setting up Traefik ingress controller..." + +# Install required CRDs first +echo "Installing Gateway API CRDs..." +kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/standard-install.yaml + +echo "Installing Traefik CRDs..." +kubectl apply -f https://raw.githubusercontent.com/traefik/traefik/v3.4/docs/content/reference/dynamic-configuration/kubernetes-crd-definition-v1.yml + +echo "Waiting for CRDs to be established..." +kubectl wait --for condition=established crd/gateways.gateway.networking.k8s.io --timeout=60s +kubectl wait --for condition=established crd/gatewayclasses.gateway.networking.k8s.io --timeout=60s +kubectl wait --for condition=established crd/ingressroutes.traefik.io --timeout=60s +kubectl wait --for condition=established crd/middlewares.traefik.io --timeout=60s + +# Process templates with wild-compile-template-dir +echo "Processing Traefik templates..." +wild-compile-template-dir --clean ${TRAEFIK_DIR}/kustomize.template ${TRAEFIK_DIR}/kustomize + +# Apply Traefik using kustomize +echo "Deploying Traefik..." +kubectl apply -k ${TRAEFIK_DIR}/kustomize + +# Wait for Traefik to be ready +echo "Waiting for Traefik to be ready..." +kubectl wait --for=condition=Available deployment/traefik -n traefik --timeout=120s + + +echo "✅ Traefik setup complete!" 
+echo "" +echo "To verify the installation:" +echo " kubectl get pods -n traefik" +echo " kubectl get svc -n traefik" diff --git a/setup/cluster/traefik/internal-middleware.yaml b/setup/cluster/traefik/kustomize.template/internal-middleware.yaml similarity index 100% rename from setup/cluster/traefik/internal-middleware.yaml rename to setup/cluster/traefik/kustomize.template/internal-middleware.yaml diff --git a/setup/cluster/traefik/kustomize.template/kustomization.yaml b/setup/cluster/traefik/kustomize.template/kustomization.yaml new file mode 100644 index 0000000..5bc2480 --- /dev/null +++ b/setup/cluster/traefik/kustomize.template/kustomization.yaml @@ -0,0 +1,13 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- namespace.yaml +- templates/deployment.yaml +- templates/gatewayclass.yaml +- templates/gateway.yaml +- templates/ingressclass.yaml +- templates/ingressroute.yaml +- templates/rbac/clusterrolebinding.yaml +- templates/rbac/clusterrole.yaml +- templates/rbac/serviceaccount.yaml +- templates/service.yaml diff --git a/setup/cluster/traefik/kustomize.template/namespace.yaml b/setup/cluster/traefik/kustomize.template/namespace.yaml new file mode 100644 index 0000000..c088a91 --- /dev/null +++ b/setup/cluster/traefik/kustomize.template/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: traefik diff --git a/setup/cluster/traefik/kustomize.template/templates/deployment.yaml b/setup/cluster/traefik/kustomize.template/templates/deployment.yaml new file mode 100644 index 0000000..7b87880 --- /dev/null +++ b/setup/cluster/traefik/kustomize.template/templates/deployment.yaml @@ -0,0 +1,130 @@ +--- +# Source: traefik/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: traefik + namespace: traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm + annotations: 
+spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 1 + minReadySeconds: 0 + template: + metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/path: "/metrics" + prometheus.io/port: "9100" + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm + spec: + serviceAccountName: traefik + automountServiceAccountToken: true + terminationGracePeriodSeconds: 60 + hostNetwork: false + containers: + - image: docker.io/traefik:v3.4.1 + imagePullPolicy: IfNotPresent + name: traefik + resources: + readinessProbe: + httpGet: + path: /ping + port: 8080 + scheme: HTTP + failureThreshold: 1 + initialDelaySeconds: 2 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: + httpGet: + path: /ping + port: 8080 + scheme: HTTP + failureThreshold: 3 + initialDelaySeconds: 2 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + lifecycle: + ports: + - name: metrics + containerPort: 9100 + protocol: TCP + - name: traefik + containerPort: 8080 + protocol: TCP + - name: web + containerPort: 8000 + protocol: TCP + - name: websecure + containerPort: 8443 + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + - name: data + mountPath: /data + - name: tmp + mountPath: /tmp + args: + - "--global.checkNewVersion" + - "--entryPoints.metrics.address=:9100/tcp" + - "--entryPoints.traefik.address=:8080/tcp" + - "--entryPoints.web.address=:8000/tcp" + - "--entryPoints.websecure.address=:8443/tcp" + - "--api.dashboard=true" + - "--ping=true" + - "--metrics.prometheus=true" + - "--metrics.prometheus.entrypoint=metrics" + - "--providers.kubernetescrd" + - 
"--providers.kubernetescrd.allowEmptyServices=true" + - "--providers.kubernetesingress" + - "--providers.kubernetesingress.allowEmptyServices=true" + - "--providers.kubernetesingress.ingressendpoint.publishedservice=traefik/traefik" + - "--providers.kubernetesgateway" + - "--providers.kubernetesgateway.statusaddress.service.name=traefik" + - "--providers.kubernetesgateway.statusaddress.service.namespace=traefik" + - "--entryPoints.websecure.http.tls=true" + - "--log.level=INFO" + + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumes: + - name: data + emptyDir: {} + - name: tmp + emptyDir: {} + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 diff --git a/setup/cluster/traefik/kustomize.template/templates/gateway.yaml b/setup/cluster/traefik/kustomize.template/templates/gateway.yaml new file mode 100644 index 0000000..9a42408 --- /dev/null +++ b/setup/cluster/traefik/kustomize.template/templates/gateway.yaml @@ -0,0 +1,18 @@ +--- +# Source: traefik/templates/gateway.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: traefik-gateway + namespace: traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm +spec: + gatewayClassName: traefik + listeners: + - name: web + port: 8000 + protocol: HTTP diff --git a/setup/cluster/traefik/kustomize.template/templates/gatewayclass.yaml b/setup/cluster/traefik/kustomize.template/templates/gatewayclass.yaml new file mode 100644 index 0000000..69487c0 --- /dev/null +++ b/setup/cluster/traefik/kustomize.template/templates/gatewayclass.yaml @@ -0,0 +1,13 @@ +--- +# Source: traefik/templates/gatewayclass.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: GatewayClass +metadata: + name: traefik + labels: + app.kubernetes.io/name: traefik + 
app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm +spec: + controllerName: traefik.io/gateway-controller diff --git a/setup/cluster/traefik/kustomize.template/templates/ingressclass.yaml b/setup/cluster/traefik/kustomize.template/templates/ingressclass.yaml new file mode 100644 index 0000000..b283328 --- /dev/null +++ b/setup/cluster/traefik/kustomize.template/templates/ingressclass.yaml @@ -0,0 +1,15 @@ +--- +# Source: traefik/templates/ingressclass.yaml +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + annotations: + ingressclass.kubernetes.io/is-default-class: "true" + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm + name: traefik +spec: + controller: traefik.io/ingress-controller diff --git a/setup/cluster/traefik/kustomize.template/templates/ingressroute.yaml b/setup/cluster/traefik/kustomize.template/templates/ingressroute.yaml new file mode 100644 index 0000000..7682cb5 --- /dev/null +++ b/setup/cluster/traefik/kustomize.template/templates/ingressroute.yaml @@ -0,0 +1,21 @@ +--- +# Source: traefik/templates/ingressroute.yaml +apiVersion: traefik.io/v1alpha1 +kind: IngressRoute +metadata: + name: traefik-dashboard + namespace: traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm +spec: + entryPoints: + - web + routes: + - match: Host(`dashboard.localhost`) + kind: Rule + services: + - kind: TraefikService + name: api@internal diff --git a/setup/cluster/traefik/kustomize.template/templates/rbac/clusterrole.yaml b/setup/cluster/traefik/kustomize.template/templates/rbac/clusterrole.yaml new file mode 100644 index 0000000..333fa7c --- /dev/null +++ b/setup/cluster/traefik/kustomize.template/templates/rbac/clusterrole.yaml @@ -0,0 +1,108 @@ +--- +# Source: 
traefik/templates/rbac/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: traefik-traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm +rules: + - apiGroups: + - "" + resources: + - configmaps + - nodes + - services + verbs: + - get + - list + - watch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - networking.k8s.io + resources: + - ingressclasses + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - traefik.io + resources: + - ingressroutes + - ingressroutetcps + - ingressrouteudps + - middlewares + - middlewaretcps + - serverstransports + - serverstransporttcps + - tlsoptions + - tlsstores + - traefikservices + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - namespaces + - secrets + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - gateway.networking.k8s.io + resources: + - backendtlspolicies + - gatewayclasses + - gateways + - grpcroutes + - httproutes + - referencegrants + - tcproutes + - tlsroutes + verbs: + - get + - list + - watch + - apiGroups: + - gateway.networking.k8s.io + resources: + - backendtlspolicies/status + - gatewayclasses/status + - gateways/status + - grpcroutes/status + - httproutes/status + - tcproutes/status + - tlsroutes/status + verbs: + - update diff --git a/setup/cluster/traefik/kustomize.template/templates/rbac/clusterrolebinding.yaml b/setup/cluster/traefik/kustomize.template/templates/rbac/clusterrolebinding.yaml new file mode 100644 index 0000000..3c57229 --- /dev/null +++ b/setup/cluster/traefik/kustomize.template/templates/rbac/clusterrolebinding.yaml 
@@ -0,0 +1,19 @@ +--- +# Source: traefik/templates/rbac/clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: traefik-traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: traefik-traefik +subjects: + - kind: ServiceAccount + name: traefik + namespace: traefik diff --git a/setup/cluster/traefik/kustomize.template/templates/rbac/serviceaccount.yaml b/setup/cluster/traefik/kustomize.template/templates/rbac/serviceaccount.yaml new file mode 100644 index 0000000..3ab2406 --- /dev/null +++ b/setup/cluster/traefik/kustomize.template/templates/rbac/serviceaccount.yaml @@ -0,0 +1,14 @@ +--- +# Source: traefik/templates/rbac/serviceaccount.yaml +kind: ServiceAccount +apiVersion: v1 +metadata: + name: traefik + namespace: traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm + annotations: +automountServiceAccountToken: false diff --git a/setup/cluster/traefik/kustomize.template/templates/service.yaml b/setup/cluster/traefik/kustomize.template/templates/service.yaml new file mode 100644 index 0000000..73a4aa9 --- /dev/null +++ b/setup/cluster/traefik/kustomize.template/templates/service.yaml @@ -0,0 +1,27 @@ +--- +# Source: traefik/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: traefik + namespace: traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm + annotations: +spec: + type: LoadBalancer + selector: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + ports: + - port: 80 + name: web + targetPort: web + protocol: TCP + - port: 443 + name: websecure + 
targetPort: websecure + protocol: TCP diff --git a/setup/cluster/traefik/traefik-service.yaml b/setup/cluster/traefik/kustomize.template/traefik-service.yaml similarity index 90% rename from setup/cluster/traefik/traefik-service.yaml rename to setup/cluster/traefik/kustomize.template/traefik-service.yaml index e0aaa6c..883a03d 100644 --- a/setup/cluster/traefik/traefik-service.yaml +++ b/setup/cluster/traefik/kustomize.template/traefik-service.yaml @@ -14,7 +14,7 @@ metadata: app.kubernetes.io/name: traefik spec: type: LoadBalancer - loadBalancerIP: 192.168.8.240 + loadBalancerIP: {{ .cluster.loadBalancerIP }} selector: app.kubernetes.io/instance: traefik-kube-system app.kubernetes.io/name: traefik @@ -22,8 +22,7 @@ spec: - name: web port: 80 targetPort: web - - name: websecure + - name: websecure port: 443 targetPort: websecure externalTrafficPolicy: Local - \ No newline at end of file diff --git a/setup/cluster/traefik/kustomize/internal-middleware.yaml b/setup/cluster/traefik/kustomize/internal-middleware.yaml new file mode 100644 index 0000000..264bb6e --- /dev/null +++ b/setup/cluster/traefik/kustomize/internal-middleware.yaml @@ -0,0 +1,13 @@ +apiVersion: traefik.io/v1alpha1 +kind: Middleware +metadata: + name: internal-only + namespace: kube-system +spec: + ipAllowList: + # Restrict to local private network ranges - adjust these to match your network + sourceRange: + - 127.0.0.1/32 # localhost + - 10.0.0.0/8 # Private network + - 172.16.0.0/12 # Private network + - 192.168.0.0/16 # Private network \ No newline at end of file diff --git a/setup/cluster/traefik/kustomize/kustomization.yaml b/setup/cluster/traefik/kustomize/kustomization.yaml new file mode 100644 index 0000000..5bc2480 --- /dev/null +++ b/setup/cluster/traefik/kustomize/kustomization.yaml @@ -0,0 +1,13 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- namespace.yaml +- templates/deployment.yaml +- templates/gatewayclass.yaml +- templates/gateway.yaml
+- templates/ingressclass.yaml +- templates/ingressroute.yaml +- templates/rbac/clusterrolebinding.yaml +- templates/rbac/clusterrole.yaml +- templates/rbac/serviceaccount.yaml +- templates/service.yaml diff --git a/setup/cluster/traefik/kustomize/namespace.yaml b/setup/cluster/traefik/kustomize/namespace.yaml new file mode 100644 index 0000000..c088a91 --- /dev/null +++ b/setup/cluster/traefik/kustomize/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: traefik diff --git a/setup/cluster/traefik/kustomize/templates/deployment.yaml b/setup/cluster/traefik/kustomize/templates/deployment.yaml new file mode 100644 index 0000000..7b87880 --- /dev/null +++ b/setup/cluster/traefik/kustomize/templates/deployment.yaml @@ -0,0 +1,130 @@ +--- +# Source: traefik/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: traefik + namespace: traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm + annotations: +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 1 + minReadySeconds: 0 + template: + metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/path: "/metrics" + prometheus.io/port: "9100" + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm + spec: + serviceAccountName: traefik + automountServiceAccountToken: true + terminationGracePeriodSeconds: 60 + hostNetwork: false + containers: + - image: docker.io/traefik:v3.4.1 + imagePullPolicy: IfNotPresent + name: traefik + resources: + readinessProbe: + httpGet: + path: /ping + port: 8080 + scheme: HTTP + failureThreshold: 1 + initialDelaySeconds: 2 + periodSeconds: 10 + 
successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: + httpGet: + path: /ping + port: 8080 + scheme: HTTP + failureThreshold: 3 + initialDelaySeconds: 2 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + lifecycle: + ports: + - name: metrics + containerPort: 9100 + protocol: TCP + - name: traefik + containerPort: 8080 + protocol: TCP + - name: web + containerPort: 8000 + protocol: TCP + - name: websecure + containerPort: 8443 + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + - name: data + mountPath: /data + - name: tmp + mountPath: /tmp + args: + - "--global.checkNewVersion" + - "--entryPoints.metrics.address=:9100/tcp" + - "--entryPoints.traefik.address=:8080/tcp" + - "--entryPoints.web.address=:8000/tcp" + - "--entryPoints.websecure.address=:8443/tcp" + - "--api.dashboard=true" + - "--ping=true" + - "--metrics.prometheus=true" + - "--metrics.prometheus.entrypoint=metrics" + - "--providers.kubernetescrd" + - "--providers.kubernetescrd.allowEmptyServices=true" + - "--providers.kubernetesingress" + - "--providers.kubernetesingress.allowEmptyServices=true" + - "--providers.kubernetesingress.ingressendpoint.publishedservice=traefik/traefik" + - "--providers.kubernetesgateway" + - "--providers.kubernetesgateway.statusaddress.service.name=traefik" + - "--providers.kubernetesgateway.statusaddress.service.namespace=traefik" + - "--entryPoints.websecure.http.tls=true" + - "--log.level=INFO" + + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumes: + - name: data + emptyDir: {} + - name: tmp + emptyDir: {} + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 diff --git a/setup/cluster/traefik/kustomize/templates/gateway.yaml b/setup/cluster/traefik/kustomize/templates/gateway.yaml new file mode 100644 index 
0000000..9a42408 --- /dev/null +++ b/setup/cluster/traefik/kustomize/templates/gateway.yaml @@ -0,0 +1,18 @@ +--- +# Source: traefik/templates/gateway.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: traefik-gateway + namespace: traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm +spec: + gatewayClassName: traefik + listeners: + - name: web + port: 8000 + protocol: HTTP diff --git a/setup/cluster/traefik/kustomize/templates/gatewayclass.yaml b/setup/cluster/traefik/kustomize/templates/gatewayclass.yaml new file mode 100644 index 0000000..69487c0 --- /dev/null +++ b/setup/cluster/traefik/kustomize/templates/gatewayclass.yaml @@ -0,0 +1,13 @@ +--- +# Source: traefik/templates/gatewayclass.yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: GatewayClass +metadata: + name: traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm +spec: + controllerName: traefik.io/gateway-controller diff --git a/setup/cluster/traefik/kustomize/templates/ingressclass.yaml b/setup/cluster/traefik/kustomize/templates/ingressclass.yaml new file mode 100644 index 0000000..b283328 --- /dev/null +++ b/setup/cluster/traefik/kustomize/templates/ingressclass.yaml @@ -0,0 +1,15 @@ +--- +# Source: traefik/templates/ingressclass.yaml +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + annotations: + ingressclass.kubernetes.io/is-default-class: "true" + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm + name: traefik +spec: + controller: traefik.io/ingress-controller diff --git a/setup/cluster/traefik/kustomize/templates/ingressroute.yaml b/setup/cluster/traefik/kustomize/templates/ingressroute.yaml new file mode 100644 
index 0000000..7682cb5 --- /dev/null +++ b/setup/cluster/traefik/kustomize/templates/ingressroute.yaml @@ -0,0 +1,21 @@ +--- +# Source: traefik/templates/ingressroute.yaml +apiVersion: traefik.io/v1alpha1 +kind: IngressRoute +metadata: + name: traefik-dashboard + namespace: traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm +spec: + entryPoints: + - web + routes: + - match: Host(`dashboard.localhost`) + kind: Rule + services: + - kind: TraefikService + name: api@internal diff --git a/setup/cluster/traefik/kustomize/templates/rbac/clusterrole.yaml b/setup/cluster/traefik/kustomize/templates/rbac/clusterrole.yaml new file mode 100644 index 0000000..333fa7c --- /dev/null +++ b/setup/cluster/traefik/kustomize/templates/rbac/clusterrole.yaml @@ -0,0 +1,108 @@ +--- +# Source: traefik/templates/rbac/clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: traefik-traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm +rules: + - apiGroups: + - "" + resources: + - configmaps + - nodes + - services + verbs: + - get + - list + - watch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - networking.k8s.io + resources: + - ingressclasses + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - traefik.io + resources: + - ingressroutes + - ingressroutetcps + - ingressrouteudps + - middlewares + - middlewaretcps + - serverstransports + - serverstransporttcps + - tlsoptions + - tlsstores + - traefikservices + verbs: + - get + - list + - watch + 
- apiGroups: + - "" + resources: + - namespaces + - secrets + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - gateway.networking.k8s.io + resources: + - backendtlspolicies + - gatewayclasses + - gateways + - grpcroutes + - httproutes + - referencegrants + - tcproutes + - tlsroutes + verbs: + - get + - list + - watch + - apiGroups: + - gateway.networking.k8s.io + resources: + - backendtlspolicies/status + - gatewayclasses/status + - gateways/status + - grpcroutes/status + - httproutes/status + - tcproutes/status + - tlsroutes/status + verbs: + - update diff --git a/setup/cluster/traefik/kustomize/templates/rbac/clusterrolebinding.yaml b/setup/cluster/traefik/kustomize/templates/rbac/clusterrolebinding.yaml new file mode 100644 index 0000000..3c57229 --- /dev/null +++ b/setup/cluster/traefik/kustomize/templates/rbac/clusterrolebinding.yaml @@ -0,0 +1,19 @@ +--- +# Source: traefik/templates/rbac/clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: traefik-traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: traefik-traefik +subjects: + - kind: ServiceAccount + name: traefik + namespace: traefik diff --git a/setup/cluster/traefik/kustomize/templates/rbac/serviceaccount.yaml b/setup/cluster/traefik/kustomize/templates/rbac/serviceaccount.yaml new file mode 100644 index 0000000..3ab2406 --- /dev/null +++ b/setup/cluster/traefik/kustomize/templates/rbac/serviceaccount.yaml @@ -0,0 +1,14 @@ +--- +# Source: traefik/templates/rbac/serviceaccount.yaml +kind: ServiceAccount +apiVersion: v1 +metadata: + name: traefik + namespace: traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm + annotations: 
+automountServiceAccountToken: false diff --git a/setup/cluster/traefik/kustomize/templates/service.yaml b/setup/cluster/traefik/kustomize/templates/service.yaml new file mode 100644 index 0000000..73a4aa9 --- /dev/null +++ b/setup/cluster/traefik/kustomize/templates/service.yaml @@ -0,0 +1,27 @@ +--- +# Source: traefik/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: traefik + namespace: traefik + labels: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + helm.sh/chart: traefik-36.1.0 + app.kubernetes.io/managed-by: Helm + annotations: +spec: + type: LoadBalancer + selector: + app.kubernetes.io/name: traefik + app.kubernetes.io/instance: traefik-traefik + ports: + - port: 80 + name: web + targetPort: web + protocol: TCP + - port: 443 + name: websecure + targetPort: websecure + protocol: TCP diff --git a/setup/cluster/traefik/kustomize/traefik-service.yaml b/setup/cluster/traefik/kustomize/traefik-service.yaml new file mode 100644 index 0000000..883a03d --- /dev/null +++ b/setup/cluster/traefik/kustomize/traefik-service.yaml @@ -0,0 +1,28 @@ +--- +# Traefik service configuration with static LoadBalancer IP +apiVersion: v1 +kind: Service +metadata: + name: traefik + namespace: kube-system + annotations: + # Get a stable IP from MetalLB + metallb.universe.tf/address-pool: production + metallb.universe.tf/allow-shared-ip: traefik-lb + labels: + app.kubernetes.io/instance: traefik-kube-system + app.kubernetes.io/name: traefik +spec: + type: LoadBalancer + loadBalancerIP: {{ .cluster.loadBalancerIP }} + selector: + app.kubernetes.io/instance: traefik-kube-system + app.kubernetes.io/name: traefik + ports: + - name: web + port: 80 + targetPort: web + - name: websecure + port: 443 + targetPort: websecure + externalTrafficPolicy: Local diff --git a/setup/cluster/utils/README.md b/setup/cluster/utils/README.md new file mode 100644 index 0000000..e69de29 diff --git a/setup/cluster/utils/install.sh
b/setup/cluster/utils/install.sh new file mode 100755 index 0000000..4b3d10b --- /dev/null +++ b/setup/cluster/utils/install.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -e + +if [ -z "${WC_HOME}" ]; then + echo "Please source the wildcloud environment first. (e.g., \`source ./env.sh\`)" + exit 1 +fi + +CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster" +UTILS_DIR="${CLUSTER_SETUP_DIR}/utils" + +echo "Setting up cluster utilities..." + +# Process templates with wild-compile-template-dir +echo "Processing utils templates..." +wild-compile-template-dir --clean ${UTILS_DIR}/kustomize.template ${UTILS_DIR}/kustomize + +echo "Applying utility manifests..." +kubectl apply -f ${UTILS_DIR}/kustomize/ + +echo "✅ Cluster utilities setup complete!" \ No newline at end of file diff --git a/setup/cluster/utils/netdebug.yaml b/setup/cluster/utils/kustomize.template/netdebug.yaml similarity index 100% rename from setup/cluster/utils/netdebug.yaml rename to setup/cluster/utils/kustomize.template/netdebug.yaml diff --git a/setup/cluster/validate-setup.sh b/setup/cluster/validate-setup.sh index 065b4c9..e3e0aaa 100755 --- a/setup/cluster/validate-setup.sh +++ b/setup/cluster/validate-setup.sh @@ -1,7 +1,11 @@ #!/bin/bash set -e -# FIXME: Need to template out the 192.168 addresses. +# Check if WC_HOME is set (wildcloud environment sourced) +if [ -z "${WC_HOME}" ]; then + echo "Please source the wildcloud environment first. 
(e.g., \`source ./env.sh\`)" + exit 1 +fi # Navigate to script directory SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")" @@ -9,11 +13,6 @@ SCRIPT_DIR="$(dirname "$SCRIPT_PATH")" ROOT_DIR="$(dirname "$SCRIPT_DIR")" cd "$SCRIPT_DIR" -# Source environment variables -if [[ -f "../load-env.sh" ]]; then - source ../load-env.sh -fi - # Define colors for better readability GREEN='\033[0;32m' YELLOW='\033[1;33m' @@ -23,6 +22,20 @@ CYAN='\033[0;36m' BOLD='\033[1m' NC='\033[0m' # No Color +# Get configuration from wild-config +DOMAIN=$(wild-config cloud.domain) +INTERNAL_DOMAIN=$(wild-config cloud.internalDomain) +OPERATOR_EMAIL=$(wild-config operator.email) +DNS_IP=$(wild-config cloud.dns.ip) +ROUTER_IP=$(wild-config cloud.router.ip) + +# Validate required configuration +if [[ -z "$DOMAIN" || -z "$INTERNAL_DOMAIN" ]]; then + echo "Error: Unable to get domain configuration from wild-config" + echo "Please ensure your config.yaml is properly configured" + exit 1 +fi + # Array to collect issues we found declare -a ISSUES_FOUND @@ -32,12 +45,14 @@ echo -e "${BLUE}============================================================${NC # Display a summary of what will be validated echo -e "${CYAN}This script will validate the following components:${NC}" -echo -e "• ${YELLOW}Core components:${NC} MetalLB, Traefik, CoreDNS (k3s provided components)" -echo -e "• ${YELLOW}Installed components:${NC} cert-manager, ExternalDNS, Kubernetes Dashboard" +echo -e "• ${YELLOW}Core components:${NC} MetalLB, Traefik, CoreDNS (Talos/Kubernetes components)" +echo -e "• ${YELLOW}Installed components:${NC} cert-manager, ExternalDNS, Kubernetes Dashboard, Longhorn" echo -e "• ${YELLOW}DNS resolution:${NC} Internal domain names and dashboard access" echo -e "• ${YELLOW}Routing:${NC} IngressRoutes, middlewares, and services" echo -e "• ${YELLOW}Authentication:${NC} Service accounts and tokens" +echo -e "• ${YELLOW}Storage:${NC} Longhorn storage system and persistent volumes" echo -e "• ${YELLOW}Load 
balancing:${NC} IP address pools and allocations" +echo -e "• ${YELLOW}Certificates:${NC} Let's Encrypt wildcard certificates" echo echo -e "${CYAN}The validation will create a test pod 'validation-test' that will remain running${NC}" echo -e "${CYAN}after the script finishes, for further troubleshooting if needed.${NC}" @@ -291,8 +306,8 @@ show_component_logs() { echo -e "${BLUE}=== Checking Core Components ===${NC}" # Check MetalLB components - using correct label selectors -check_component "MetalLB Controller" "metallb-system" "app.kubernetes.io/component=controller,app.kubernetes.io/name=metallb" -check_component "MetalLB Speaker" "metallb-system" "app.kubernetes.io/component=speaker,app.kubernetes.io/name=metallb" +check_component "MetalLB Controller" "metallb-system" "app=metallb,component=controller" +check_component "MetalLB Speaker" "metallb-system" "app=metallb,component=speaker" # Check MetalLB IP address pools echo -e "${YELLOW}Checking MetalLB IP address pools...${NC}" @@ -371,10 +386,59 @@ else ISSUES_FOUND+=("Error querying LoadBalancer services") fi -# Check k3s components -check_component "Traefik" "kube-system" "app.kubernetes.io/name=traefik,app.kubernetes.io/instance=traefik-kube-system" +# Check Talos/Kubernetes core components +check_component "Traefik" "traefik" "app.kubernetes.io/name=traefik,app.kubernetes.io/instance=traefik-traefik" check_component "CoreDNS" "kube-system" "k8s-app=kube-dns" +# Check additional storage components +check_component "Longhorn Manager" "longhorn-system" "app=longhorn-manager" +check_component "Longhorn UI" "longhorn-system" "app=longhorn-ui" +check_component "Docker Registry" "docker-registry" "app=docker-registry" + +echo + +echo -e "${BLUE}=== Checking Storage Components ===${NC}" +# Check Longhorn storage +echo -e "${YELLOW}Checking Longhorn storage system...${NC}" +LONGHORN_NODES=$(kubectl get nodes.longhorn.io -n longhorn-system -o json 2>/dev/null | jq '.items | length' 2>/dev/null || echo "0") +if [[ 
"$LONGHORN_NODES" -gt 0 ]]; then + echo -e " ${GREEN}✓ Longhorn found $LONGHORN_NODES storage nodes${NC}" + + # Check storage classes + LONGHORN_SC=$(kubectl get storageclass longhorn -o name 2>/dev/null) + if [[ -n "$LONGHORN_SC" ]]; then + echo -e " ${GREEN}✓ Longhorn storage class available${NC}" + + # Check if it's the default + DEFAULT_SC=$(kubectl get storageclass -o jsonpath='{.items[?(@.metadata.annotations.storageclass\.kubernetes\.io/is-default-class=="true")].metadata.name}') + if [[ "$DEFAULT_SC" == "longhorn" ]]; then + echo -e " ${GREEN}✓ Longhorn is the default storage class${NC}" + else + echo -e " ${YELLOW}⚠ Longhorn is not the default storage class (default: ${DEFAULT_SC:-none})${NC}" + fi + else + echo -e " ${RED}✗ Longhorn storage class not found${NC}" + ISSUES_FOUND+=("Longhorn storage class not found") + fi + + # Check persistent volumes + PV_COUNT=$(kubectl get pv 2>/dev/null | grep -c "longhorn" || true) + echo -e " ${CYAN}→ $PV_COUNT Longhorn persistent volumes${NC}" +else + echo -e " ${RED}✗ Longhorn storage nodes not found${NC}" + ISSUES_FOUND+=("Longhorn storage system not properly configured") +fi + +# Check NFS storage if configured +NFS_SC=$(kubectl get storageclass nfs -o name 2>/dev/null) +if [[ -n "$NFS_SC" ]]; then + echo -e " ${GREEN}✓ NFS storage class available${NC}" + NFS_PV_COUNT=$(kubectl get pv 2>/dev/null | grep -c "nfs" || true) + echo -e " ${CYAN}→ $NFS_PV_COUNT NFS persistent volumes${NC}" +else + echo -e " ${YELLOW}⚠ NFS storage class not found${NC}" +fi + echo echo -e "${BLUE}=== Checking Installed Components ===${NC}" @@ -383,6 +447,22 @@ check_component "cert-manager" "cert-manager" "app.kubernetes.io/instance=cert-m check_component "ExternalDNS" "externaldns" "app=external-dns" DASHBOARD_CHECK=$(check_component "Kubernetes Dashboard" "kubernetes-dashboard" "k8s-app=kubernetes-dashboard") +# Check certificates +echo -e "${YELLOW}Checking cert-manager certificates...${NC}" +CERTS=$(kubectl get certificates -n
cert-manager 2>/dev/null) +if [[ -n "$CERTS" ]]; then + CERT_COUNT=$(kubectl get certificates -n cert-manager --no-headers 2>/dev/null | wc -l) + READY_CERTS=$(kubectl get certificates -n cert-manager -o custom-columns=NAME:.metadata.name,READY:.status.conditions[0].status --no-headers 2>/dev/null | grep -c "True" || echo "0") + echo -e " ${GREEN}✓ Found $CERT_COUNT certificate(s), $READY_CERTS ready${NC}" + if [[ "$READY_CERTS" -lt "$CERT_COUNT" ]]; then + echo -e " ${YELLOW}⚠ Some certificates are not ready yet${NC}" + kubectl get certificates -n cert-manager -o custom-columns=NAME:.metadata.name,READY:.status.conditions[0].status,MESSAGE:.status.conditions[0].message --no-headers | grep -v "True" | sed 's/^/ /' + fi +else + echo -e " ${RED}✗ No certificates found${NC}" + ISSUES_FOUND+=("No certificates found in cert-manager namespace") +fi + echo echo -e "${BLUE}=== Checking DNS Resolution ===${NC}" @@ -400,36 +480,36 @@ if echo "$COREDNS_CONFIG" | grep -q "traefik.${DOMAIN}"; then echo -e " ${CYAN}→ traefik.${DOMAIN} is configured with IP: ${TRAEFIK_IP}${NC}" fi else - echo -e " ${RED}✗ Missing entry for traefik.${DOMAIN} in CoreDNS config${NC}" - ISSUES_FOUND+=("Missing DNS entry for traefik.${DOMAIN} in CoreDNS configmap") + echo -e " ${YELLOW}⚠ Entry for traefik.${DOMAIN} not found in CoreDNS config${NC}" + echo -e " ${YELLOW}This is normal if using different routing methods${NC}" fi # Check for dashboard entry -if echo "$COREDNS_CONFIG" | grep -q "dashboard.internal.${DOMAIN}"; then - echo -e " ${GREEN}✓ Found entry for dashboard.internal.${DOMAIN} in CoreDNS config${NC}" +if echo "$COREDNS_CONFIG" | grep -q "dashboard.${INTERNAL_DOMAIN}"; then + echo -e " ${GREEN}✓ Found entry for dashboard.${INTERNAL_DOMAIN} in CoreDNS config${NC}" # Extract the actual IP from the configmap - DASHBOARD_IP=$(echo "$COREDNS_CONFIG" | grep -oE "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+ dashboard\.internal\.${DOMAIN}" | awk '{print $1}') + DASHBOARD_IP=$(echo "$COREDNS_CONFIG" | grep 
-oE "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+ dashboard\.${INTERNAL_DOMAIN}" | awk '{print $1}') if [[ -n "$DASHBOARD_IP" ]]; then - echo -e " ${CYAN}→ dashboard.internal.${DOMAIN} is configured with IP: ${DASHBOARD_IP}${NC}" + echo -e " ${CYAN}→ dashboard.${INTERNAL_DOMAIN} is configured with IP: ${DASHBOARD_IP}${NC}" fi else - echo -e " ${RED}✗ Missing entry for dashboard.internal.${DOMAIN} in CoreDNS config${NC}" - ISSUES_FOUND+=("Missing DNS entry for dashboard.internal.${DOMAIN} in CoreDNS configmap") + echo -e " ${YELLOW}⚠ Entry for dashboard.${INTERNAL_DOMAIN} not found in CoreDNS config${NC}" + echo -e " ${YELLOW}Dashboard may be accessed through ingress routing instead${NC}" fi -# Check for kubernetes-dashboard entry -if echo "$COREDNS_CONFIG" | grep -q "dashboard.internal.${DOMAIN}"; then - echo -e " ${GREEN}✓ Found entry for dashboard.internal.${DOMAIN} in CoreDNS config${NC}" +# Check for docker registry entry +if echo "$COREDNS_CONFIG" | grep -q "docker-registry.${INTERNAL_DOMAIN}"; then + echo -e " ${GREEN}✓ Found entry for docker-registry.${INTERNAL_DOMAIN} in CoreDNS config${NC}" # Extract the actual IP from the configmap - K8S_DASHBOARD_IP=$(echo "$COREDNS_CONFIG" | grep -oE "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+ kubernetes-dashboard\.internal\.${DOMAIN}" | awk '{print $1}') - if [[ -n "$K8S_DASHBOARD_IP" ]]; then - echo -e " ${CYAN}→ dashboard.internal.${DOMAIN} is configured with IP: ${K8S_DASHBOARD_IP}${NC}" + REGISTRY_IP=$(echo "$COREDNS_CONFIG" | grep -oE "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+ docker-registry\.${INTERNAL_DOMAIN}" | awk '{print $1}') + if [[ -n "$REGISTRY_IP" ]]; then + echo -e " ${CYAN}→ docker-registry.${INTERNAL_DOMAIN} is configured with IP: ${REGISTRY_IP}${NC}" fi else - echo -e " ${YELLOW}Note: dashboard.internal.${DOMAIN} entry not found in CoreDNS config${NC}" - echo -e " ${YELLOW}This is not critical as dashboard.internal.${DOMAIN} is the primary hostname${NC}" + echo -e " ${YELLOW}⚠ Entry for docker-registry.${INTERNAL_DOMAIN} not found in 
CoreDNS config${NC}" + echo -e " ${YELLOW}Registry may be accessed through ingress routing instead${NC}" fi echo -e "${YELLOW}Note: DNS resolution from within the cluster may be different than external resolution${NC}" @@ -597,21 +677,19 @@ test_full_request_path() { # Check dashboard domains echo -e "${YELLOW}Checking DNS resolution for dashboard domains...${NC}" -# First check primary dashboard domain using the IP we found in CoreDNS config +# Check primary dashboard domain if [[ -n "$DASHBOARD_IP" ]]; then - check_dns_resolution "dashboard.internal.${DOMAIN}" "$DASHBOARD_IP" "true" + check_dns_resolution "dashboard.${INTERNAL_DOMAIN}" "$DASHBOARD_IP" "true" else - # Fall back to hardcoded IP if not found in config - check_dns_resolution "dashboard.internal.${DOMAIN}" "192.168.8.240" "false" || \ - check_coredns_entry "dashboard.internal.${DOMAIN}" "192.168.8.240" + # Check if dashboard is accessible through cluster DNS + check_dns_resolution "dashboard.${INTERNAL_DOMAIN}" "" "true" || true fi -# Also check alternative dashboard domain -if [[ -n "$K8S_DASHBOARD_IP" ]]; then - check_dns_resolution "dashboard.internal.${DOMAIN}" "$K8S_DASHBOARD_IP" "true" +# Also check docker registry domain +if [[ -n "$REGISTRY_IP" ]]; then + check_dns_resolution "docker-registry.${INTERNAL_DOMAIN}" "$REGISTRY_IP" "true" else - # Fall back to the same IP as primary domain if alternate isn't defined - check_dns_resolution "dashboard.internal.${DOMAIN}" "${DASHBOARD_IP:-192.168.8.240}" "true" || true + check_dns_resolution "docker-registry.${INTERNAL_DOMAIN}" "" "true" || true fi # Enhanced DNS tests @@ -620,10 +698,9 @@ echo -e "${YELLOW}Running enhanced DNS and path validation tests...${NC}" # Since external DNS is configured to use the local machine's DNS settings, # we'll skip the external DNS check if it's not working, since that's a client config issue echo -e "${YELLOW}Note: External DNS resolution depends on client DNS configuration${NC}" -echo -e "${YELLOW}If your local DNS 
server is properly configured to use CoreDNS (192.168.8.241),${NC}" -echo -e "${YELLOW}it should resolve dashboard.internal.${DOMAIN} to 192.168.8.240${NC}" -echo -e "${GREEN}✓ External DNS configuration exists (tested inside cluster)${NC}" -echo -e "${YELLOW}External DNS resolution and HTTP access must be tested manually from your browser.${NC}" +echo -e "${YELLOW}Dashboard and registry should be accessible through ingress routing${NC}" +echo -e "${GREEN}✓ Internal DNS configuration validated${NC}" +echo -e "${YELLOW}External access should be tested manually from your browser.${NC}" # Skip the problematic tests as they depend on client configuration # check_external_dns_resolution "dashboard.internal.${DOMAIN}" "192.168.8.240" @@ -635,7 +712,7 @@ check_coredns_config_applied # Skip HTTP test as it depends on client network configuration echo -e "${YELLOW}Note: HTTP access test skipped - this depends on client network configuration${NC}" echo -e "${GREEN}✓ Dashboard IngressRoute and DNS configuration validated${NC}" -echo -e "${YELLOW}Manually verify you can access https://dashboard.internal.${DOMAIN} in your browser${NC}" +echo -e "${YELLOW}Manually verify you can access https://dashboard.${INTERNAL_DOMAIN} in your browser${NC}" # test_full_request_path "dashboard.internal.${DOMAIN}" "200" echo @@ -644,40 +721,33 @@ echo -e "${BLUE}=== Checking IngressRoutes for Dashboard ===${NC}" # Check if IngressRoutes are properly configured echo -e "${YELLOW}Checking IngressRoutes for the dashboard...${NC}" -# Check IngressRoutes for dashboard in both namespaces +# Check IngressRoutes for dashboard +echo -e "${YELLOW}Checking for dashboard IngressRoutes...${NC}" -# First check kube-system namespace (for cross-namespace routing) -KUBE_SYSTEM_ROUTE_CHECK=$(check_ingressroute "kubernetes-dashboard" "kube-system" "dashboard.internal.${DOMAIN}" "kubernetes-dashboard" "kubernetes-dashboard" || echo "FAILED") -KUBE_SYSTEM_ALT_ROUTE_CHECK=$(check_ingressroute 
"kubernetes-dashboard-alt" "kube-system" "dashboard.internal.${DOMAIN}" "kubernetes-dashboard" "kubernetes-dashboard" || echo "FAILED") - -# Then check kubernetes-dashboard namespace (for same-namespace routing) -K8S_DASHBOARD_ROUTE_CHECK=$(check_ingressroute "kubernetes-dashboard" "kubernetes-dashboard" "dashboard.internal.${DOMAIN}" "kubernetes-dashboard" || echo "FAILED") -K8S_DASHBOARD_ALT_ROUTE_CHECK=$(check_ingressroute "kubernetes-dashboard-alt" "kubernetes-dashboard" "dashboard.internal.${DOMAIN}" "kubernetes-dashboard" || echo "FAILED") - -# Determine if we have at least one working route for each domain -PRIMARY_DOMAIN_ROUTE_OK=false -if ! echo "$KUBE_SYSTEM_ROUTE_CHECK $K8S_DASHBOARD_ROUTE_CHECK" | grep -q "FAILED FAILED"; then - PRIMARY_DOMAIN_ROUTE_OK=true -fi - -ALT_DOMAIN_ROUTE_OK=false -if ! echo "$KUBE_SYSTEM_ALT_ROUTE_CHECK $K8S_DASHBOARD_ALT_ROUTE_CHECK" | grep -q "FAILED FAILED"; then - ALT_DOMAIN_ROUTE_OK=true -fi - -# Report warnings/issues if needed -if [[ "$PRIMARY_DOMAIN_ROUTE_OK" != "true" ]]; then - echo -e "${RED}✗ No valid IngressRoute found for dashboard.internal.${DOMAIN}${NC}" - ISSUES_FOUND+=("No valid IngressRoute for dashboard.internal.${DOMAIN}") +# Check for IngressRoutes in kubernetes-dashboard namespace +DASHBOARD_INGRESS_COUNT=$(kubectl get ingressroute -n kubernetes-dashboard 2>/dev/null | grep -c "kubernetes-dashboard" || echo "0") +if [[ "$DASHBOARD_INGRESS_COUNT" -gt 0 ]]; then + echo -e " ${GREEN}✓ Found $DASHBOARD_INGRESS_COUNT dashboard IngressRoute(s)${NC}" + kubectl get ingressroute -n kubernetes-dashboard -o custom-columns=NAME:.metadata.name,RULE:.spec.routes[0].match --no-headers | sed 's/^/ /' else - echo -e "${GREEN}✓ Found valid IngressRoute for dashboard.internal.${DOMAIN}${NC}" + echo -e " ${YELLOW}⚠ No IngressRoutes found for dashboard${NC}" + echo -e " ${YELLOW}Dashboard may be accessible via port-forward or NodePort${NC}" fi -if [[ "$ALT_DOMAIN_ROUTE_OK" != "true" ]]; then - echo -e "${YELLOW}⚠ No valid 
IngressRoute found for dashboard.internal.${DOMAIN}${NC}" - echo -e "${YELLOW}This is not critical as dashboard.internal.${DOMAIN} is the primary hostname${NC}" +# Check for Traefik IngressRoutes +TRAEFIK_INGRESS_COUNT=$(kubectl get ingressroute -n traefik 2>/dev/null | wc -l || echo "1") +if [[ "$TRAEFIK_INGRESS_COUNT" -gt 1 ]]; then + echo -e " ${GREEN}✓ Found Traefik IngressRoutes${NC}" else - echo -e "${GREEN}✓ Found valid IngressRoute for dashboard.internal.${DOMAIN}${NC}" + echo -e " ${YELLOW}⚠ No Traefik IngressRoutes found${NC}" +fi + +# Check Docker Registry IngressRoutes +REGISTRY_INGRESS_COUNT=$(kubectl get ingressroute -n docker-registry 2>/dev/null | grep -c "docker-registry" || echo "0") +if [[ "$REGISTRY_INGRESS_COUNT" -gt 0 ]]; then + echo -e " ${GREEN}✓ Found $REGISTRY_INGRESS_COUNT docker registry IngressRoute(s)${NC}" +else + echo -e " ${YELLOW}⚠ No IngressRoutes found for docker registry${NC}" fi echo @@ -687,18 +757,21 @@ echo -e "${BLUE}=== Checking All IngressRoutes ===${NC}" echo -e "${YELLOW}IngressRoutes in kubernetes-dashboard namespace:${NC}" kubectl get ingressroute -n kubernetes-dashboard -o custom-columns=NAME:.metadata.name,ENTRYPOINTS:.spec.entryPoints,RULE:.spec.routes[0].match 2>/dev/null || echo "None found" -echo -e "${YELLOW}IngressRoutes in kube-system namespace:${NC}" -kubectl get ingressroute -n kube-system -o custom-columns=NAME:.metadata.name,ENTRYPOINTS:.spec.entryPoints,RULE:.spec.routes[0].match 2>/dev/null || echo "None found" +echo -e "${YELLOW}IngressRoutes in traefik namespace:${NC}" +kubectl get ingressroute -n traefik -o custom-columns=NAME:.metadata.name,ENTRYPOINTS:.spec.entryPoints,RULE:.spec.routes[0].match 2>/dev/null || echo "None found" + +echo -e "${YELLOW}IngressRoutes in docker-registry namespace:${NC}" +kubectl get ingressroute -n docker-registry -o custom-columns=NAME:.metadata.name,ENTRYPOINTS:.spec.entryPoints,RULE:.spec.routes[0].match 2>/dev/null || echo "None found" echo echo -e "${BLUE}=== 
Checking Middleware Configuration ===${NC}" -# Check middleware status in both namespaces +# Check middleware status in namespaces echo -e "${YELLOW}Middlewares in kubernetes-dashboard namespace:${NC}" kubectl get middleware -n kubernetes-dashboard -o custom-columns=NAME:.metadata.name,TYPE:.spec.ipWhiteList 2>/dev/null || echo "None found" -echo -e "${YELLOW}Middlewares in kube-system namespace:${NC}" -kubectl get middleware -n kube-system -o custom-columns=NAME:.metadata.name,TYPE:.spec.ipWhiteList 2>/dev/null || echo "None found" +echo -e "${YELLOW}Middlewares in traefik namespace:${NC}" +kubectl get middleware -n traefik -o custom-columns=NAME:.metadata.name,TYPE:.spec.ipWhiteList 2>/dev/null || echo "None found" # Verify middleware is in the same namespace as IngressRoute if echo "$KUBE_SYSTEM_ROUTE_CHECK" | grep -q "FAILED"; then @@ -868,13 +941,18 @@ else fi fi - # Try the alternative domain as well - echo -e "${YELLOW}Testing access to alternative dashboard URL...${NC}" - ALT_CURL_OUTPUT=$(kubectl exec validation-test -- curl -v -k --connect-timeout 5 --max-time 10 https://dashboard.internal.${DOMAIN}/ 2>&1 || echo "Connection failed") + # Try checking the service directly + echo -e "${YELLOW}Testing direct service access...${NC}" + SERVICE_IP=$(kubectl get svc -n kubernetes-dashboard kubernetes-dashboard -o jsonpath='{.spec.clusterIP}' 2>/dev/null) + if [[ -n "$SERVICE_IP" ]]; then + ALT_CURL_OUTPUT=$(kubectl exec validation-test -- curl -v -k --connect-timeout 5 --max-time 10 https://${SERVICE_IP}/ 2>&1 || echo "Connection failed") + else + ALT_CURL_OUTPUT="Service IP not found" + fi if echo "$ALT_CURL_OUTPUT" | grep -q "HTTP/[0-9.]\+ 200"; then - echo -e "${GREEN}✓ Successfully connected to dashboard.internal.${DOMAIN}${NC}" - echo -e "${YELLOW}Note: The alternative URL works but the primary one doesn't${NC}" + echo -e "${GREEN}✓ Successfully connected to dashboard service directly${NC}" + echo -e "${YELLOW}Note: Direct service access works but ingress 
routing may have issues${NC}" # Extract a bit of content to show it's working ALT_CONTENT=$(echo "$ALT_CURL_OUTPUT" | grep -A5 "" | head -n3 | sed 's/^/ /') @@ -883,7 +961,7 @@ else echo "$ALT_CONTENT" fi else - echo -e "${RED}✗ Failed to access dashboard.internal.${DOMAIN} as well${NC}" + echo -e "${RED}✗ Failed to access dashboard service directly as well${NC}" echo -e "${YELLOW}This indicates a deeper issue with the dashboard setup or network configuration${NC}" # Show error details @@ -892,7 +970,7 @@ else echo "$ALT_CURL_OUTPUT" | grep -E "Connected to|TLS|HTTP|Failed|error|* connection|timeout|certificate|refused|resolve" | head -5 | sed 's/^/ /' fi - ISSUES_FOUND+=("Cannot access dashboard.internal.${DOMAIN}") + ISSUES_FOUND+=("Cannot access dashboard.${INTERNAL_DOMAIN} via any method") fi fi @@ -1002,7 +1080,7 @@ if [[ ${#ISSUES_FOUND[@]} -gt 0 ]]; then # Core recommendation echo -e "${BOLD}Primary Fix:${NC}" echo -e "${CYAN}Run the complete setup script to fix all issues at once:${NC}" - echo -e "${YELLOW}cd ${ROOT_DIR} && ./infrastructure_setup/setup-all.sh${NC}" + echo -e "${YELLOW}cd ${WC_HOME} && ./setup/cluster/install-all.sh${NC}" echo echo -e "${BOLD}Component-Specific Fixes:${NC}" @@ -1010,42 +1088,42 @@ if [[ ${#ISSUES_FOUND[@]} -gt 0 ]]; then # MetalLB specific recommendations if issue_matches "MetalLB" || issue_matches "LoadBalancer" || issue_matches "IP allocation" || issue_matches "address"; then echo -e "${CYAN}For MetalLB and IP allocation issues:${NC}" - echo -e " 1. Run the MetalLB setup script: ${YELLOW}cd ${ROOT_DIR} && ./infrastructure_setup/setup-metallb.sh${NC}" + echo -e " 1. Run the MetalLB setup script: ${YELLOW}cd ${WC_HOME} && ./setup/cluster/metallb/install.sh${NC}" echo -e " 2. Check for conflicting services: ${YELLOW}kubectl get svc -A --field-selector type=LoadBalancer${NC}" echo -e " 3. 
If you have conflicting IP allocations, edit the service that shouldn't have the IP:" echo -e " ${YELLOW}kubectl edit svc <service-name> -n <namespace>${NC}" echo -e " Remove the metallb.universe.tf/loadBalancerIPs annotation" - echo -e " 4. Check MetalLB logs for errors: ${YELLOW}kubectl logs -n metallb-system -l app=metallb,component=controller${NC}" + echo -e " 4. Check MetalLB logs for errors: ${YELLOW}kubectl logs -n metallb-system -l app.kubernetes.io/name=metallb${NC}" fi # Dashboard specific recommendations if issue_matches "Dashboard" || issue_matches "dashboard"; then echo -e "${CYAN}For dashboard issues:${NC}" - echo -e " ${YELLOW}cd ${ROOT_DIR} && ./infrastructure_setup/setup-dashboard.sh${NC}" - echo -e " Alternatively, use port-forwarding to access the dashboard: ${YELLOW}./bin/dashboard-port-forward${NC}" - echo -e " Get authentication token with: ${YELLOW}./bin/dashboard-token${NC}" + echo -e " ${YELLOW}cd ${WC_HOME} && ./setup/cluster/kubernetes-dashboard/install.sh${NC}" + echo -e " Alternatively, use port-forwarding to access the dashboard: ${YELLOW}kubectl port-forward -n kubernetes-dashboard svc/kubernetes-dashboard 8443:443${NC}" + echo -e " Get authentication token with: ${YELLOW}kubectl -n kubernetes-dashboard create token dashboard-admin${NC}" fi # CoreDNS specific recommendations if issue_matches "DNS"; then echo -e "${CYAN}For DNS resolution issues:${NC}" - echo -e " ${YELLOW}cd ${ROOT_DIR} && ./infrastructure_setup/setup-coredns.sh${NC}" - echo -e " Verify DNS resolution: ${YELLOW}kubectl exec -it $(kubectl get pod -l k8s-app=kube-dns -n kube-system -o name | head -1) -n kube-system -- nslookup dashboard.internal.${DOMAIN}${NC}" + echo -e " ${YELLOW}cd ${WC_HOME} && ./setup/cluster/coredns/install.sh${NC}" + echo -e " Verify DNS resolution: ${YELLOW}kubectl exec -it $(kubectl get pod -l k8s-app=kube-dns -n kube-system -o name | head -1) -n kube-system -- nslookup dashboard.${INTERNAL_DOMAIN}${NC}" fi # Traefik/IngressRoute issues if 
issue_matches "IngressRoute" || issue_matches "ServersTransport" || issue_matches "Middleware"; then echo -e "${CYAN}For Traefik routing issues:${NC}" - echo -e " 1. Delete conflicting resources: ${YELLOW}kubectl delete ingressroute,middleware -n kubernetes-dashboard -l app=kubernetes-dashboard${NC}" - echo -e " 2. Re-run dashboard setup: ${YELLOW}cd ${ROOT_DIR} && ./infrastructure_setup/setup-dashboard.sh${NC}" - echo -e " 3. Check Traefik status: ${YELLOW}kubectl get pods -n kube-system -l app.kubernetes.io/name=traefik${NC}" + echo -e " 1. Check Traefik installation: ${YELLOW}cd ${WC_HOME} && ./setup/cluster/traefik/install.sh${NC}" + echo -e " 2. Re-run dashboard setup: ${YELLOW}cd ${WC_HOME} && ./setup/cluster/kubernetes-dashboard/install.sh${NC}" + echo -e " 3. Check Traefik status: ${YELLOW}kubectl get pods -n traefik -l app.kubernetes.io/name=traefik${NC}" fi # Certificate issues if issue_matches "certificate" || issue_matches "TLS"; then echo -e "${CYAN}For certificate issues:${NC}" echo -e " 1. Check certificate status: ${YELLOW}kubectl get certificate,certificaterequest -A${NC}" - echo -e " 2. Re-run cert-manager setup: ${YELLOW}cd ${ROOT_DIR} && ./infrastructure_setup/setup-cert-manager.sh${NC}" + echo -e " 2. Re-run cert-manager setup: ${YELLOW}cd ${WC_HOME} && ./setup/cluster/cert-manager/install.sh${NC}" fi echo @@ -1057,11 +1135,11 @@ if [[ ${#ISSUES_FOUND[@]} -gt 0 ]]; then echo -e "3. ${CYAN}Check all IngressRoutes:${NC}" echo -e " ${YELLOW}kubectl get ingressroute --all-namespaces${NC}" echo -e "4. ${CYAN}Re-run validation after fixes:${NC}" - echo -e " ${YELLOW}cd ${ROOT_DIR} && ./infrastructure_setup/validate_setup.sh${NC}" + echo -e " ${YELLOW}cd ${WC_HOME} && ./setup/cluster/validate-setup.sh${NC}" else echo -e "${GREEN}All validation checks passed! 
Your infrastructure is set up correctly.${NC}" - echo -e "${CYAN}✓ Dashboard is accessible at: https://dashboard.internal.${DOMAIN}${NC}" - echo -e "${CYAN}✓ Get authentication token with: ./bin/dashboard-token${NC}" + echo -e "${CYAN}✓ Dashboard is accessible at: https://dashboard.${INTERNAL_DOMAIN}${NC}" + echo -e "${CYAN}✓ Get authentication token with: kubectl -n kubernetes-dashboard create token dashboard-admin${NC}" echo echo -e "${YELLOW}Next Steps:${NC}" echo -e "1. Access the dashboard and verify cluster health" diff --git a/setup/dnsmasq/.not_logged_in_yet b/setup/dnsmasq/.not_logged_in_yet index 8e39144..5d4175c 100644 --- a/setup/dnsmasq/.not_logged_in_yet +++ b/setup/dnsmasq/.not_logged_in_yet @@ -4,10 +4,10 @@ PRESET_NET_ETHERNET_ENABLED="1" PRESET_NET_WIFI_ENABLED="0" PRESET_NET_USE_STATIC="1" -PRESET_NET_STATIC_IP="{{ (ds "config").cloud.dns.ip }}" +PRESET_NET_STATIC_IP="{{ .cloud.dns.ip }}" PRESET_NET_STATIC_MASK="255.255.255.0" -PRESET_NET_STATIC_GATEWAY="{{ (ds "config").cloud.router.ip }}" -PRESET_NET_STATIC_DNS="{{ (ds "config").cloud.dns.ip }}" +PRESET_NET_STATIC_GATEWAY="{{ .cloud.router.ip }}" +PRESET_NET_STATIC_DNS="{{ .cloud.dns.ip }}" # For example: # PRESET_NET_STATIC_IP="192.168.8.50" diff --git a/setup/dnsmasq/README.md b/setup/dnsmasq/README.md index 483117f..6b5adb1 100644 --- a/setup/dnsmasq/README.md +++ b/setup/dnsmasq/README.md @@ -27,14 +27,15 @@ A "PXE client" is any machine that is booting using PXE. This is a great way to - Install a Linux machine on your LAN. Record it's IP address in your `config:cloud.dns.ip`. - Ensure it is accessible with ssh. -- Run `setup/dnsmasq/bin/create-setup-bundle.sh` -- Run `setup/dnsmasq/bin/transfer-setup-bundle.sh` +- From your wild-cloud directory, run `wild-central-generate-setup`. 
+- Run `cluster/dnsmasq/bin/create-setup-bundle.sh` +- Run `cluster/dnsmasq/bin/transfer-setup-bundle.sh` Now ssh into your dnsmasq machine and do the following: ```bash sudo -i -cd dnsmasq-setup +cd /root/dnsmasq-setup ./setup.sh ``` diff --git a/setup/dnsmasq/bin/create-setup-bundle.sh b/setup/dnsmasq/bin/create-setup-bundle.sh index 4c5bcb6..02ee702 100755 --- a/setup/dnsmasq/bin/create-setup-bundle.sh +++ b/setup/dnsmasq/bin/create-setup-bundle.sh @@ -1,7 +1,8 @@ #!/bin/bash -if [ ! -d ".wildcloud" ]; then - echo "Error: You must run this script from a wild-cloud directory" +# Check if WC_HOME is set +if [ -z "${WC_HOME:-}" ]; then + echo "Error: WC_HOME environment variable not set. Run \`source ./env.sh\`." exit 1 fi @@ -9,7 +10,7 @@ WILDCLOUD_ROOT=$(wild-config wildcloud.root) || exit 1 # --- -DNSMASQ_SETUP_DIR="./setup/dnsmasq" +# Use WC_HOME (the guard above only validates WC_HOME): the compiled setup files generated by wild-central-generate-setup live here, and transfer-setup-bundle.sh reads the bundle from ${WC_HOME}/setup/dnsmasq/setup-bundle. +DNSMASQ_SETUP_DIR="${WC_HOME}/setup/dnsmasq" BUNDLE_DIR="${DNSMASQ_SETUP_DIR}/setup-bundle" mkdir -p "${BUNDLE_DIR}" @@ -20,16 +21,20 @@ PXE_WEB_ROOT="${BUNDLE_DIR}/ipxe-web" mkdir -p "${PXE_WEB_ROOT}/amd64" cp "${DNSMASQ_SETUP_DIR}/boot.ipxe" "${PXE_WEB_ROOT}/boot.ipxe" -# Create Talos bare metal boot assets. -# This uses the Talos factory API to create boot assets for bare metal nodes. -# These assets include the kernel and initramfs needed for PXE booting Talos on bare metal. -echo "Creating Talos bare metal boot assets..." -TALOS_ID=$(curl -X POST --data-binary @${DNSMASQ_SETUP_DIR}/bare-metal.yaml https://factory.talos.dev/schematics | jq -r '.id') +# Get Talos schematic ID from centralized config. +# The schematic should be uploaded via wild-talos-schema first. +echo "Getting Talos schematic ID from config..." 
+TALOS_ID=$(wild-config cluster.nodes.talos.schematicId) if [ -z "${TALOS_ID}" ] || [ "${TALOS_ID}" = "null" ]; then - echo "Error: Failed to create Talos bare metal boot assets" + echo "Error: No schematic ID found in config.yaml" + echo "Run 'wild-talos-schema' first to upload schematic and get ID" exit 1 fi -echo "Successfully created Talos bare metal boot assets with ID: ${TALOS_ID}" +echo "Using Talos schematic ID: ${TALOS_ID}" + +# Verify schematic includes expected extensions +echo "Schematic includes:" +# Resolve config.yaml via WC_HOME rather than the cwd — the script never cd's, so a relative path breaks when run from elsewhere. +yq eval '.cluster.nodes.talos.schematic.customization.systemExtensions.officialExtensions[]' "${WC_HOME}/config.yaml" | sed 's/^/ - /' # Download kernel to ipxe-web if it's not already there. TALOS_VERSION=$(wild-config cluster.nodes.talos.version) || exit 1 diff --git a/setup/dnsmasq/bin/transfer-setup-bundle.sh b/setup/dnsmasq/bin/transfer-setup-bundle.sh index 1d71a1d..9687393 100755 --- a/setup/dnsmasq/bin/transfer-setup-bundle.sh +++ b/setup/dnsmasq/bin/transfer-setup-bundle.sh @@ -1,12 +1,13 @@ #!/bin/bash -if [ ! -d ".wildcloud" ]; then - echo "Error: You must run this script from a wild-cloud directory" +# Check if WC_HOME is set +if [ -z "${WC_HOME:-}" ]; then + echo "Error: WC_HOME environment variable not set. Run \`source ./env.sh\`." exit 1 fi -SERVER_HOST=$(wild-config cloud.dns.ip2) || exit 1 -SETUP_DIR="./setup/dnsmasq/setup-bundle" +SERVER_HOST=$(wild-config cloud.dns.ip) || exit 1 +SETUP_DIR="${WC_HOME}/setup/dnsmasq/setup-bundle" DESTINATION_DIR="~/dnsmasq-setup" echo "Copying DNSMasq setup files to ${SERVER_HOST}:${DESTINATION_DIR}..." 
diff --git a/setup/dnsmasq/boot.ipxe b/setup/dnsmasq/boot.ipxe index 2106997..8958d91 100644 --- a/setup/dnsmasq/boot.ipxe +++ b/setup/dnsmasq/boot.ipxe @@ -1,5 +1,5 @@ !ipxe imgfree -kernel http://{{ (ds "config").cloud.dns.ip }}/amd64/vmlinuz talos.platform=metal console=tty0 init_on_alloc=1 slab_nomerge pti=on consoleblank=0 nvme_core.io_timeout=4294967295 printk.devkmsg=on ima_template=ima-ng ima_appraise=fix ima_hash=sha512 selinux=1 net.ifnames=0 -initrd http://{{ (ds "config").cloud.dns.ip }}/amd64/initramfs.xz +kernel http://{{ .cloud.dns.ip }}/amd64/vmlinuz talos.platform=metal console=tty0 init_on_alloc=1 slab_nomerge pti=on consoleblank=0 nvme_core.io_timeout=4294967295 printk.devkmsg=on ima_template=ima-ng ima_appraise=fix ima_hash=sha512 selinux=1 net.ifnames=0 +initrd http://{{ .cloud.dns.ip }}/amd64/initramfs.xz boot diff --git a/setup/dnsmasq/dnsmasq.conf b/setup/dnsmasq/dnsmasq.conf index d8db128..9c81cb5 100644 --- a/setup/dnsmasq/dnsmasq.conf +++ b/setup/dnsmasq/dnsmasq.conf @@ -1,26 +1,24 @@ # Configuration file for dnsmasq. 
# Basic Settings -interface={{ (ds "config").cloud.dnsmasq.interface }} -listen-address={{ (ds "config").cloud.dns.ip }} +interface={{ .cloud.dnsmasq.interface }} +listen-address={{ .cloud.dns.ip }} domain-needed bogus-priv no-resolv # DNS Forwarding -# local=/{{ (ds "config").cloud.domain }}/ -# address=/{{ (ds "config").cloud.domain }}/{{ (ds "config").cluster.endpointIp }} -# local=/{{ (ds "config").cloud.internalDomain }}/ -# address=/{{ (ds "config").cloud.internalDomain }}/{{ (ds "config").cluster.endpointIp }} -server=/{{ (ds "config").cloud.domain }}/{{ (ds "config").cluster.endpointIp }} -server=/{{ (ds "config").cloud.internalDomain }}/{{ (ds "config").cluster.endpointIp }} +local=/{{ .cloud.domain }}/ +address=/{{ .cloud.domain }}/{{ .cluster.loadBalancerIp }} +local=/{{ .cloud.internalDomain }}/ +address=/{{ .cloud.internalDomain }}/{{ .cluster.loadBalancerIp }} server=1.1.1.1 server=8.8.8.8 # --- DHCP Settings --- -dhcp-range={{ (ds "config").cloud.dhcpRange }},12h -dhcp-option=3,{{ (ds "config").cloud.router.ip }} # gateway to assign -dhcp-option=6,{{ (ds "config").cloud.dns.ip }} # dns to assign +dhcp-range={{ .cloud.dhcpRange }},12h +dhcp-option=3,{{ .cloud.router.ip }} # gateway to assign +dhcp-option=6,{{ .cloud.dns.ip }} # dns to assign # --- PXE Booting --- enable-tftp @@ -34,7 +32,7 @@ dhcp-match=set:efi-arm64,option:client-arch,11 dhcp-boot=tag:efi-arm64,ipxe-arm64.efi dhcp-userclass=set:ipxe,iPXE -dhcp-boot=tag:ipxe,http://{{ (ds "config").cloud.dns.ip }}/boot.ipxe +dhcp-boot=tag:ipxe,http://{{ .cloud.dns.ip }}/boot.ipxe log-queries log-dhcp diff --git a/setup/dnsmasq/dnsmasq.reference b/setup/dnsmasq/dnsmasq.reference index 60aa6ca..0a94252 100644 --- a/setup/dnsmasq/dnsmasq.reference +++ b/setup/dnsmasq/dnsmasq.reference @@ -73,15 +73,15 @@ no-resolv # Add local-only domains here, queries in these domains are answered # from /etc/hosts or DHCP only. 
#local=/localnet/ -local=/{{ (ds "config").cloud.domain }}/ -local=/{{ (ds "config").cloud.internalDomain }}/ +local=/{{ .cloud.domain }}/ +local=/{{ .cloud.internalDomain }}/ # Add domains which you want to force to an IP address here. # The example below send any host in double-click.net to a local # web-server. #address=/double-click.net/127.0.0.1 -address=/{{ (ds "config").cloud.domain }}/{{ (ds "config").cluster.endpointIp }} -address=/{{ (ds "config").cloud.internalDomain }}/{{ (ds "config").cluster.endpointIp }} +address=/{{ .cloud.domain }}/{{ .cluster.loadBalancerIp }} +address=/{{ .cloud.internalDomain }}/{{ .cluster.loadBalancerIp }} # --address (and --server) work with IPv6 addresses too. #address=/www.thekelleys.org.uk/fe80::20d:60ff:fe36:f83 @@ -170,7 +170,7 @@ interface=eth0 # a lease time. If you have more than one network, you will need to # repeat this for each network on which you want to supply DHCP # service. -dhcp-range={{ (ds "config").cloud.dhcpRange }},12h +dhcp-range={{ .cloud.dhcpRange }},12h # This is an example of a DHCP range where the netmask is given. This # is needed for networks we reach the dnsmasq DHCP server via a relay @@ -345,7 +345,7 @@ dhcp-range={{ (ds "config").cloud.dhcpRange }},12h # Override the default route supplied by dnsmasq, which assumes the # router is the same machine as the one running dnsmasq. #dhcp-option=3,1.2.3.4 -dhcp-option=3,{{ (ds "config").cluster.router.ip }} +dhcp-option=3,{{ .cluster.router.ip }} # Do the same thing, but using the option name #dhcp-option=option:router,1.2.3.4 @@ -361,7 +361,7 @@ dhcp-option=3,{{ (ds "config").cluster.router.ip }} # Send DHCPv6 option. Note [] around IPv6 addresses. #dhcp-option=option6:dns-server,[1234::77],[1234::88] -# dhcp-option=option6:dns-server,{{ (ds "config").cluster.dns.ip }} +# dhcp-option=option6:dns-server,{{ .cluster.dns.ip }} # Send DHCPv6 option for namservers as the machine running # dnsmasq and another.