Settle on v1 setup method. Test run completed successfully from bootstrap to service setup.
- Refactor dnsmasq configuration and scripts for improved variable handling and clarity
- Update dnsmasq configuration files to use direct variable references instead of data source functions for better readability
- Modify setup scripts so they are run from the correct environment and directory, checking for the WC_HOME variable
- Change paths in README and scripts to reflect the new directory structure
- Enhance error handling in setup scripts to provide clearer guidance on required configurations
- Adjust kernel and initramfs URLs in boot.ipxe to use the updated variable references
.gitignore (vendored)
@@ -1,4 +1,3 @@
.env
ca
.bots/*/sessions
.working
bin/backup (new executable file, 23 lines)
@@ -0,0 +1,23 @@
#!/bin/bash
# Simple backup script for your personal cloud
# This is a placeholder for future implementation

SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")"
SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
cd "$SCRIPT_DIR"
if [[ -f "../load-env.sh" ]]; then
    source ../load-env.sh
fi

BACKUP_DIR="${PROJECT_DIR}/backups/$(date +%Y-%m-%d)"
mkdir -p "$BACKUP_DIR"

# Back up Kubernetes resources
kubectl get all -A -o yaml > "$BACKUP_DIR/all-resources.yaml"
kubectl get secrets -A -o yaml > "$BACKUP_DIR/secrets.yaml"
kubectl get configmaps -A -o yaml > "$BACKUP_DIR/configmaps.yaml"

# Back up persistent volumes
# TODO: Add logic to back up persistent volume data

echo "Backup completed: $BACKUP_DIR"
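The persistent-volume TODO could start along these lines — a sketch only, not part of the commit; it assumes capturing PVC manifests is a useful first step, and the `pvc-manifests` directory name is illustrative. (Note also that the script still sources `load-env.sh`, which this commit deletes; `env.sh` is presumably the intended replacement.)

```bash
# Capture PVC manifests so claims can be recreated on restore.
# Actual volume *data* still needs a storage-level tool (e.g. a
# Longhorn backup target); this only records the claim objects.
mkdir -p "$BACKUP_DIR/pvc-manifests"
kubectl get pvc -A -o yaml > "$BACKUP_DIR/pvc-manifests/all-pvcs.yaml"
```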
bin/helm-chart-to-kustomize (new executable file, 136 lines)
@@ -0,0 +1,136 @@
#!/bin/bash
set -e
set -o pipefail

usage() {
    echo "Usage: helm-chart-to-kustomize <repo/chart> <release-name> <namespace> [values-file]"
    echo ""
    echo "Convert a Helm chart to Kustomize manifests."
    echo ""
    echo "Arguments:"
    echo "  repo/chart    Helm chart repository and name (e.g., nginx-stable/nginx-ingress)"
    echo "  release-name  Name for the Helm release (e.g., ingress-controller)"
    echo "  namespace     Kubernetes namespace to deploy to"
    echo "  values-file   Optional values.yaml file for customization"
    echo ""
    echo "Examples:"
    echo "  helm-chart-to-kustomize nginx-stable/nginx-ingress ingress-controller ingress"
    echo "  helm-chart-to-kustomize nginx-stable/nginx-ingress ingress-controller ingress values.yaml"
    echo ""
    echo "Output:"
    echo "  Creates base/<release-name>/ directory with Kustomize-ready manifests"
}

# Parse arguments
if [[ $# -lt 3 || "$1" == "-h" || "$1" == "--help" ]]; then
    usage
    exit 0
fi

chart_repo="$1"
release_name="$2"
namespace="$3"
values_file="${4:-}"

# Extract chart name from repo/chart
chart_name="${chart_repo##*/}"

echo "Converting Helm chart to Kustomize: $chart_repo -> base/$release_name"

# Create working directories
mkdir -p charts base

# Fetch the Helm chart if not already present
if [[ -d "charts/$chart_name" ]]; then
    echo "Chart '$chart_name' already exists in 'charts/' directory. Skipping fetch."
else
    echo "Fetching Helm chart: $chart_repo"

    # Add repository if not already added
    repo_name="$(echo "$chart_repo" | cut -d'/' -f1)"
    if ! helm repo list 2>/dev/null | grep -q "^$repo_name"; then
        echo "Adding Helm repository: $repo_name"
        # Handle common repository URLs
        case "$repo_name" in
            "traefik")
                helm repo add "$repo_name" "https://traefik.github.io/charts"
                ;;
            "nginx-stable")
                helm repo add "$repo_name" "https://helm.nginx.com/stable"
                ;;
            *)
                # Try generic helm.sh pattern first
                helm repo add "$repo_name" "https://charts.helm.sh/$repo_name" 2>/dev/null || {
                    echo "Error: Unknown repository '$repo_name'. Please add manually with 'helm repo add'."
                    exit 1
                }
                ;;
        esac
        helm repo update
    fi

    if ! helm search repo "$chart_repo" >/dev/null 2>&1; then
        echo "Error: Helm chart '$chart_repo' not found in repositories."
        exit 1
    fi

    helm fetch --untar --untardir charts "$chart_repo"
fi

# Build helm template command
template_cmd="helm template --output-dir base --namespace $namespace"
if [[ -n "$values_file" && -f "$values_file" ]]; then
    template_cmd="$template_cmd --values $values_file"
    echo "Using values file: $values_file"
fi
template_cmd="$template_cmd $release_name charts/$chart_name"

# Clean existing base directory if it exists
if [[ -d "base/$release_name" ]]; then
    echo "Existing base/$release_name directory found. Cleaning..."
    rm -rf "base/$release_name"
fi

# Generate manifests with Helm template
echo "Generating manifests with Helm template..."
eval "$template_cmd"

# Create namespace manifest
echo "Creating namespace manifest..."
cat <<EOF > "base/$release_name/namespace.yaml"
apiVersion: v1
kind: Namespace
metadata:
  name: $namespace
EOF

# Generate kustomization.yaml
echo "Generating kustomization.yaml..."
cd "base/$release_name"

# Find all YAML files recursively and create kustomization
resources=()
while IFS= read -r -d '' file; do
    # Get relative path from current directory
    rel_path="${file#./}"
    resources+=("$rel_path")
done < <(find . -name "*.yaml" -not -name "kustomization.yaml" -print0 | sort -z)

# Create kustomization.yaml with all resources
cat > kustomization.yaml << EOF
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
EOF

for resource in "${resources[@]}"; do
    echo "- $resource" >> kustomization.yaml
done

echo "✅ Conversion complete!"
echo ""
echo "Generated files in: base/$release_name/"
echo "To apply with kubectl:"
echo "  kubectl apply -k base/$release_name"
echo ""
echo "To customize further, edit the files in base/$release_name/ and regenerate kustomization.yaml if needed."
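For orientation, a run using the chart from the script's own usage text would look roughly like this; the exact rendered subpaths depend on the chart, so treat the tree as a sketch:

```bash
./bin/helm-chart-to-kustomize nginx-stable/nginx-ingress ingress-controller ingress
# Expected layout (approximate):
#   base/ingress-controller/namespace.yaml
#   base/ingress-controller/kustomization.yaml
#   base/ingress-controller/...rendered chart manifests...
kubectl apply -k base/ingress-controller
```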
bin/wild-compile-template (modified)
@@ -7,7 +7,7 @@ set -o pipefail
 usage() {
     echo "Usage: wild-compile-template [options]"
     echo ""
-    echo "Compile a gomplate template from stdin using ./config.yaml as context."
+    echo "Compile a gomplate template from stdin using \$WC_HOME/config.yaml as context."
     echo ""
     echo "Examples:"
     echo "  echo 'Hello {{.config.cluster.name}}' | wild-compile-template"
@@ -37,17 +37,26 @@ while [[ $# -gt 0 ]]; do
     esac
 done

-if [ ! -f "./config.yaml" ]; then
-    echo "Error: ./config.yaml not found in current directory" >&2
+# Check if WC_HOME is set
+if [ -z "${WC_HOME:-}" ]; then
+    echo "Error: WC_HOME environment variable not set" >&2
     exit 1
 fi

-# Build gomplate command with config context (enables .config shorthand)
-gomplate_cmd="gomplate -c config=./config.yaml"
+CONFIG_FILE="${WC_HOME}/config.yaml"
+SECRETS_FILE="${WC_HOME}/secrets.yaml"
+
+if [ ! -f "${CONFIG_FILE}" ]; then
+    echo "Error: config.yaml not found at ${CONFIG_FILE}" >&2
+    exit 1
+fi
+
+# Build gomplate command with config context
+gomplate_cmd="gomplate -c .=${CONFIG_FILE}"

 # Add secrets context if secrets.yaml exists (enables .secrets shorthand)
-if [ -f "./secrets.yaml" ]; then
-    gomplate_cmd="${gomplate_cmd} -c secrets=./secrets.yaml"
+if [ -f "${SECRETS_FILE}" ]; then
+    gomplate_cmd="${gomplate_cmd} -c secrets=${SECRETS_FILE}"
 fi

 # Execute gomplate with stdin
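One thing to note about the new `-c .=${CONFIG_FILE}` context: config keys now sit at the template root, so the `{{.config.cluster.name}}` example left in the usage text looks stale. Under the new context the pipeline would presumably be:

```bash
# Assumes WC_HOME is set and config.yaml defines cluster.name
# (as the scaffold config does); the .config prefix is no longer needed.
echo 'Hello {{ .cluster.name }}' | wild-compile-template
```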
bin/wild-compile-template-dir (new executable file, 98 lines)
@@ -0,0 +1,98 @@
#!/bin/bash
set -e
set -o pipefail

usage() {
    echo "Usage: wild-compile-template-dir [options] <source_dir> [dest_dir]"
    echo ""
    echo "Recursively copy all files from source_dir to dest_dir, processing text files through wild-compile-template."
    echo "Binary files are copied as-is. Directory structure is preserved."
    echo ""
    echo "Options:"
    echo "  --clean      Delete destination directory before processing"
    echo "  -h, --help   Show this help message"
    echo ""
    echo "Arguments:"
    echo "  source_dir   Source directory to process"
    echo "  dest_dir     Destination directory (default: source_dir_compiled)"
    echo ""
    echo "Examples:"
    echo "  wild-compile-template-dir ./templates"
    echo "  wild-compile-template-dir ./templates ./output"
    echo "  wild-compile-template-dir --clean ./templates"
    echo "  wild-compile-template-dir --clean ./templates ./output"
}

# Parse arguments
clean_flag=false
while [[ $# -gt 0 ]]; do
    case $1 in
        --clean)
            clean_flag=true
            shift
            ;;
        -h|--help)
            usage
            exit 0
            ;;
        -*)
            echo "Unknown option: $1" >&2
            usage
            exit 1
            ;;
        *)
            break
            ;;
    esac
done

if [[ $# -eq 0 ]]; then
    usage
    exit 0
fi

source_dir="$1"
dest_dir="${2:-${source_dir}_compiled}"

# Validate source directory
if [[ ! -d "$source_dir" ]]; then
    echo "Error: Source directory does not exist: $source_dir" >&2
    exit 1
fi

# Clean destination directory if requested
if [[ "$clean_flag" == true && -d "$dest_dir" ]]; then
    echo "Cleaning destination directory: $dest_dir"
    rm -rf "$dest_dir"
fi

# Create destination directory
mkdir -p "$dest_dir"

echo "Processing directory: $source_dir -> $dest_dir"

# Process all files recursively
find "$source_dir" -type f -print0 | while IFS= read -r -d '' file; do
    # Get relative path from source directory
    rel_path="${file#$source_dir/}"
    dest_file="$dest_dir/$rel_path"
    dest_file_dir="$(dirname "$dest_file")"

    # Create destination directory structure
    mkdir -p "$dest_file_dir"

    # Check if file is text using file command
    if file --mime-type "$file" 2>/dev/null | grep -q 'text/'; then
        echo "  Processing: $rel_path"
        if ! cat "$file" | wild-compile-template > "$dest_file"; then
            echo "  ✗ Failed to process: $rel_path" >&2
            exit 1
        fi
    else
        echo "  Copying: $rel_path"
        cp "$file" "$dest_file"
    fi
done

echo "✅ Complete: All files processed successfully"
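A hedged caveat on the processing loop: `find | while` runs the loop body in a subshell, so the `exit 1` on a failed template aborts only that subshell and the final ✅ message can still print. A sketch of the usual fix, matching the process-substitution style already used in `bin/helm-chart-to-kustomize`:

```bash
# Run the loop in the current shell so exit propagates.
while IFS= read -r -d '' file; do
    rel_path="${file#$source_dir/}"
    wild-compile-template < "$file" > "$dest_dir/$rel_path" || exit 1
done < <(find "$source_dir" -type f -print0)
```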
bin/wild-config (modified)
@@ -7,7 +7,7 @@ set -o pipefail
 usage() {
     echo "Usage: wild-config <yaml_key_path>"
     echo ""
-    echo "Read a value from ./config.yaml using a YAML key path."
+    echo "Read a value from \$WC_HOME/config.yaml using a YAML key path."
     echo ""
     echo "Examples:"
     echo "  wild-config 'cluster.name'    # Get cluster name"
@@ -49,17 +49,25 @@ if [ -z "${KEY_PATH}" ]; then
     exit 1
 fi

-if [ ! -f "./config.yaml" ]; then
-    echo "Error: ./config.yaml not found in current directory"
+# Check if WC_HOME is set
+if [ -z "${WC_HOME:-}" ]; then
+    echo "Error: WC_HOME environment variable not set" >&2
     exit 1
 fi

+CONFIG_FILE="${WC_HOME}/config.yaml"
+
+if [ ! -f "${CONFIG_FILE}" ]; then
+    echo "Error: config file not found at ${CONFIG_FILE}" >&2
+    exit 1
+fi
+
 # Use yq to extract the value from the YAML file
-result=$(yq eval ".${KEY_PATH}" ./config.yaml)
+result=$(yq eval ".${KEY_PATH}" "${CONFIG_FILE}" 2>/dev/null)

 # Check if result is null (key not found)
 if [ "${result}" = "null" ]; then
-    echo "Error: Key path '${KEY_PATH}' not found in ./config.yaml" >&2
+    echo "Error: Key path '${KEY_PATH}' not found in ${CONFIG_FILE}" >&2
     exit 1
 fi
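A quick usage sketch, with values taken from the scaffold `config.yaml` later in this commit:

```bash
source ./env.sh                           # sets WC_HOME and PATH
wild-config 'cluster.name'                # -> adam-cluster
wild-config 'cluster.nodes.control.vip'   # -> 192.168.8.30
wild-config 'no.such.key'                 # -> error, exit 1
```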
bin/wild-secret (new executable file, 74 lines)
@@ -0,0 +1,74 @@
#!/bin/bash

set -e
set -o pipefail

# Usage function
usage() {
    echo "Usage: wild-secret <yaml_key_path>"
    echo ""
    echo "Read a value from \$WC_HOME/secrets.yaml using a YAML key path."
    echo ""
    echo "Examples:"
    echo "  wild-secret 'database.password'    # Get database password"
    echo "  wild-secret 'api.keys.github'      # Get GitHub API key"
    echo "  wild-secret 'credentials[0].token' # Get first credential token"
    echo ""
    echo "Options:"
    echo "  -h, --help   Show this help message"
}

# Parse arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        -h|--help)
            usage
            exit 0
            ;;
        -*)
            echo "Unknown option $1"
            usage
            exit 1
            ;;
        *)
            if [ -z "${KEY_PATH}" ]; then
                KEY_PATH="$1"
            else
                echo "Too many arguments"
                usage
                exit 1
            fi
            shift
            ;;
    esac
done

if [ -z "${KEY_PATH}" ]; then
    echo "Error: YAML key path is required"
    usage
    exit 1
fi

# Check if WC_HOME is set
if [ -z "${WC_HOME:-}" ]; then
    echo "Error: WC_HOME environment variable not set" >&2
    exit 1
fi

SECRETS_FILE="${WC_HOME}/secrets.yaml"

if [ ! -f "${SECRETS_FILE}" ]; then
    echo "Error: secrets file not found at ${SECRETS_FILE}" >&2
    exit 1
fi

# Use yq to extract the value from the YAML file
result=$(yq eval ".${KEY_PATH}" "${SECRETS_FILE}" 2>/dev/null)

# Check if result is null (key not found)
if [ "${result}" = "null" ]; then
    echo "Error: Key path '${KEY_PATH}' not found in ${SECRETS_FILE}" >&2
    exit 1
fi

echo "${result}"
bin/wild-talos-iso (new executable file, 137 lines)
@@ -0,0 +1,137 @@
#!/bin/bash

# Talos ISO download script
# Downloads custom Talos ISO with system extensions for USB boot

set -euo pipefail

# Check if WC_HOME is set
if [ -z "${WC_HOME:-}" ]; then
    echo "Error: WC_HOME environment variable not set. Run \`source .env\`."
    exit 1
fi

CONFIG_FILE="${WC_HOME}/config.yaml"
ISO_DIR="${WC_HOME}/.wildcloud/iso"
FORCE_DOWNLOAD=false

# Parse arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --force)
            FORCE_DOWNLOAD=true
            shift
            ;;
        -h|--help)
            echo "Usage: wild-talos-iso [--force]"
            echo ""
            echo "Downloads custom Talos ISO with system extensions for USB boot."
            echo ""
            echo "Options:"
            echo "  --force     Force re-download even if ISO already exists"
            echo "  -h, --help  Show this help message"
            echo ""
            echo "This script:"
            echo "  1. Gets schematic ID and Talos version from config.yaml"
            echo "  2. Downloads custom ISO from Talos Image Factory"
            echo "  3. Saves ISO to .wildcloud/iso/ directory"
            echo ""
            echo "The ISO includes extensions configured in config.yaml:"
            echo "  (.cluster.nodes.talos.schematic.customization.systemExtensions)"
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            echo "Use --help for usage information"
            exit 1
            ;;
    esac
done

echo "Downloading custom Talos ISO with system extensions..."

# Get Talos version and schematic ID from config
TALOS_VERSION=$(yq eval '.cluster.nodes.talos.version' "$CONFIG_FILE")
SCHEMATIC_ID=$(yq eval '.cluster.nodes.talos.schematicId // ""' "$CONFIG_FILE")

if [ -z "$TALOS_VERSION" ] || [ "$TALOS_VERSION" = "null" ]; then
    echo "Error: No Talos version found in config.yaml at .cluster.nodes.talos.version"
    exit 1
fi

if [ -z "$SCHEMATIC_ID" ] || [ "$SCHEMATIC_ID" = "null" ]; then
    echo "Error: No schematic ID found in config.yaml"
    echo "Run 'wild-talos-schema' first to upload schematic and get ID"
    exit 1
fi

echo "Talos version: $TALOS_VERSION"
echo "Schematic ID: $SCHEMATIC_ID"
echo ""
echo "ISO includes extensions:"
yq eval '.cluster.nodes.talos.schematic.customization.systemExtensions.officialExtensions[]' "$CONFIG_FILE" | sed 's/^/  - /'
echo ""

# Create ISO directory
mkdir -p "$ISO_DIR"

# Define ISO filename and path
ISO_FILENAME="talos-${TALOS_VERSION}-metal-amd64.iso"
ISO_PATH="${ISO_DIR}/${ISO_FILENAME}"

# Check if ISO already exists
if [ -f "$ISO_PATH" ] && [ "$FORCE_DOWNLOAD" = false ]; then
    echo "✅ ISO already exists: $ISO_PATH"
    echo "Use --force to re-download"
    echo ""
    echo "To create a bootable USB:"
    echo "  See docs/node_setup.md for USB creation instructions"
    exit 0
fi

# Download ISO from Image Factory
ISO_URL="https://factory.talos.dev/image/${SCHEMATIC_ID}/${TALOS_VERSION}/metal-amd64.iso"
echo "Downloading ISO from: $ISO_URL"
echo "Saving to: $ISO_PATH"
echo ""

# Download with progress bar
if command -v wget >/dev/null 2>&1; then
    wget --progress=bar:force -O "$ISO_PATH" "$ISO_URL"
elif command -v curl >/dev/null 2>&1; then
    curl -L --progress-bar -o "$ISO_PATH" "$ISO_URL"
else
    echo "Error: Neither wget nor curl is available for downloading"
    exit 1
fi

# Verify download
if [ ! -f "$ISO_PATH" ] || [ ! -s "$ISO_PATH" ]; then
    echo "Error: Download failed or file is empty"
    rm -f "$ISO_PATH"
    exit 1
fi

# Get file size for verification
FILE_SIZE=$(du -h "$ISO_PATH" | cut -f1)

echo ""
echo "✅ Custom Talos ISO downloaded successfully!"
echo ""
echo "ISO Details:"
echo "  File: $ISO_PATH"
echo "  Size: $FILE_SIZE"
echo "  Version: $TALOS_VERSION"
echo "  Extensions: $(yq eval '.cluster.nodes.talos.schematic.customization.systemExtensions.officialExtensions | length' "$CONFIG_FILE") extensions included"
echo "  Auto-wipe: Enabled (will wipe existing Talos installations)"
echo ""
echo "Next steps:"
echo "1. Create bootable USB drive (see docs/node_setup.md)"
echo "2. Boot target machine from USB"
echo "3. Run hardware detection: ./detect-node-hardware.sh <maintenance-ip> <node-number>"
echo "4. Apply machine configuration"
echo ""
echo "USB Creation Quick Reference:"
echo "  Linux:   sudo dd if=$ISO_PATH of=/dev/sdX bs=4M status=progress"
echo "  macOS:   sudo dd if=$ISO_PATH of=/dev/rdiskX bs=4m"
echo "  Windows: Use Rufus, Balena Etcher, or similar tool"
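Since the Image Factory URL is fully determined by schematic ID and version, a pre-flight existence check is cheap — a sketch, assuming only that the factory answers HEAD requests for image paths:

```bash
# Fail fast before the (large) download starts.
ISO_URL="https://factory.talos.dev/image/${SCHEMATIC_ID}/${TALOS_VERSION}/metal-amd64.iso"
if ! curl -sfIL "$ISO_URL" >/dev/null; then
    echo "Error: $ISO_URL not reachable; check schematicId and version" >&2
    exit 1
fi
```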
bin/wild-talos-schema (new executable file, 113 lines)
@@ -0,0 +1,113 @@
#!/bin/bash

# Talos schematic management script
# This script manages Talos Image Factory schematics centrally
# Usage: wild-talos-schema [--force]

set -euo pipefail

# Check if WC_HOME is set
if [ -z "${WC_HOME:-}" ]; then
    echo "Error: WC_HOME environment variable not set. Run \`source .env\`."
    exit 1
fi

CONFIG_FILE="${WC_HOME}/config.yaml"
FORCE_UPLOAD=false

# Parse arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --force)
            FORCE_UPLOAD=true
            shift
            ;;
        -h|--help)
            echo "Usage: wild-talos-schema [--force]"
            echo ""
            echo "Manages Talos Image Factory schematics centrally."
            echo ""
            echo "Options:"
            echo "  --force     Force re-upload even if schematicId already exists"
            echo "  -h, --help  Show this help message"
            echo ""
            echo "This script:"
            echo "  1. Reads schematic from config.yaml (.cluster.nodes.talos.schematic)"
            echo "  2. Uploads it to Image Factory if needed"
            echo "  3. Stores the schematicId in config.yaml (.cluster.nodes.talos.schematicId)"
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            echo "Use --help for usage information"
            exit 1
            ;;
    esac
done

echo "Managing Talos schematic for wildcloud..."

# Check if schematic exists in config.yaml
if ! yq eval '.cluster.nodes.talos.schematic' "$CONFIG_FILE" | grep -v "null" >/dev/null 2>&1; then
    echo "Error: No schematic found in config.yaml at .cluster.nodes.talos.schematic"
    echo "Expected schematic configuration with systemExtensions"
    exit 1
fi

# Check if schematicId already exists (unless force)
EXISTING_ID=$(yq eval '.cluster.nodes.talos.schematicId // ""' "$CONFIG_FILE")
if [ -n "$EXISTING_ID" ] && [ "$FORCE_UPLOAD" = false ]; then
    echo "✅ Schematic ID already exists: $EXISTING_ID"
    echo "Use --force to re-upload and generate a new ID"
    exit 0
fi

echo "Extracting schematic from config.yaml..."

# Create temporary schematic file
TEMP_SCHEMATIC=$(mktemp)
trap "rm -f $TEMP_SCHEMATIC" EXIT

# Extract schematic from config.yaml
yq eval '.cluster.nodes.talos.schematic' "$CONFIG_FILE" > "$TEMP_SCHEMATIC"

echo "Schematic contents:"
cat "$TEMP_SCHEMATIC"
echo ""

# Upload schematic to Image Factory
echo "Uploading schematic to Talos Image Factory..."
SCHEMATIC_RESPONSE=$(curl -s -X POST --data-binary @"$TEMP_SCHEMATIC" https://factory.talos.dev/schematics)

if [ -z "$SCHEMATIC_RESPONSE" ]; then
    echo "Error: Failed to upload schematic to Image Factory"
    exit 1
fi

# Parse schematic ID from JSON response
SCHEMATIC_ID=$(echo "$SCHEMATIC_RESPONSE" | sed 's/.*"id":"\([^"]*\)".*/\1/')

if [ -z "$SCHEMATIC_ID" ] || [ "$SCHEMATIC_ID" = "$SCHEMATIC_RESPONSE" ]; then
    echo "Error: Failed to parse schematic ID from response: $SCHEMATIC_RESPONSE"
    exit 1
fi

echo "✅ Schematic uploaded successfully!"
echo "Schematic ID: $SCHEMATIC_ID"

# Update config.yaml with schematic ID
echo "Updating config.yaml with schematic ID..."
yq eval ".cluster.nodes.talos.schematicId = \"$SCHEMATIC_ID\"" -i "$CONFIG_FILE"

echo ""
echo "🎉 Schematic management complete!"
echo ""
echo "Schematic ID: $SCHEMATIC_ID"
echo "Saved to: config.yaml (.cluster.nodes.talos.schematicId)"
echo ""
echo "This schematic includes:"
yq eval '.cluster.nodes.talos.schematic.customization.systemExtensions.officialExtensions[]' "$CONFIG_FILE" | sed 's/^/  - /'
echo ""
echo "Other scripts can now use this schematicId:"
echo "  - setup/dnsmasq/bin/create-setup-bundle.sh (PXE boot assets)"
echo "  - setup/cluster-nodes/create-installer-image.sh (custom installer)"
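The sed extraction works for the factory's compact `{"id":"..."}` response; if `jq` were acceptable as a dependency (the commit does not otherwise require it), the equivalent would be sturdier against formatting changes:

```bash
# .id is the same field the sed pattern targets.
SCHEMATIC_ID=$(echo "$SCHEMATIC_RESPONSE" | jq -r '.id // empty')
[ -n "$SCHEMATIC_ID" ] || { echo "Error: no id in response" >&2; exit 1; }
```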
README.md (modified)
@@ -1,5 +1,11 @@
 # Setting Up Your Wild-cloud

+Install dependencies:
+
+```bash
+scripts/setup-utils.sh
+```
+
 Add the `bin` directory to your path.

 Initialize a personal wild-cloud in any empty directory, for example:
env.sh (new file, 49 lines)
@@ -0,0 +1,49 @@
#!/bin/bash

# Set the WC_HOME environment variable to this script's directory.
# This variable is used consistently across the Wild Config scripts.
export WC_HOME="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" && pwd)"

# Add bin to path first so wild-config is available
export PATH="$WC_HOME/bin:$PATH"

# Install kubectl
if ! command -v kubectl &> /dev/null; then
    echo "Error: kubectl is not installed. Installing."
    curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
    curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
    echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
    sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
fi

# Install talosctl
if ! command -v talosctl &> /dev/null; then
    echo "Error: talosctl is not installed. Installing."
    curl -sL https://talos.dev/install | sh
fi

# Check if gomplate is installed
if ! command -v gomplate &> /dev/null; then
    echo "Error: gomplate is not installed. Please install gomplate first."
    echo "Visit: https://docs.gomplate.ca/installing/"
    exit 1
fi

KUBECONFIG=~/.kube/config
export KUBECONFIG

# Use cluster name as both talos and kubectl context name
CLUSTER_NAME=$(wild-config cluster.name)
if [ -z "${CLUSTER_NAME}" ] || [ "${CLUSTER_NAME}" = "null" ]; then
    echo "Error: cluster.name not set in config.yaml"
    exit 1
fi

# Only try to use the kubectl context if it exists
if kubectl config get-contexts "${CLUSTER_NAME}" >/dev/null 2>&1; then
    kubectl config use-context "${CLUSTER_NAME}"
    echo "Using Kubernetes context: ${CLUSTER_NAME}"
# else
#     echo "Kubernetes context '${CLUSTER_NAME}' not found, skipping context switch"
fi
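Usage sketch for `env.sh`: it is meant to be sourced, not executed, which also means its `exit 1` paths will close the calling shell on failure — a `return` guard would be gentler (an observation, not part of the commit):

```bash
# From the wild-cloud root:
source ./env.sh
echo "$WC_HOME"              # the checkout root
wild-config 'cluster.name'   # helpers are now on PATH
```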
load-env.sh (deleted, 178 lines)
@@ -1,178 +0,0 @@
#!/usr/bin/env bash
# This script sources environment variables from:
# 1. The root .env file
# 2. App-specific .env files from enabled apps (with install=true in manifest.yaml)
# Dependencies are respected - if app A requires app B, app B's .env is sourced first
# set -e

PROJECT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ENV_FILE="$PROJECT_DIR/.env"
BIN_DIR="$PROJECT_DIR/bin"
APPS_DIR="$PROJECT_DIR/apps"

# Check if yq is installed
if ! command -v yq &> /dev/null; then
    echo "Error: yq is not installed. Please install it first."
    echo "You can install it with: wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/local/bin/yq && chmod +x /usr/local/bin/yq"
    exit 1
fi

# Source the main .env file
if [ ! -f "$ENV_FILE" ]; then
    echo "Error: Environment file not found: $ENV_FILE"
    exit 1
fi

# Turn on allexport to automatically export all variables
set -a
source "$ENV_FILE"
set +a

# Function to parse YAML using yq
parse_yaml() {
    local yaml_file=$1

    # Extract the values we need using yq
    local name=$(yq eval '.name' "$yaml_file")
    local install=$(yq eval '.install' "$yaml_file")

    # Convert boolean to 1/0 for consistency
    if [ "$install" = "true" ]; then
        install="1"
    elif [ "$install" = "false" ]; then
        install="0"
    fi

    # Get dependencies as space-separated string
    local requires=""
    if yq eval 'has("requires")' "$yaml_file" | grep -q "true"; then
        requires=$(yq eval '.requires[].name' "$yaml_file" | tr '\n' ' ' | sed 's/ $//')
    fi

    # Return the parsed data as a single line
    echo "$name|$install|$requires"
}

# Resolve dependencies and create a list of apps to source in the right order
resolve_dependencies() {
    local apps=()
    local apps_to_install=()
    local deps_map=()

    # Parse all manifest files
    for manifest in "$APPS_DIR"/*/manifest.yaml; do
        local app_dir=$(dirname "$manifest")
        local app_name=$(basename "$app_dir")

        local parsed_data=$(parse_yaml "$manifest")
        IFS='|' read -r name install requires <<< "$parsed_data"

        # Add to our arrays
        apps+=("$name")
        if [ "$install" = "1" ] || [ "$install" = "true" ]; then
            apps_to_install+=("$name")
            deps_map+=("$name:$requires")
        fi
    done

    # Create an ordered list with dependencies first
    local ordered=()

    # First add apps with no dependencies
    for app in "${apps_to_install[@]}"; do
        local has_deps=false
        for dep_entry in "${deps_map[@]}"; do
            local app_name=$(echo "$dep_entry" | cut -d':' -f1)
            local deps=$(echo "$dep_entry" | cut -d':' -f2)

            if [ "$app_name" = "$app" ] && [ -n "$deps" ]; then
                has_deps=true
                break
            fi
        done

        if [ "$has_deps" = false ]; then
            ordered+=("$app")
        fi
    done

    # Now add apps with resolved dependencies
    local remaining=()
    for app in "${apps_to_install[@]}"; do
        if ! echo " ${ordered[*]} " | grep -q " $app "; then
            remaining+=("$app")
        fi
    done

    while [ ${#remaining[@]} -gt 0 ]; do
        local progress=false

        for app in "${remaining[@]}"; do
            local all_deps_resolved=true

            # Find the dependencies for this app
            local app_deps=""
            for dep_entry in "${deps_map[@]}"; do
                local app_name=$(echo "$dep_entry" | cut -d':' -f1)
                local deps=$(echo "$dep_entry" | cut -d':' -f2)

                if [ "$app_name" = "$app" ]; then
                    app_deps="$deps"
                    break
                fi
            done

            # Check if all dependencies are in the ordered list
            if [ -n "$app_deps" ]; then
                for dep in $app_deps; do
                    if ! echo " ${ordered[*]} " | grep -q " $dep "; then
                        all_deps_resolved=false
                        break
                    fi
                done
            fi

            if [ "$all_deps_resolved" = true ]; then
                ordered+=("$app")
                progress=true
            fi
        done

        # If no progress was made, we have a circular dependency
        if [ "$progress" = false ]; then
            echo "Warning: Circular dependency detected in app manifests"
            # Add remaining apps to avoid getting stuck
            ordered+=("${remaining[@]}")
            break
        fi

        # Update remaining list
        local new_remaining=()
        for app in "${remaining[@]}"; do
            if ! echo " ${ordered[*]} " | grep -q " $app "; then
                new_remaining+=("$app")
            fi
        done
        remaining=("${new_remaining[@]}")
    done

    echo "${ordered[@]}"
}

# Get ordered list of apps to source
ordered_apps=($(resolve_dependencies))

# Source app .env files in dependency order
# echo "Sourcing app environment files..."
for app in "${ordered_apps[@]}"; do
    app_env_file="$APPS_DIR/$app/config/.env"
    if [ -f "$app_env_file" ]; then
        # echo "  - $app"
        set -a
        source "$app_env_file"
        set +a
    fi
done

# Add bin directory to PATH
export PATH="$BIN_DIR:$PATH"
@@ -1,2 +0,0 @@
-KUBECONFIG=~/.kube/config
-export KUBECONFIG
my-scaffold/.gitignore (vendored)
@@ -1,4 +1,6 @@
secrets.yaml
.wildcloud/cache
.bots/*/sessions
backup/
backup/
.working
.claude
my-scaffold/.wildcloud/.gitkeep (new file, 0 lines)
my-scaffold/config.yaml (modified)
@@ -1,6 +1,5 @@
 wildcloud:
   # You can also use a local path for the repository such as /home/adam/repos/wild-cloud
-  repository: https://github.com/payneio/wild-cloud
+  repository: /home/adam/wildcloud
 operator:
   email: adam@adam.tld
 cloud:
@@ -17,15 +16,40 @@ cloud:
   storageCapacity: 250Gi
   dns:
     ip: 192.168.8.218
     externalResolver: 1.1.1.1
     dhcpRange: 192.168.8.100,192.168.8.239
-  dnsmasqInterface: enp5s0
+  dnsmasq:
+    interface: enp5s0
+    username: adam
 cluster:
   endpoint: computer-01
   endpointIp: 192.168.8.241
+  nodes:
+    talos:
+      version: v1.10.3
+      schematic:
+        customization:
+          extraKernelArgs:
+            - -talos.halt_if_installed
+          systemExtensions:
+            officialExtensions:
+              - siderolabs/gvisor
+              - siderolabs/intel-ucode
+              - siderolabs/iscsi-tools
+              - siderolabs/util-linux-tools
+      schematicId: TBD
+    control:
+      vip: 192.168.8.30
+      node1:
+        ip: 192.168.8.31
+      node2:
+        ip: 192.168.8.32
+      node3:
+        ip: 192.168.8.33
   name: adam-cluster
   ipaddressPool: 192.168.8.20-192.168.8.29
   loadBalancerIp: 192.168.8.20
   kubernetes:
     config: /home/adam/.kube/config
     context: default
     loadBalancerRange: 192.168.8.240-192.168.8.250
   dashboard:
     adminUsername: admin
   certManager:
@@ -33,6 +57,7 @@ cluster:
     cloudflare:
       domain: adam.tld
       ownerId: cloud-adam-cluster
-  nodes:
-    talos:
-      version: v1.10.3
   externalDns:
     ownerId: cloud-adam-cluster
   dockerRegistry:
     storage: 10Gi
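These keys are exactly what the new `bin/` helpers consume — `wild-talos-schema` reads `.cluster.nodes.talos.schematic` and replaces the `schematicId: TBD` placeholder, after which `wild-talos-iso` can build its download URL:

```bash
wild-talos-schema   # uploads the schematic block, rewrites schematicId
wild-talos-iso      # downloads talos-v1.10.3-metal-amd64.iso for that ID
```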
my-scaffold/docs/node-setup.md (new file, 246 lines)
@@ -0,0 +1,246 @@
# Node Setup Guide

This guide covers setting up Talos Linux nodes for your Kubernetes cluster using USB boot.

## Overview

There are two main approaches for booting Talos nodes:

1. **USB Boot** (covered here) - Boot from a custom USB drive with system extensions
2. **PXE Boot** - Network boot using dnsmasq setup (see `setup/dnsmasq/README.md`)

## USB Boot Setup

### Prerequisites

- Target hardware for Kubernetes nodes
- USB drive (8GB+ recommended)
- Admin access to create bootable USB drives

### Step 1: Upload Schematic and Download Custom Talos ISO

First, upload the system extensions schematic to Talos Image Factory, then download the custom ISO.

```bash
# Upload schematic configuration to get schematic ID
wild-talos-schema

# Download custom ISO with system extensions
wild-talos-iso
```

The custom ISO includes the system extensions (iscsi-tools, util-linux-tools, intel-ucode, gvisor) needed for the cluster and is saved to `.wildcloud/iso/talos-v1.10.3-metal-amd64.iso`.

### Step 2: Create Bootable USB Drive

#### Linux (Recommended)

```bash
# Find your USB device (be careful to select the right device!)
lsblk
sudo dmesg | tail  # Check for recently connected USB devices

# Create bootable USB (replace /dev/sdX with your USB device)
sudo dd if=.wildcloud/iso/talos-v1.10.3-metal-amd64.iso of=/dev/sdX bs=4M status=progress

# Verify the write completed
sync
```

**⚠️ Warning**: Double-check the device path (`/dev/sdX`). Writing to the wrong device will destroy data!

#### macOS

```bash
# Find your USB device
diskutil list

# Unmount the USB drive (replace diskX with your USB device)
diskutil unmountDisk /dev/diskX

# Create bootable USB
sudo dd if=.wildcloud/iso/talos-v1.10.3-metal-amd64.iso of=/dev/rdiskX bs=4m

# Eject when complete
diskutil eject /dev/diskX
```

#### Windows

Use one of these tools:

1. **Rufus** (Recommended)

   - Download from https://rufus.ie/
   - Select the Talos ISO file
   - Choose your USB drive
   - Use "DD Image" mode
   - Click "START"

2. **Balena Etcher**

   - Download from https://www.balena.io/etcher/
   - Flash from file → Select Talos ISO
   - Select target USB drive
   - Flash!

3. **Command Line** (Windows 10/11)

   ```cmd
   # List disks to find USB drive number
   diskpart
   list disk
   exit

   # Write ISO (replace X with your USB disk number)
   dd if=.wildcloud\iso\talos-v1.10.3-metal-amd64.iso of=\\.\PhysicalDriveX bs=4M --progress
   ```

### Step 3: Boot Target Machine

1. **Insert USB** into target machine
2. **Boot from USB**:
   - Restart machine and enter BIOS/UEFI (usually F2, F12, DEL, or ESC during startup)
   - Change boot order to prioritize USB drive
   - Or use one-time boot menu (usually F12)
3. **Talos will boot** in maintenance mode with a DHCP IP

### Step 4: Hardware Detection and Configuration

Once the machine boots, it will be in maintenance mode with a DHCP IP address.

```bash
# Find the node's maintenance IP (check your router/DHCP server)
# Then detect hardware and register the node
cd setup/cluster-nodes
./detect-node-hardware.sh <maintenance-ip> <node-number>

# Example: Node got DHCP IP 192.168.8.150, registering as node 1
./detect-node-hardware.sh 192.168.8.150 1
```

This script will:

- Discover network interface names (e.g., `enp4s0`)
- List available disks for installation
- Update `config.yaml` with node-specific hardware settings

### Step 5: Generate and Apply Configuration

```bash
# Generate machine configurations with detected hardware
./generate-machine-configs.sh

# Apply configuration (node will reboot with static IP)
talosctl apply-config --insecure -n <maintenance-ip> --file final/controlplane-node-<number>.yaml

# Example:
talosctl apply-config --insecure -n 192.168.8.150 --file final/controlplane-node-1.yaml
```

### Step 6: Verify Installation

After reboot, the node should come up with its assigned static IP:

```bash
# Check connectivity (node 1 should be at 192.168.8.31)
ping 192.168.8.31

# Verify system extensions are installed
talosctl -e 192.168.8.31 -n 192.168.8.31 get extensions

# Check for iscsi tools
talosctl -e 192.168.8.31 -n 192.168.8.31 list /usr/local/bin/ | grep iscsi
```

## Repeat for Additional Nodes

For each additional control plane node:

1. Boot with the same USB drive
2. Run hardware detection with the new maintenance IP and node number
3. Generate and apply configurations
4. Verify the node comes up at its static IP

Example for node 2:

```bash
./detect-node-hardware.sh 192.168.8.151 2
./generate-machine-configs.sh
talosctl apply-config --insecure -n 192.168.8.151 --file final/controlplane-node-2.yaml
```

## Cluster Bootstrap

Once all control plane nodes are configured:

```bash
# Bootstrap the cluster using the VIP
talosctl bootstrap -n 192.168.8.30

# Get kubeconfig
talosctl kubeconfig

# Verify cluster
kubectl get nodes
```

## Troubleshooting

### USB Boot Issues

- **Machine won't boot from USB**: Check BIOS boot order, disable Secure Boot if needed
- **Talos doesn't start**: Verify the ISO was written correctly; try re-creating the USB
- **Network issues**: Ensure DHCP is available on your network

### Hardware Detection Issues

- **Node not accessible**: Check IP assignment, firewall settings
- **Wrong interface detected**: Manually override in `config.yaml` if needed
- **Disk not found**: Verify disk size (must be >10GB), check disk health

### Installation Issues

- **Static IP not assigned**: Check network configuration in machine config
- **Extensions not installed**: Verify the ISO includes extensions, check upgrade logs
- **Node won't join cluster**: Check certificates, network connectivity to VIP

### Checking Logs

```bash
# View system logs
talosctl -e <node-ip> -n <node-ip> logs machined

# Check kernel messages
talosctl -e <node-ip> -n <node-ip> dmesg

# Monitor services
talosctl -e <node-ip> -n <node-ip> get services
```

## System Extensions Included

The custom ISO includes these extensions:

- **siderolabs/iscsi-tools**: iSCSI initiator tools for persistent storage
- **siderolabs/util-linux-tools**: Utility tools including fstrim for storage
- **siderolabs/intel-ucode**: Intel CPU microcode updates (harmless on AMD)
- **siderolabs/gvisor**: Container runtime sandbox (optional security enhancement)

These extensions enable:

- Longhorn distributed storage
- Improved security isolation
- CPU microcode updates
- Storage optimization tools

## Next Steps

After all nodes are configured:

1. **Install CNI**: Deploy a Container Network Interface (Cilium, Calico, etc.)
2. **Install CSI**: Deploy a Container Storage Interface (Longhorn for persistent storage)
3. **Deploy workloads**: Your applications and services
4. **Monitor cluster**: Set up monitoring and logging

See the main project documentation for application deployment guides.
my-scaffold/env.sh (new file, 49 lines)
@@ -0,0 +1,49 @@
#!/bin/bash

# Set the WC_HOME environment variable to this script's directory.
# This variable is used consistently across the Wild Config scripts.
export WC_HOME="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" && pwd)"

# Add bin to path first so wild-config is available
export PATH="$WC_HOME/bin:$PATH"

# Install kubectl
if ! command -v kubectl &> /dev/null; then
    echo "Error: kubectl is not installed. Installing."
    curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
    curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
    echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
    sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
fi

# Install talosctl
if ! command -v talosctl &> /dev/null; then
    echo "Error: talosctl is not installed. Installing."
    curl -sL https://talos.dev/install | sh
fi

# Check if gomplate is installed
if ! command -v gomplate &> /dev/null; then
    echo "Error: gomplate is not installed. Please install gomplate first."
    echo "Visit: https://docs.gomplate.ca/installing/"
    exit 1
fi

KUBECONFIG=~/.kube/config
export KUBECONFIG

# Use cluster name as both talos and kubectl context name
CLUSTER_NAME=$(wild-config cluster.name)
if [ -z "${CLUSTER_NAME}" ] || [ "${CLUSTER_NAME}" = "null" ]; then
    echo "Error: cluster.name not set in config.yaml"
    exit 1
fi

# Only try to use the kubectl context if it exists
if kubectl config get-contexts "${CLUSTER_NAME}" >/dev/null 2>&1; then
    kubectl config use-context "${CLUSTER_NAME}"
    echo "Using Kubernetes context: ${CLUSTER_NAME}"
# else
#     echo "Kubernetes context '${CLUSTER_NAME}' not found, skipping context switch"
fi
setup/README.md (modified)
@@ -1,5 +1,7 @@
 # Setup instructions

 Install dependencies:

 Follow the instructions to [set up a dnsmasq machine](./dnsmasq/README.md).
+
+Follow the instructions to [set up cluster nodes](./cluster-nodes/README.md).
@@ -1,90 +1,235 @@
|
||||
# Cluster Node Setup
|
||||
|
||||
Cluster node setup is WIP. Any kubernetes setup will do. Currently, we have a working cluster using each of these methods and are moving towards Talos.
|
||||
This directory contains automation for setting up Talos Kubernetes cluster nodes with static IP configuration.
|
||||
|
||||
## k3s cluster node setup
|
||||
## Hardware Detection and Setup (Recommended)
|
||||
|
||||
K3s provides a fully-compliant Kubernetes distribution in a small footprint.
|
||||
The automated setup discovers hardware configuration from nodes in maintenance mode and generates machine configurations with the correct interface names and disk paths.
|
||||
|
||||
To set up control nodes:
|
||||
### Prerequisites
|
||||
|
||||
1. `source .env`
|
||||
2. Boot nodes with Talos ISO in maintenance mode
|
||||
3. Nodes must be accessible on the network
|
||||
|
||||
### Hardware Discovery Workflow
|
||||
|
||||
```bash
|
||||
# Install K3s without the default load balancer (we'll use MetalLB)
|
||||
curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode=644 --disable servicelb --disable metallb
|
||||
# ONE-TIME CLUSTER INITIALIZATION (run once per cluster)
|
||||
./init-cluster.sh
|
||||
|
||||
# Set up kubectl configuration
|
||||
mkdir -p ~/.kube
|
||||
sudo cat /etc/rancher/k3s/k3s.yaml > ~/.kube/config
|
||||
chmod 600 ~/.kube/config
|
||||
# FOR EACH CONTROL PLANE NODE:
|
||||
|
||||
# 1. Boot node with Talos ISO (it will get a DHCP IP in maintenance mode)
|
||||
# 2. Detect hardware and update config.yaml
|
||||
./detect-node-hardware.sh <maintenance-ip> <node-number>
|
||||
|
||||
# Example: Node boots at 192.168.8.168, register as node 1
|
||||
./detect-node-hardware.sh 192.168.8.168 1
|
||||
|
||||
# 3. Generate machine config for registered nodes
|
||||
./generate-machine-configs.sh
|
||||
|
||||
# 4. Apply configuration - node will reboot with static IP
|
||||
talosctl apply-config --insecure -n 192.168.8.168 --file final/controlplane-node-1.yaml
|
||||
|
||||
# 5. Wait for reboot, node should come up at its target static IP (192.168.8.31)
|
||||
|
||||
# Repeat steps 1-5 for additional control plane nodes
|
||||
```
|
||||
|
||||
Set up the infrastructure services after these are running, then you can add more worker nodes with:
|
||||
The `detect-node-hardware.sh` script will:
|
||||
|
||||
- Connect to nodes in maintenance mode via talosctl
|
||||
- Discover active ethernet interfaces (e.g., `enp4s0` instead of hardcoded `eth0`)
|
||||
- Discover available installation disks (>10GB)
|
||||
- Update `config.yaml` with per-node hardware configuration
|
||||
- Provide next steps for machine config generation
|
||||
|
||||
The `init-cluster.sh` script will:
|
||||
|
||||
- Generate Talos cluster secrets and base configurations (once per cluster)
|
||||
- Set up talosctl context with cluster certificates
|
||||
- Configure VIP endpoint for cluster communication
|
||||
|
||||
The `generate-machine-configs.sh` script will:
|
||||
|
||||
- Check which nodes have been hardware-detected
|
||||
- Compile network configuration templates with discovered hardware settings
|
||||
- Create final machine configurations for registered nodes only
|
||||
- Include system extensions for Longhorn (iscsi-tools, util-linux-tools)
|
||||
- Update talosctl context with registered node IPs
|
||||
|
||||
### Cluster Bootstrap
|
||||
|
||||
After all control plane nodes are configured with static IPs:
|
||||
|
||||
```bash
|
||||
# On your master node, get the node token
|
||||
NODE_TOKEN=`sudo cat /var/lib/rancher/k3s/server/node-token`
|
||||
MASTER_IP=192.168.8.222
|
||||
# On each new node, join the cluster
|
||||
# Bootstrap the cluster using any control node
|
||||
talosctl bootstrap --nodes 192.168.8.31 --endpoint 192.168.8.31
|
||||
|
||||
curl -sfL https://get.k3s.io | K3S_URL=https://$MASTER_IP:6443 K3S_TOKEN=$NODE_TOKEN sh -
|
||||
```
|
||||
|
||||
## Talos cluster node setup
|
||||
|
||||
This is a new experimental method for setting up cluster nodes. We're currently working through the simplest bootstrapping experience.
|
||||
|
||||
Currently, though, all these steps are manual.
|
||||
|
||||
Copy this entire directory to your personal cloud folder and modify it as necessary as you install. We suggest putting it in `cluster/bootstrap`.
|
||||
|
||||
```bash
|
||||
|
||||
# Install kubectl
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
|
||||
echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
|
||||
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
|
||||
|
||||
# Install talosctl
|
||||
curl -sL https://talos.dev/install | sh
|
||||
|
||||
# In your LAN Router (which is your DHCP server),
|
||||
|
||||
CLUSTER_NAME=test-cluster
|
||||
VIP=192.168.8.20 # Non-DHCP
|
||||
|
||||
# Boot your nodes with the ISO and put their IP addresses here. Pin in DHCP.
|
||||
# Nodes must all be on the same switch.
|
||||
# TODO: How to set these static on boot?
|
||||
CONTROL_NODE_1=192.168.8.21
|
||||
CONTROL_NODE_2=192.168.8.22
|
||||
CONTROL_NODE_3=192.168.8.23
|
||||
|
||||
# Generate cluster config files (including pki and tokens)
|
||||
cd generated
|
||||
talosctl gen secrets -o secrets.yaml
|
||||
talosctl gen config --with-secrets secrets.yaml $CLUSTER_NAME https://$VIP:6443
|
||||
talosctl config merge ./talosconfig
|
||||
cd ..
|
||||
|
||||
# If the disk you want to install Talos on isn't /dev/sda, you should
|
||||
# update to the disk you want in patch/controlplane.yml and patch/worker.yaml. If you have already attempted to install a node and received an error about not being able to find /dev/sda, you can see what disks are available on it with:
|
||||
#
|
||||
# talosctl -n $VIP get disks --insecure
|
||||
|
||||
# See https://www.talos.dev/v1.10/talos-guides/configuration/patching/
|
||||
talosctl machineconfig patch generated/controlplane.yaml --patch @patch/controlplane.yaml -o final/controlplane.yaml
|
||||
talosctl machineconfig patch generated/worker.yaml --patch @patch/worker.yaml -o final/worker.yaml
|
||||
$
|
||||
|
||||
# Apply control plane config
|
||||
talosctl apply-config --insecure -n $CONTROL_NODE_1,$CONTROL_NODE_2,$CONTROL_NODE_3 --file final/controlplane.yaml
|
||||
|
||||
# Bootstrap cluster on control plan
|
||||
talosctl bootstrap -n $VIP
|
||||
|
||||
# Merge new cluster information into kubeconfig
|
||||
# Get kubeconfig
|
||||
talosctl kubeconfig
|
||||
|
||||
# You are now ready to use both `talosctl` and `kubectl` against your new cluster.
|
||||
# Verify cluster is ready
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
## Complete Example
|
||||
|
||||
Here's a complete example of setting up a 3-node control plane:
|
||||
|
||||
```bash
|
||||
# CLUSTER INITIALIZATION (once per cluster)
|
||||
./init-cluster.sh
|
||||
|
||||
# NODE 1
|
||||
# Boot node with Talos ISO, it gets DHCP IP 192.168.8.168
|
||||
./detect-node-hardware.sh 192.168.8.168 1
|
||||
./generate-machine-configs.sh
|
||||
talosctl apply-config --insecure -n 192.168.8.168 --file final/controlplane-node-1.yaml
|
||||
# Node reboots and comes up at 192.168.8.31
|
||||
|
||||
# NODE 2
|
||||
# Boot second node with Talos ISO, it gets DHCP IP 192.168.8.169
|
||||
./detect-node-hardware.sh 192.168.8.169 2
|
||||
./generate-machine-configs.sh
|
||||
talosctl apply-config --insecure -n 192.168.8.169 --file final/controlplane-node-2.yaml
|
||||
# Node reboots and comes up at 192.168.8.32
|
||||
|
||||
# NODE 3
|
||||
# Boot third node with Talos ISO, it gets DHCP IP 192.168.8.170
|
||||
./detect-node-hardware.sh 192.168.8.170 3
|
||||
./generate-machine-configs.sh
|
||||
talosctl apply-config --insecure -n 192.168.8.170 --file final/controlplane-node-3.yaml
|
||||
# Node reboots and comes up at 192.168.8.33
|
||||
|
||||
# CLUSTER BOOTSTRAP
|
||||
talosctl bootstrap -n 192.168.8.30
|
||||
talosctl kubeconfig
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
## Configuration Details
|
||||
|
||||
### Per-Node Configuration
|
||||
|
||||
Each control plane node has its own configuration block in `config.yaml`:
|
||||
|
||||
```yaml
|
||||
cluster:
|
||||
nodes:
|
||||
control:
|
||||
vip: 192.168.8.30
|
||||
node1:
|
||||
ip: 192.168.8.31
|
||||
interface: enp4s0 # Discovered automatically
|
||||
disk: /dev/sdb # Selected during hardware detection
|
||||
node2:
|
||||
ip: 192.168.8.32
|
||||
# interface and disk added after hardware detection
|
||||
node3:
|
||||
ip: 192.168.8.33
|
||||
# interface and disk added after hardware detection
|
||||
```
|
||||
|
||||
Worker nodes use DHCP by default. You can use the same hardware detection process for worker nodes if static IPs are needed.
|
||||
|
||||
## Talosconfig Management
|
||||
|
||||
### Context Naming and Conflicts
|
||||
|
||||
When running `talosctl config merge ./generated/talosconfig`, if a context with the same name already exists, talosctl will create an enumerated version (e.g., `demo-cluster-2`).
|
||||
|
||||
**For a clean setup:**
|
||||
|
||||
- Delete existing contexts before merging: `talosctl config contexts` then `talosctl config context <name> --remove`
|
||||
- Or use `--force` to overwrite: `talosctl config merge ./generated/talosconfig --force`
|
||||
|
||||
**Recommended approach for new clusters:**
|
||||
|
||||
```bash
|
||||
# Remove old context if rebuilding cluster
|
||||
talosctl config context demo-cluster --remove || true
|
||||
|
||||
# Merge new configuration
|
||||
talosctl config merge ./generated/talosconfig
|
||||
talosctl config endpoint 192.168.8.30
|
||||
talosctl config node 192.168.8.31 # Add nodes as they are registered
|
||||
```
|
||||
|
||||
### Context Configuration Timeline
|
||||
|
||||
1. **After first node hardware detection**: Merge talosconfig and set endpoint/first node
|
||||
2. **After additional nodes**: Add them to the existing context with `talosctl config node <ip1> <ip2> <ip3>`
|
||||
3. **Before cluster bootstrap**: Ensure all control plane nodes are in the node list
|
||||
|
||||
### System Extensions
|
||||
|
||||
All nodes include:
|
||||
|
||||
- `siderolabs/iscsi-tools`: Required for Longhorn storage
|
||||
- `siderolabs/util-linux-tools`: Utility tools for storage operations
|
||||
|
||||
### Hardware Detection
|
||||
|
||||
The `detect-node-hardware.sh` script automatically discovers:
|
||||
|
||||
- **Network interfaces**: Finds active ethernet interfaces (no more hardcoded `eth0`)
|
||||
- **Installation disks**: Lists available disks >10GB for interactive selection
|
||||
- **Per-node settings**: Updates `config.yaml` with hardware-specific configuration
|
||||
|
||||
This eliminates the need to manually configure hardware settings and handles different hardware configurations across nodes.
|
||||
|
||||
### Template Structure
|
||||
|
||||
Configuration templates are stored in `patch.templates/` and use gomplate syntax:
|
||||
|
||||
- `controlplane-node-1.yaml`: Template for first control plane node
|
||||
- `controlplane-node-2.yaml`: Template for second control plane node
|
||||
- `controlplane-node-3.yaml`: Template for third control plane node
|
||||
- `worker.yaml`: Template for worker nodes
|
||||
|
||||
Templates use per-node variables from `config.yaml`:
|
||||
|
||||
- `{{ .cluster.nodes.control.node1.ip }}`
|
||||
- `{{ .cluster.nodes.control.node1.interface }}`
|
||||
- `{{ .cluster.nodes.control.node1.disk }}`
|
||||
- `{{ .cluster.nodes.control.vip }}`
|
||||
|
||||
The `wild-compile-template-dir` command processes all templates and outputs compiled configurations to the `patch/` directory.

## Troubleshooting

### Hardware Detection Issues

```bash
# Check whether the node is accessible in maintenance mode
talosctl -n <NODE_IP> version --insecure

# View available network interfaces
talosctl -n <NODE_IP> get links --insecure

# View available disks
talosctl -n <NODE_IP> get disks --insecure
```

### Manual Hardware Discovery

If automatic detection fails, you can inspect the hardware manually:

```bash
# Find active ethernet interfaces
talosctl -n <NODE_IP> get links --insecure -o json | jq -s '.[] | select(.spec.operationalState == "up" and .spec.type == "ether" and .metadata.id != "lo") | .metadata.id'

# Find suitable installation disks (>10GB)
talosctl -n <NODE_IP> get disks --insecure -o json | jq -s '.[] | select(.spec.size > 10000000000) | .metadata.id'
```

### Node Status

```bash
# View the machine configuration (only works after a config has been applied)
talosctl -n <NODE_IP> get machineconfig
```
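
Once a config has been applied and the node has rebooted, a couple of broader checks are useful as well (a sketch; both are standard talosctl subcommands, and `talosctl health` assumes your context already has endpoints and nodes set):

```bash
# Summarize the services running on a node
talosctl -n <NODE_IP> service

# Run cluster-wide health checks from your workstation
talosctl health
```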

53
setup/cluster-nodes/create-installer-image.sh
Executable file
@@ -0,0 +1,53 @@
#!/bin/bash

# Talos custom installer image creation script
# This script generates installer image URLs using the centralized schematic ID

set -euo pipefail

# Check if WC_HOME is set
if [ -z "${WC_HOME:-}" ]; then
    echo "Error: WC_HOME environment variable not set. Run \`source ./env.sh\`."
    exit 1
fi

# Get Talos version and schematic ID from config
TALOS_VERSION=$(wild-config cluster.nodes.talos.version)
SCHEMATIC_ID=$(wild-config cluster.nodes.talos.schematicId)

echo "Creating custom Talos installer image..."
echo "Talos version: $TALOS_VERSION"

# Check if a schematic ID exists
if [ -z "$SCHEMATIC_ID" ] || [ "$SCHEMATIC_ID" = "null" ]; then
    echo "Error: No schematic ID found in config.yaml"
    echo "Run 'wild-talos-schema' first to upload the schematic and get its ID"
    exit 1
fi

echo "Schematic ID: $SCHEMATIC_ID"
echo ""
echo "Schematic includes:"
yq eval '.cluster.nodes.talos.schematic.customization.systemExtensions.officialExtensions[]' "${WC_HOME}/config.yaml" | sed 's/^/  - /'
echo ""

# Generate the installer image URL
INSTALLER_URL="factory.talos.dev/metal-installer/$SCHEMATIC_ID:$TALOS_VERSION"

echo ""
echo "🎉 Custom installer image URL generated!"
echo ""
echo "Installer URL: $INSTALLER_URL"
echo ""
echo "Usage in machine configuration:"
echo "machine:"
echo "  install:"
echo "    image: $INSTALLER_URL"
echo ""
echo "Next steps:"
echo "1. Update machine config templates with this installer URL"
echo "2. Regenerate machine configurations"
echo "3. Apply to existing nodes to trigger installation with extensions"
echo ""
echo "To update templates automatically, run:"
echo "  sed -i 's|image:.*|image: $INSTALLER_URL|' patch.templates/controlplane-node-*.yaml"

163
setup/cluster-nodes/detect-node-hardware.sh
Executable file
@@ -0,0 +1,163 @@
#!/bin/bash

# Node registration script for Talos cluster setup
# This script discovers hardware configuration from a node in maintenance mode
# and updates config.yaml with per-node hardware settings

set -euo pipefail

# Check if WC_HOME is set
if [ -z "${WC_HOME:-}" ]; then
    echo "Error: WC_HOME environment variable not set. Run \`source ./env.sh\`."
    exit 1
fi

# Usage function
usage() {
    echo "Usage: detect-node-hardware.sh <node-ip> <node-number>"
    echo ""
    echo "Register a Talos node by discovering its hardware configuration."
    echo "The node must be booted in maintenance mode and accessible via IP."
    echo ""
    echo "Arguments:"
    echo "  node-ip      Current IP of the node in maintenance mode"
    echo "  node-number  Node number (1, 2, or 3) for control plane nodes"
    echo ""
    echo "Examples:"
    echo "  ./detect-node-hardware.sh 192.168.8.168 1"
    echo "  ./detect-node-hardware.sh 192.168.8.169 2"
    echo ""
    echo "This script will:"
    echo "  - Query the node for available network interfaces"
    echo "  - Query the node for available disks"
    echo "  - Update config.yaml with the per-node hardware settings"
    echo "  - Update patch templates to use per-node hardware"
}

# Parse arguments
if [ $# -ne 2 ]; then
    usage
    exit 1
fi

NODE_IP="$1"
NODE_NUMBER="$2"

# Validate the node number
if [[ ! "$NODE_NUMBER" =~ ^[1-3]$ ]]; then
    echo "Error: Node number must be 1, 2, or 3"
    exit 1
fi

echo "Registering Talos control plane node $NODE_NUMBER at $NODE_IP..."

# Test connectivity
echo "Testing connectivity to node..."
if ! talosctl -n "$NODE_IP" get links --insecure >/dev/null 2>&1; then
    echo "Error: Cannot connect to node at $NODE_IP"
    echo "Make sure the node is booted in maintenance mode and accessible."
    exit 1
fi

echo "✅ Node is accessible"

# Discover network interfaces
echo "Discovering network interfaces..."

# First, try to find the interface that's actually carrying traffic (has the default route)
CONNECTED_INTERFACE=$(talosctl -n "$NODE_IP" get routes --insecure -o json 2>/dev/null | \
    jq -s -r '.[] | select(.spec.destination == "0.0.0.0/0" and .spec.gateway != null) | .spec.outLinkName' | \
    head -1)

if [ -n "$CONNECTED_INTERFACE" ]; then
    ACTIVE_INTERFACE="$CONNECTED_INTERFACE"
    echo "✅ Discovered connected interface (with default route): $ACTIVE_INTERFACE"
else
    # Fallback: find any active ethernet interface
    echo "No default route found, checking for active ethernet interfaces..."
    ACTIVE_INTERFACE=$(talosctl -n "$NODE_IP" get links --insecure -o json 2>/dev/null | \
        jq -s -r '.[] | select(.spec.operationalState == "up" and .spec.type == "ether" and .metadata.id != "lo") | .metadata.id' | \
        head -1)

    if [ -z "$ACTIVE_INTERFACE" ]; then
        echo "Error: No active ethernet interface found"
        echo "Available interfaces:"
        talosctl -n "$NODE_IP" get links --insecure
        echo ""
        echo "Available routes:"
        talosctl -n "$NODE_IP" get routes --insecure
        exit 1
    fi

    echo "✅ Discovered active interface: $ACTIVE_INTERFACE"
fi

# Discover available disks
echo "Discovering available disks..."
AVAILABLE_DISKS=$(talosctl -n "$NODE_IP" get disks --insecure -o json 2>/dev/null | \
    jq -s -r '.[] | select(.spec.size > 10000000000) | .metadata.id' | \
    head -5)

if [ -z "$AVAILABLE_DISKS" ]; then
    echo "Error: No suitable disks found (must be >10GB)"
    echo "Available disks:"
    talosctl -n "$NODE_IP" get disks --insecure
    exit 1
fi

echo "Available disks (>10GB):"
echo "$AVAILABLE_DISKS"
echo ""

# Let the user choose a disk
echo "Select installation disk for node $NODE_NUMBER:"
select INSTALL_DISK in $AVAILABLE_DISKS; do
    if [ -n "${INSTALL_DISK:-}" ]; then
        break
    fi
    echo "Invalid selection. Please try again."
done

# Add the /dev/ prefix if not present
if [[ "$INSTALL_DISK" != /dev/* ]]; then
    INSTALL_DISK="/dev/$INSTALL_DISK"
fi

echo "✅ Selected disk: $INSTALL_DISK"

# Update config.yaml with the per-node configuration
echo "Updating config.yaml with node $NODE_NUMBER configuration..."

CONFIG_FILE="${WC_HOME}/config.yaml"

# Get the target IP for this node from the existing config
TARGET_IP=$(yq eval ".cluster.nodes.control.node${NODE_NUMBER}.ip" "$CONFIG_FILE")

# Use yq to update the per-node configuration
yq eval ".cluster.nodes.control.node${NODE_NUMBER}.ip = \"$TARGET_IP\"" -i "$CONFIG_FILE"
yq eval ".cluster.nodes.control.node${NODE_NUMBER}.interface = \"$ACTIVE_INTERFACE\"" -i "$CONFIG_FILE"
yq eval ".cluster.nodes.control.node${NODE_NUMBER}.disk = \"$INSTALL_DISK\"" -i "$CONFIG_FILE"

echo "✅ Updated config.yaml for node $NODE_NUMBER:"
echo "  - Target IP: $TARGET_IP"
echo "  - Network interface: $ACTIVE_INTERFACE"
echo "  - Installation disk: $INSTALL_DISK"

echo ""
echo "🎉 Node $NODE_NUMBER registration complete!"
echo ""
echo "Node configuration saved:"
echo "  - Target IP: $TARGET_IP"
echo "  - Interface: $ACTIVE_INTERFACE"
echo "  - Disk: $INSTALL_DISK"
echo ""
echo "Next steps:"
echo "1. Regenerate machine configurations:"
echo "   ./generate-machine-configs.sh"
echo ""
echo "2. Apply configuration to this node:"
echo "   talosctl apply-config --insecure -n $NODE_IP --file final/controlplane-node-${NODE_NUMBER}.yaml"
echo ""
echo "3. Wait for reboot and verify static IP connectivity"
echo "4. Repeat registration for additional control plane nodes"

0
setup/cluster-nodes/final/.gitkeep
Normal file

115
setup/cluster-nodes/generate-machine-configs.sh
Executable file
@@ -0,0 +1,115 @@
#!/bin/bash

# Talos machine configuration generation script
# This script generates machine configs for registered nodes using existing cluster secrets

set -euo pipefail

# Check if WC_HOME is set
if [ -z "${WC_HOME:-}" ]; then
    echo "Error: WC_HOME environment variable not set. Run \`source ./env.sh\`."
    exit 1
fi

NODE_SETUP_DIR="${WC_HOME}/setup/cluster-nodes"

# Check if the cluster has been initialized
if [ ! -f "${NODE_SETUP_DIR}/generated/secrets.yaml" ]; then
    echo "Error: Cluster not initialized. Run ./init-cluster.sh first."
    exit 1
fi

# Get cluster configuration from config.yaml
CLUSTER_NAME=$(wild-config cluster.name)
VIP=$(wild-config cluster.nodes.control.vip)

echo "Generating machine configurations for cluster: $CLUSTER_NAME"

# Check which nodes have been registered: a node counts as registered once
# detect-node-hardware.sh has written its interface into config.yaml
REGISTERED_NODES=()
for i in 1 2 3; do
    if yq eval ".cluster.nodes.control.node${i}.interface" "${WC_HOME}/config.yaml" | grep -v "null" >/dev/null 2>&1; then
        NODE_IP=$(wild-config cluster.nodes.control.node${i}.ip)
        REGISTERED_NODES+=("$NODE_IP")
        echo "✅ Node $i registered: $NODE_IP"
    else
        echo "⏸️ Node $i not registered yet"
    fi
done

if [ ${#REGISTERED_NODES[@]} -eq 0 ]; then
    echo ""
    echo "No nodes have been registered yet."
    echo "Run ./detect-node-hardware.sh <maintenance-ip> <node-number> first."
    exit 1
fi

# Create output directories
mkdir -p "${NODE_SETUP_DIR}/final" "${NODE_SETUP_DIR}/patch"

# Compile patch templates for registered nodes only
echo "Compiling patch templates..."
for i in 1 2 3; do
    if yq eval ".cluster.nodes.control.node${i}.interface" "${WC_HOME}/config.yaml" | grep -v "null" >/dev/null 2>&1; then
        echo "Compiling template for control plane node $i..."
        cat "${NODE_SETUP_DIR}/patch.templates/controlplane-node-${i}.yaml" | wild-compile-template > "${NODE_SETUP_DIR}/patch/controlplane-node-${i}.yaml"
    fi
done

# Always compile the worker template (it doesn't require hardware detection)
if [ -f "${NODE_SETUP_DIR}/patch.templates/worker.yaml" ]; then
    cat "${NODE_SETUP_DIR}/patch.templates/worker.yaml" | wild-compile-template > "${NODE_SETUP_DIR}/patch/worker.yaml"
fi

# Generate final machine configs for registered nodes only
echo "Generating final machine configurations..."
for i in 1 2 3; do
    if yq eval ".cluster.nodes.control.node${i}.interface" "${WC_HOME}/config.yaml" | grep -v "null" >/dev/null 2>&1; then
        echo "Generating config for control plane node $i..."
        talosctl machineconfig patch "${NODE_SETUP_DIR}/generated/controlplane.yaml" --patch @"${NODE_SETUP_DIR}/patch/controlplane-node-${i}.yaml" -o "${NODE_SETUP_DIR}/final/controlplane-node-${i}.yaml"
    fi
done

# Always generate the worker config (it doesn't require hardware detection)
if [ -f "${NODE_SETUP_DIR}/patch/worker.yaml" ]; then
    echo "Generating worker config..."
    talosctl machineconfig patch "${NODE_SETUP_DIR}/generated/worker.yaml" --patch @"${NODE_SETUP_DIR}/patch/worker.yaml" -o "${NODE_SETUP_DIR}/final/worker.yaml"
fi

# Update the talosctl context with the registered nodes
echo "Updating talosctl context..."
if [ ${#REGISTERED_NODES[@]} -gt 0 ]; then
    talosctl config node "${REGISTERED_NODES[@]}"
fi

echo ""
echo "✅ Machine configurations generated successfully!"
echo ""
echo "Generated configs:"
for i in 1 2 3; do
    if [ -f "${NODE_SETUP_DIR}/final/controlplane-node-${i}.yaml" ]; then
        NODE_IP=$(wild-config cluster.nodes.control.node${i}.ip)
        echo "  - ${NODE_SETUP_DIR}/final/controlplane-node-${i}.yaml (target IP: $NODE_IP)"
    fi
done
if [ -f "${NODE_SETUP_DIR}/final/worker.yaml" ]; then
    echo "  - ${NODE_SETUP_DIR}/final/worker.yaml"
fi
echo ""
echo "Current talosctl configuration:"
talosctl config info
echo ""
echo "Next steps:"
echo "1. Apply configurations to nodes in maintenance mode:"
for i in 1 2 3; do
    if [ -f "${NODE_SETUP_DIR}/final/controlplane-node-${i}.yaml" ]; then
        echo "   talosctl apply-config --insecure -n <maintenance-ip> --file ${NODE_SETUP_DIR}/final/controlplane-node-${i}.yaml"
    fi
done
echo ""
echo "2. Wait for nodes to reboot with static IPs, then bootstrap the cluster with ANY control node:"
echo "   talosctl bootstrap --nodes 192.168.8.31 --endpoint 192.168.8.31"
echo ""
echo "3. Get the kubeconfig:"
echo "   talosctl kubeconfig"

577
setup/cluster-nodes/generated/controlplane.yaml
Normal file
@@ -0,0 +1,577 @@
|
||||
version: v1alpha1 # Indicates the schema used to decode the contents.
|
||||
debug: false # Enable verbose logging to the console.
|
||||
persist: true
|
||||
# Provides machine specific configuration options.
|
||||
machine:
|
||||
type: controlplane # Defines the role of the machine within the cluster.
|
||||
token: t1yf7w.zwevymjw6v0v1q76 # The `token` is used by a machine to join the PKI of the cluster.
|
||||
# The root certificate authority of the PKI.
|
||||
ca:
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQekNCOHFBREFnRUNBaEVBa2JEQ2VJR09iTlBZZGQxRTBNSUozVEFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qVXdOakl6TURJek9ERXpXaGNOTXpVd05qSXhNREl6T0RFeldqQVFNUTR3REFZRApWUVFLRXdWMFlXeHZjekFxTUFVR0F5dGxjQU1oQVBhbVhHamhnN0FFUmpQZUFJL3dQK21YWVZsYm95M01TUTErCm1CTGh3NmhLbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRk12QnhpY2tXOXVaZWR0ZgppblRzK3p1U2VLK2FNQVVHQXl0bGNBTkJBSEl5Y2ttT3lGMWEvTVJROXp4a1lRcy81clptRjl0YTVsZktCamVlCmRLV0lVbFNRNkY4c1hjZ1orWlhOcXNjSHNwbzFKdStQUVVwa3VocWREdDBRblFjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: LS0tLS1CRUdJTiBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0KTUM0Q0FRQXdCUVlESzJWd0JDSUVJT0hOamQ1blVzdVRGRXpsQmtFOVhkZUJ4b1AxMk9mY2R4a0tjQmZlU0xKbgotLS0tLUVORCBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0K
|
||||
# Extra certificate subject alternative names for the machine's certificate.
|
||||
certSANs: []
|
||||
# # Uncomment this to enable SANs.
|
||||
# - 10.0.0.10
|
||||
# - 172.16.0.10
|
||||
# - 192.168.0.10
|
||||
|
||||
# Used to provide additional options to the kubelet.
|
||||
kubelet:
|
||||
image: ghcr.io/siderolabs/kubelet:v1.33.1 # The `image` field is an optional reference to an alternative kubelet image.
|
||||
defaultRuntimeSeccompProfileEnabled: true # Enable container runtime default Seccomp profile.
|
||||
disableManifestsDirectory: true # The `disableManifestsDirectory` field configures the kubelet to get static pod manifests from the /etc/kubernetes/manifests directory.
|
||||
|
||||
# # The `ClusterDNS` field is an optional reference to an alternative kubelet clusterDNS ip list.
|
||||
# clusterDNS:
|
||||
# - 10.96.0.10
|
||||
# - 169.254.2.53
|
||||
|
||||
# # The `extraArgs` field is used to provide additional flags to the kubelet.
|
||||
# extraArgs:
|
||||
# key: value
|
||||
|
||||
# # The `extraMounts` field is used to add additional mounts to the kubelet container.
|
||||
# extraMounts:
|
||||
# - destination: /var/lib/example # Destination is the absolute path where the mount will be placed in the container.
|
||||
# type: bind # Type specifies the mount kind.
|
||||
# source: /var/lib/example # Source specifies the source path of the mount.
|
||||
# # Options are fstab style mount options.
|
||||
# options:
|
||||
# - bind
|
||||
# - rshared
|
||||
# - rw
|
||||
|
||||
# # The `extraConfig` field is used to provide kubelet configuration overrides.
|
||||
# extraConfig:
|
||||
# serverTLSBootstrap: true
|
||||
|
||||
# # The `KubeletCredentialProviderConfig` field is used to provide kubelet credential configuration.
|
||||
# credentialProviderConfig:
|
||||
# apiVersion: kubelet.config.k8s.io/v1
|
||||
# kind: CredentialProviderConfig
|
||||
# providers:
|
||||
# - apiVersion: credentialprovider.kubelet.k8s.io/v1
|
||||
# defaultCacheDuration: 12h
|
||||
# matchImages:
|
||||
# - '*.dkr.ecr.*.amazonaws.com'
|
||||
# - '*.dkr.ecr.*.amazonaws.com.cn'
|
||||
# - '*.dkr.ecr-fips.*.amazonaws.com'
|
||||
# - '*.dkr.ecr.us-iso-east-1.c2s.ic.gov'
|
||||
# - '*.dkr.ecr.us-isob-east-1.sc2s.sgov.gov'
|
||||
# name: ecr-credential-provider
|
||||
|
||||
# # The `nodeIP` field is used to configure `--node-ip` flag for the kubelet.
|
||||
# nodeIP:
|
||||
# # The `validSubnets` field configures the networks to pick kubelet node IP from.
|
||||
# validSubnets:
|
||||
# - 10.0.0.0/8
|
||||
# - '!10.0.0.3/32'
|
||||
# - fdc7::/16
|
||||
# Provides machine specific network configuration options.
|
||||
network: {}
|
||||
# # `interfaces` is used to define the network interface configuration.
|
||||
# interfaces:
|
||||
# - interface: enp0s1 # The interface name.
|
||||
# # Assigns static IP addresses to the interface.
|
||||
# addresses:
|
||||
# - 192.168.2.0/24
|
||||
# # A list of routes associated with the interface.
|
||||
# routes:
|
||||
# - network: 0.0.0.0/0 # The route's network (destination).
|
||||
# gateway: 192.168.2.1 # The route's gateway (if empty, creates link scope route).
|
||||
# metric: 1024 # The optional metric for the route.
|
||||
# mtu: 1500 # The interface's MTU.
|
||||
#
|
||||
# # # Picks a network device using the selector.
|
||||
|
||||
# # # select a device with bus prefix 00:*.
|
||||
# # deviceSelector:
|
||||
# # busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
|
||||
# # # select a device with mac address matching `*:f0:ab` and `virtio` kernel driver.
|
||||
# # deviceSelector:
|
||||
# # hardwareAddr: '*:f0:ab' # Device hardware (MAC) address, supports matching by wildcard.
|
||||
# # driver: virtio_net # Kernel driver, supports matching by wildcard.
|
||||
# # # select a device with bus prefix 00:*, a device with mac address matching `*:f0:ab` and `virtio` kernel driver.
|
||||
# # deviceSelector:
|
||||
# # - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
|
||||
# # - hardwareAddr: '*:f0:ab' # Device hardware (MAC) address, supports matching by wildcard.
|
||||
# # driver: virtio_net # Kernel driver, supports matching by wildcard.
|
||||
|
||||
# # # Bond specific options.
|
||||
# # bond:
|
||||
# # # The interfaces that make up the bond.
|
||||
# # interfaces:
|
||||
# # - enp2s0
|
||||
# # - enp2s1
|
||||
# # # Picks a network device using the selector.
|
||||
# # deviceSelectors:
|
||||
# # - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
|
||||
# # - hardwareAddr: '*:f0:ab' # Device hardware (MAC) address, supports matching by wildcard.
|
||||
# # driver: virtio_net # Kernel driver, supports matching by wildcard.
|
||||
# # mode: 802.3ad # A bond option.
|
||||
# # lacpRate: fast # A bond option.
|
||||
|
||||
# # # Bridge specific options.
|
||||
# # bridge:
|
||||
# # # The interfaces that make up the bridge.
|
||||
# # interfaces:
|
||||
# # - enxda4042ca9a51
|
||||
# # - enxae2a6774c259
|
||||
# # # Enable STP on this bridge.
|
||||
# # stp:
|
||||
# # enabled: true # Whether Spanning Tree Protocol (STP) is enabled.
|
||||
|
||||
# # # Configure this device as a bridge port.
|
||||
# # bridgePort:
|
||||
# # master: br0 # The name of the bridge master interface
|
||||
|
||||
# # # Indicates if DHCP should be used to configure the interface.
|
||||
# # dhcp: true
|
||||
|
||||
# # # DHCP specific options.
|
||||
# # dhcpOptions:
|
||||
# # routeMetric: 1024 # The priority of all routes received via DHCP.
|
||||
|
||||
# # # Wireguard specific configuration.
|
||||
|
||||
# # # wireguard server example
|
||||
# # wireguard:
|
||||
# # privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded).
|
||||
# # listenPort: 51111 # Specifies a device's listening port.
|
||||
# # # Specifies a list of peer configurations to apply to a device.
|
||||
# # peers:
|
||||
# # - publicKey: ABCDEF... # Specifies the public key of this peer.
|
||||
# # endpoint: 192.168.1.3 # Specifies the endpoint of this peer entry.
|
||||
# # # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer.
|
||||
# # allowedIPs:
|
||||
# # - 192.168.1.0/24
|
||||
# # # wireguard peer example
|
||||
# # wireguard:
|
||||
# # privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded).
|
||||
# # # Specifies a list of peer configurations to apply to a device.
|
||||
# # peers:
|
||||
# # - publicKey: ABCDEF... # Specifies the public key of this peer.
|
||||
# # endpoint: 192.168.1.2:51822 # Specifies the endpoint of this peer entry.
|
||||
# # persistentKeepaliveInterval: 10s # Specifies the persistent keepalive interval for this peer.
|
||||
# # # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer.
|
||||
# # allowedIPs:
|
||||
# # - 192.168.1.0/24
|
||||
|
||||
# # # Virtual (shared) IP address configuration.
|
||||
|
||||
# # # layer2 vip example
|
||||
# # vip:
|
||||
# # ip: 172.16.199.55 # Specifies the IP address to be used.
|
||||
|
||||
# # Used to statically set the nameservers for the machine.
|
||||
# nameservers:
|
||||
# - 8.8.8.8
|
||||
# - 1.1.1.1
|
||||
|
||||
# # Used to statically set arbitrary search domains.
|
||||
# searchDomains:
|
||||
# - example.org
|
||||
# - example.com
|
||||
|
||||
# # Allows for extra entries to be added to the `/etc/hosts` file
|
||||
# extraHostEntries:
|
||||
# - ip: 192.168.1.100 # The IP of the host.
|
||||
# # The host alias.
|
||||
# aliases:
|
||||
# - example
|
||||
# - example.domain.tld
|
||||
|
||||
# # Configures KubeSpan feature.
|
||||
# kubespan:
|
||||
# enabled: true # Enable the KubeSpan feature.
|
||||
|
||||
# Used to provide instructions for installations.
|
||||
install:
|
||||
disk: /dev/sda # The disk used for installations.
|
||||
image: ghcr.io/siderolabs/installer:v1.10.3 # Allows for supplying the image used to perform the installation.
|
||||
wipe: false # Indicates if the installation disk should be wiped at installation time.
|
||||
|
||||
# # Look up disk using disk attributes like model, size, serial and others.
|
||||
# diskSelector:
|
||||
# size: 4GB # Disk size.
|
||||
# model: WDC* # Disk model `/sys/block/<dev>/device/model`.
|
||||
# busPath: /pci0000:00/0000:00:17.0/ata1/host0/target0:0:0/0:0:0:0 # Disk bus path.
|
||||
|
||||
# # Allows for supplying extra kernel args via the bootloader.
|
||||
# extraKernelArgs:
|
||||
# - talos.platform=metal
|
||||
# - reboot=k
|
||||
# Used to configure the machine's container image registry mirrors.
|
||||
registries: {}
|
||||
# # Specifies mirror configuration for each registry host namespace.
|
||||
# mirrors:
|
||||
# ghcr.io:
|
||||
# # List of endpoints (URLs) for registry mirrors to use.
|
||||
# endpoints:
|
||||
# - https://registry.insecure
|
||||
# - https://ghcr.io/v2/
|
||||
|
||||
# # Specifies TLS & auth configuration for HTTPS image registries.
|
||||
# config:
|
||||
# registry.insecure:
|
||||
# # The TLS configuration for the registry.
|
||||
# tls:
|
||||
# insecureSkipVerify: true # Skip TLS server certificate verification (not recommended).
|
||||
#
|
||||
# # # Enable mutual TLS authentication with the registry.
|
||||
# # clientIdentity:
|
||||
# # crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
|
||||
# # key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
|
||||
#
|
||||
# # # The auth configuration for this registry.
|
||||
# # auth:
|
||||
# # username: username # Optional registry authentication.
|
||||
# # password: password # Optional registry authentication.
|
||||
|
||||
# Features describe individual Talos features that can be switched on or off.
|
||||
features:
|
||||
rbac: true # Enable role-based access control (RBAC).
|
||||
stableHostname: true # Enable stable default hostname.
|
||||
apidCheckExtKeyUsage: true # Enable checks for extended key usage of client certificates in apid.
|
||||
diskQuotaSupport: true # Enable XFS project quota support for EPHEMERAL partition and user disks.
|
||||
# KubePrism - local proxy/load balancer on defined port that will distribute
|
||||
kubePrism:
|
||||
enabled: true # Enable KubePrism support - will start local load balancing proxy.
|
||||
port: 7445 # KubePrism port.
|
||||
# Configures host DNS caching resolver.
|
||||
hostDNS:
|
||||
enabled: true # Enable host DNS caching resolver.
|
||||
forwardKubeDNSToHost: true # Use the host DNS resolver as upstream for Kubernetes CoreDNS pods.
|
||||
|
||||
# # Configure Talos API access from Kubernetes pods.
|
||||
# kubernetesTalosAPIAccess:
|
||||
# enabled: true # Enable Talos API access from Kubernetes pods.
|
||||
# # The list of Talos API roles which can be granted for access from Kubernetes pods.
|
||||
# allowedRoles:
|
||||
# - os:reader
|
||||
# # The list of Kubernetes namespaces Talos API access is available from.
|
||||
# allowedKubernetesNamespaces:
|
||||
# - kube-system
|
||||
# Configures the node labels for the machine.
|
||||
nodeLabels:
|
||||
node.kubernetes.io/exclude-from-external-load-balancers: ""
|
||||
|
||||
# # Provides machine specific control plane configuration options.
|
||||
|
||||
# # ControlPlane definition example.
|
||||
# controlPlane:
|
||||
# # Controller manager machine specific configuration options.
|
||||
# controllerManager:
|
||||
# disabled: false # Disable kube-controller-manager on the node.
|
||||
# # Scheduler machine specific configuration options.
|
||||
# scheduler:
|
||||
# disabled: true # Disable kube-scheduler on the node.
|
||||
|
||||
# # Used to provide static pod definitions to be run by the kubelet directly bypassing the kube-apiserver.
|
||||
|
||||
# # nginx static pod.
|
||||
# pods:
|
||||
# - apiVersion: v1
|
||||
# kind: pod
|
||||
# metadata:
|
||||
# name: nginx
|
||||
# spec:
|
||||
# containers:
|
||||
# - image: nginx
|
||||
# name: nginx
|
||||
|
||||
# # Allows the addition of user specified files.
|
||||
|
||||
# # MachineFiles usage example.
|
||||
# files:
|
||||
# - content: '...' # The contents of the file.
|
||||
# permissions: 0o666 # The file's permissions in octal.
|
||||
# path: /tmp/file.txt # The path of the file.
|
||||
# op: append # The operation to use
|
||||
|
||||
# # The `env` field allows for the addition of environment variables.
|
||||
|
||||
# # Environment variables definition examples.
|
||||
# env:
|
||||
# GRPC_GO_LOG_SEVERITY_LEVEL: info
|
||||
# GRPC_GO_LOG_VERBOSITY_LEVEL: "99"
|
||||
# https_proxy: http://SERVER:PORT/
|
||||
# env:
|
||||
# GRPC_GO_LOG_SEVERITY_LEVEL: error
|
||||
# https_proxy: https://USERNAME:PASSWORD@SERVER:PORT/
|
||||
# env:
|
||||
# https_proxy: http://DOMAIN\USERNAME:PASSWORD@SERVER:PORT/
|
||||
|
||||
# # Used to configure the machine's time settings.
|
||||
|
||||
# # Example configuration for cloudflare ntp server.
|
||||
# time:
|
||||
# disabled: false # Indicates if the time service is disabled for the machine.
|
||||
# # description: |
|
||||
# servers:
|
||||
# - time.cloudflare.com
|
||||
# bootTimeout: 2m0s # Specifies the timeout when the node time is considered to be in sync unlocking the boot sequence.
|
||||
|
||||
# # Used to configure the machine's sysctls.
|
||||
|
||||
# # MachineSysctls usage example.
|
||||
# sysctls:
|
||||
# kernel.domainname: talos.dev
|
||||
# net.ipv4.ip_forward: "0"
|
||||
# net/ipv6/conf/eth0.100/disable_ipv6: "1"
|
||||
|
||||
# # Used to configure the machine's sysfs.
|
||||
|
||||
# # MachineSysfs usage example.
|
||||
# sysfs:
|
||||
# devices.system.cpu.cpu0.cpufreq.scaling_governor: performance
|
||||
|
||||
# # Machine system disk encryption configuration.
|
||||
# systemDiskEncryption:
|
||||
# # Ephemeral partition encryption.
|
||||
# ephemeral:
|
||||
# provider: luks2 # Encryption provider to use for the encryption.
|
||||
# # Defines the encryption keys generation and storage method.
|
||||
# keys:
|
||||
# - # Deterministically generated key from the node UUID and PartitionLabel.
|
||||
# nodeID: {}
|
||||
# slot: 0 # Key slot number for LUKS2 encryption.
|
||||
#
|
||||
# # # KMS managed encryption key.
|
||||
# # kms:
|
||||
# # endpoint: https://192.168.88.21:4443 # KMS endpoint to Seal/Unseal the key.
|
||||
#
|
||||
# # # Cipher kind to use for the encryption. Depends on the encryption provider.
|
||||
# # cipher: aes-xts-plain64
|
||||
|
||||
# # # Defines the encryption sector size.
|
||||
# # blockSize: 4096
|
||||
|
||||
# # # Additional --perf parameters for the LUKS2 encryption.
|
||||
# # options:
|
||||
# # - no_read_workqueue
|
||||
# # - no_write_workqueue
|
||||
|
||||
# # Configures the udev system.
|
||||
# udev:
|
||||
# # List of udev rules to apply to the udev system
|
||||
# rules:
|
||||
# - SUBSYSTEM=="drm", KERNEL=="renderD*", GROUP="44", MODE="0660"
|
||||
|
||||
# # Configures the logging system.
|
||||
# logging:
|
||||
# # Logging destination.
|
||||
# destinations:
|
||||
# - endpoint: tcp://1.2.3.4:12345 # Where to send logs. Supported protocols are "tcp" and "udp".
|
||||
# format: json_lines # Logs format.
|
||||
|
||||
# # Configures the kernel.
|
||||
# kernel:
|
||||
# # Kernel modules to load.
|
||||
# modules:
|
||||
# - name: brtfs # Module name.
|
||||
|
||||
# # Configures the seccomp profiles for the machine.
|
||||
# seccompProfiles:
|
||||
# - name: audit.json # The `name` field is used to provide the file name of the seccomp profile.
|
||||
# # The `value` field is used to provide the seccomp profile.
|
||||
# value:
|
||||
# defaultAction: SCMP_ACT_LOG
|
||||
|
||||
# # Override (patch) settings in the default OCI runtime spec for CRI containers.
|
||||
|
||||
# # override default open file limit
|
||||
# baseRuntimeSpecOverrides:
|
||||
# process:
|
||||
# rlimits:
|
||||
# - hard: 1024
|
||||
# soft: 1024
|
||||
# type: RLIMIT_NOFILE
|
||||
|
||||
# # Configures the node annotations for the machine.
|
||||
|
||||
# # node annotations example.
|
||||
# nodeAnnotations:
|
||||
# customer.io/rack: r13a25
|
||||
|
||||
# # Configures the node taints for the machine. Effect is optional.
|
||||
|
||||
# # node taints example.
|
||||
# nodeTaints:
|
||||
# exampleTaint: exampleTaintValue:NoSchedule
|
||||
# Provides cluster specific configuration options.
|
||||
cluster:
|
||||
id: 1DOt3ZYTVTzEG_Q2IYnScCjz1rxZYwWRHV9hGXBu1UE= # Globally unique identifier for this cluster (base64 encoded random 32 bytes).
|
||||
secret: qvOKMH5RJtMOPSLBnWCPV4apReFGTd1czZ+tfz11/jI= # Shared secret of cluster (base64 encoded random 32 bytes).
|
||||
# Provides control plane specific configuration options.
|
||||
controlPlane:
|
||||
endpoint: https://192.168.8.30:6443 # Endpoint is the canonical controlplane endpoint, which can be an IP address or a DNS hostname.
|
||||
clusterName: demo-cluster # Configures the cluster's name.
|
||||
# Provides cluster specific network configuration options.
|
||||
network:
|
||||
dnsDomain: cluster.local # The domain used by Kubernetes DNS.
|
||||
# The pod subnet CIDR.
|
||||
podSubnets:
|
||||
- 10.244.0.0/16
|
||||
# The service subnet CIDR.
|
||||
serviceSubnets:
|
||||
- 10.96.0.0/12
|
||||
|
||||
# # The CNI used.
|
||||
# cni:
|
||||
# name: custom # Name of CNI to use.
|
||||
# # URLs containing manifests to apply for the CNI.
|
||||
# urls:
|
||||
# - https://docs.projectcalico.org/archive/v3.20/manifests/canal.yaml
|
||||
token: ed454d.o4jsg75idc817ojs # The [bootstrap token](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) used to join the cluster.
|
||||
secretboxEncryptionSecret: e+8hExoi1Ap4IS6StTsScp72EXKAE2Xi+J7irS7UeG0= # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
|
||||
# The base64 encoded root certificate authority used by Kubernetes.
|
||||
ca:
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpakNDQVMrZ0F3SUJBZ0lRRWU5cFdPWEFzd09PNm9NYXNDaXRtakFLQmdncWhrak9QUVFEQWpBVk1STXcKRVFZRFZRUUtFd3ByZFdKbGNtNWxkR1Z6TUI0WERUSTFNRFl5TXpBeU16Z3hNMW9YRFRNMU1EWXlNVEF5TXpneApNMW93RlRFVE1CRUdBMVVFQ2hNS2EzVmlaWEp1WlhSbGN6QlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VICkEwSUFCQ3p0YTA1T3NWOU1NaVg4WDZEdC9xbkhWelkra2tqZ01rcjdsU1kzaERPbmVWYnBhOTJmSHlkS1QyWEgKcWN1L3FJWHpodTg0ckN0VWJuQUsyckJUekFPallUQmZNQTRHQTFVZER3RUIvd1FFQXdJQ2hEQWRCZ05WSFNVRQpGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFCkZnUVVtWEhwMmM5bGRtdFg0Y2RibDlpM0Rwd05GYzB3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQVBwVXVoNmIKYUMwaXdzNTh5WWVlYXVMU1JhbnEveVNUcGo2T0N4UGkvTXJpQWlFQW1DUVdRQ290NkM5b0c5TUlaeDFmMmMxcApBUFRFTHFNQm1vZ1NLSis5dXZBPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUpMVWF4Z2RXR0Flb1ZNRW1CYkZHUjBjbTJMK1ZxNXFsVVZMaE1USHF1ZnVvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFTE8xclRrNnhYMHd5SmZ4Zm9PMytxY2RYTmo2U1NPQXlTdnVWSmplRU02ZDVWdWxyM1o4ZgpKMHBQWmNlcHk3K29oZk9HN3ppc0sxUnVjQXJhc0ZQTUF3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
|
||||
# The base64 encoded aggregator certificate authority used by Kubernetes for front-proxy certificate generation.
|
||||
aggregatorCA:
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJZRENDQVFhZ0F3SUJBZ0lSQVB2Y2ZReS9pbWkzQUtZdm1GNnExcmd3Q2dZSUtvWkl6ajBFQXdJd0FEQWUKRncweU5UQTJNak13TWpNNE1UTmFGdzB6TlRBMk1qRXdNak00TVROYU1BQXdXVEFUQmdjcWhrak9QUUlCQmdncQpoa2pPUFFNQkJ3TkNBQVI5NjFKWXl4N2ZxSXJHaURhMTUvVFVTc2xoR2xjSWhzandvcGFpTDg0dzNiQVBaOVdQCjliRThKUnJOTUIvVGkxSUJwbm1IbitXZ3pjeFBnbmllYzZnWG8yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FvUXcKSFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4dwpIUVlEVlIwT0JCWUVGQ29XYVB4engxL01IanlqcVR1WkhXY2hOeXoxTUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDCklHNWdQRHhmYVhNVlMwTEJ5bDNLOENLZVRGNHlBQnV0Zk0vT0hKRGR6ZHNsQWlFQW1pVU9tOU5ma2pQY2ducEcKZzdqd0NQbzczNW5zNXV4d2RRdEZpbjdnMEhvPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUxwZzZoSlBhR3A0ZmRPdkQwVGUwZklPSWJvWUdHdUM4OXBHbThWU3NYWE1vQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFZmV0U1dNc2UzNmlLeG9nMnRlZjAxRXJKWVJwWENJYkk4S0tXb2kvT01OMndEMmZWai9XeApQQ1VhelRBZjA0dFNBYVo1aDUvbG9NM01UNEo0bm5Pb0Z3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
|
||||
# The base64 encoded private key for service account token generation.
|
||||
serviceAccount:
|
||||
key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS0FJQkFBS0NBZ0VBK21FdWh4OWxrbXJXeUdaeWIvcXZCU1hUcys0R2lPRDZ2TlJhRSsrR044WnZxWkI1CktTWFM0Q3pzZXE4V0dKOXV2Yzg0WmdaWk9wY3ZZbXM1T1BKdzE2MjJFSUlaV1FKeXdzZ0F2NWFsTWxUZ1BxLzEKejRtSjlURW5lajZVMUE4cXU1U3FYa3F5dzNZNFdsVUU1TnlhR0d3RE9yMlduTjFTMWI0YXh0V2ZLa1hxUjlFUApvOWVrK1g3UHQwdVhQV0RQQlNiSUE1V3BQRkdqN3dic1lMMTcwcW1GemwvRElUY1Q2S3ROb0lxYTVWVGpqRmRkCkRDY2VKQ3ZldFMvT1F2WG1pcXhtTnBPbW02eFhqcGxRMmNYVUg5b3NsSHREUG4vMUszNlBVWlpaZFJHU2lvQm0KM0RHZHlOU2huN0pCN0J6dmZzaGFqU3pxeExxUmNhaHhMcVdZV2hPUEJiTmVXZ0lTMm5CcDJRYXZhMUR2YkpneQpadGVVaW1EK0VUZlB1QXFZOW9OS0Z0eFVTaS9pTUpYMTM0ZFUvQVZPRXZqaXhPNnlQQjZHVUxpb0ppOHJRVG9TCmVDNStRWXFSU2RSMzhhWFo3R2VSaUlvR3BqMndRY2Y2emVoRHJTUUdabU5BZlpaenV3T1JGei9pRTJrWTBXRGwKV1p2RFlTSFNXbk5UdmZyQk0xN1pEYzNGdTRaSGNaWUpKVGdCMDJGS2kzcS9uRWdudy9zTEhHUEl3SVIvaDlidgpzcVRVMDJYaHRKQlgwYUE2RlFqaG1NTGNvLzF0ci84Y3BPVVcvdVhPM2Y5czZHMW1OY21qeDNVamJqU09xSlRnCmFYVTlGeWZJR2lYei9JcDg0Q2Jsb0wvRXJxQmVXVEQvV2twMWF1QThQcXp6emFmU3NrSzNnd2Rla2NjQ0F3RUEKQVFLQ0FnQWVQUEt4dSs5NE90eWNwWTY5aWx0dElPVTJqanU0elRSSzdpUnJmWnAxT0VzUHJKU05ZbldHRmJiVwpRWTN4YnY1MkFFV0hRTTRHNlpwVE93Vi8rZnVrZUk5aE9BSnRWd0tGYSswaDRPK0ExWEpObW56eWZHR3VaeW9sCmluMmo0ZjhKcjNSOGl4aVlJRG9YRFdjdFovSlk3N2FHSWhQRFRHYkVJZW81bllsVVFYbXFyd3RzcTA3NmJoVVMKUmNLZ0FEQ1FVVFRkQmZhWWc4MldGbEoyMlNZbFNpR1FTODFxUUt6RldwR01uc3RYMWZtSWlmRXNBTG9VVStpdQpIaUM5YlNyVFpaVzU1L2xrNzBWQWJMQ3dmdTFWanNqMzE2NXhLUTFJSEVmeWFsalJ4Q0VHc1dkNndWS1ZIZytLClAxZC9JZndra00yQUk1bG96K3ExSjNWMUtqenNxdGVyY0JKTWhuTVdEYUp5NzhZaGZSZnY0TlNieC9ObjEveW0KanpvWXVjd3pRVEhsd0dLZUhoNG12OWZxM3U5cVJoTlEzNmNjOHowcndmT01BOFpBMVJOOFhkOG82dkxkNitHSQpSbDV6eHpoZ283MXB5V0dNNlZ5L3FqK1F0aWVZVzUrMHdUNVFqbW5WL256bDZLSWZzZGU5Q0xzcG02RnhUWVJlCjE5YzAwemlOWE56V3dPMG4yeTZkaWpKamErZ0lmT0pzVFlFb2dJQ0MxczB0N0orRGU0cHV4anVyalRjMTdZYkcKK1BpejMySmFCVDByYUUxdWlQZ1lhL3Bta1plRjBZTFgzemc4NGhSNHF3WmZsaHdNNTIxKzBJRWRRb29jd2Yycgoyb25xTWlVd2NhaVZzWEVBdjJzRDlwRkd3UEg4MUplY2JBcWNmZkJTcjVPY3VvSmsyUUtDQVFFQS93Nm1EbnFUClliK3dvOEl1SUpUanFjdzYwY1RQZzRnS0tsVzJFRWNqMW5qQitaZ2xzblZheHdMZ0QyaXd1d29BZWg1OTUzWkgKbjFoVk5Eb2VCcGJXcXBRY2VuZjlWVzNUMXpNVjVhZTNYenR1MzkrTExZUlVHakV3NitlMWNrendUSlRBYndnZAp5TnM5TjNDNno0bkhmd0NqRHc2VDhWTVFpRVB6akEveEp3L1RTQzBwRHQ5cTFQZ0hMMHBQMllkdkxvYlpEajJLCkRFb1ErcVE3Tm1XeXlLWGQxWUhZK3VaTDZ1SVlYUDNLSjVWQ0N6ZjlHVHZRUi9XL29DdTgzZzdzdWM3YndCajMKYnN5aElWQUxDTXRXSFhCVDdmNXJJVlhuZHEwdGl5cGQ2NTJDTjBya20xRHZ2L0tsTjZJV01jRkFudjRPV1M0aAphdEt0a3d6SVZCdmdQd0tDQVFFQSswNGJXaDVBVmRreXRzUy9peUlOWDQ1MXZsT0YvQVNIZW5EVjVQUDYxSWpXCll3eXFDNTlSdG0rbEI5MDNvZjVSM0RqQTZXelFvNTdUOFBwSmY2di8wN2RHSzQ2alM5ODRoNEswSzZseWllUHAKUlVlbFpEVDNIbi9WK2hhTUFscnlCUFNyRlFyRkVqdmNOMWN3SmMwTEtDSVBpNGVNeGYwMEdiTHErQ0Fic0szQQpCT3N1cDVxWlNMQWcrRGpIVDdGYnpyOTBMSlN1QnFNNXp0cnJHa1NlbmxQNEtRbGFRMTdEeWlJT2tVZUMvekhFCmg2K1NJMXNla3JHeTNEK3NrQW9HZTlOMVQyL3RPM2lsYVhtdTRIdVkwa3NCckNtZ3EzVTZROXZ0aW8yRmluL1QKQkQ2Y3Z2aUkxN1RJa3lIZkZWZktvRklyOWhIT0RYdEFad2lSQnFsc2VRS0NBUUFyOFQ4a3dYT0E1TUN2QmZaaQpnS1JVamE0WWs5cllvMmgwOEwxa1FvMW5Gdmo4WW4wa0tOblI3YW5pbmJ2TkRhVVZaUWwyQmtmQ3FUcE12REtPCkdoQ3o1TDZmVHVyamUvK0NWUGZSMERwa2V0M1lUakF4VUZvWkJSNlRsaUVKcHozRFErRi9mNXQ2RG1PV21LSm0KdlNzVXMyeGtYTE9hWVNBNUNkUDg3b1l5bjZSY0RBUEYzeklOclFtMzJRcTJ4SUdnTjNWUDRjUlY1N0RUTGRaUgp3ZVd5Y2ZrdEhxamVXU3o5TTZUVTZKaWFoem1RcXoyOHlqUlJJWUs1T3EvWVppUGN3MG5TNTdwQmFabmRIbWc0ClJLZjZmRzdKVXdyci9GdmJjMnlrVEZGUUZadm9vTXVRQXJxN2pEZHd4VWtqbTFMaDBZMXhTZVJSL2lnUGJLVmEKOEU2TEFvSUJBQ1Yxc2h3UDBGVTdxQlNZWlZqdS9ZRlY4ZlVwN0JueDd1UHdkK0hHQUlpMzBRVTR1UXc4ZG1pMApZYXczYkhpSU9WbVRXQ1l6WXpKUWxaVWhLZDJQSFBaSkpudU5xb2Uvd1dScHRrT2Y0WVB1WmpJK2lNZlVJVlg1CmhrTGVJNGFpV2RzbFFXOUVpTFc4R0lwalE3a094Ry
82QzhrbnJuTkEyQWhRcERmU1NXNWZwL1RUdmNPY0J1ZFAKNGNvK1pHOWJwNnk4Mnl0ZUNrYlJBK2Z5dUFMVlliT0dIc0szTXk1QnJQdXZjZTV6ODNIbzBEdk5qd0lZTGdsOQoxWVNCTlU3UFA4SXJkaHdlT2dXWWFVZThyTFdubHRNWi9TalZsNjZYTGRVNXJrSHQ4SThCbU1uVUwzZEVBdG5zCmg4MXV5aHNiV0FmbjE4ZTVSYmE2dlpIZU5BZ0RMemtDZ2dFQkFKRXRJemdmdE85UE5RSDlQOHBBTnFaclpqWkMKZGJaaThCUkNTUy83b2tHeWtkQzZsdFJVd0c2ajIxNXZZcjlmVW94ODU5ZkI0RjhEK0FXWHNKSm44YnlQZFBuWQpwU1ZRMEV3a045aTE4WnowTitEbWVZRC9CUmdyVHA1OVU1Mmo5Q1ZyQWNQRlphVk00b0xwaEVSeDZvSURPOEcvCk9wUEZkVnJvMFhyN1lpbENiYVJLVWVWbjZWKzFIZ25zelhiUE9sakhrcGdXSXdKb1RkNkVWVDlvbXNVeFlVejcKRUR5L2RXNmVxVFBMUHR5Q2hKZlo5WDB6M09uUWpLbzcwdHhQa1VRTmw0azhqMU9mMFhMaklacmd6MmVub0FRZgpQYXhSc1lCckhNVnI5eStXaDhZdFdESGx1NUU4NlNaMXNIaHphOHhZZWpoQXRndzdqa0FyNWcxL2dOYz0KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K
|
||||
# API server specific configuration options.
|
||||
apiServer:
|
||||
image: registry.k8s.io/kube-apiserver:v1.33.1 # The container image used in the API server manifest.
|
||||
# Extra certificate subject alternative names for the API server's certificate.
|
||||
certSANs:
|
||||
- 192.168.8.30
|
||||
disablePodSecurityPolicy: true # Disable PodSecurityPolicy in the API server and default manifests.
|
||||
# Configure the API server admission plugins.
|
||||
admissionControl:
|
||||
- name: PodSecurity # Name is the name of the admission controller.
|
||||
# Configuration is an embedded configuration object to be used as the plugin's
|
||||
configuration:
|
||||
apiVersion: pod-security.admission.config.k8s.io/v1alpha1
|
||||
defaults:
|
||||
audit: restricted
|
||||
audit-version: latest
|
||||
enforce: baseline
|
||||
enforce-version: latest
|
||||
warn: restricted
|
||||
warn-version: latest
|
||||
exemptions:
|
||||
namespaces:
|
||||
- kube-system
|
||||
runtimeClasses: []
|
||||
usernames: []
|
||||
kind: PodSecurityConfiguration
|
||||
# Configure the API server audit policy.
|
||||
auditPolicy:
|
||||
apiVersion: audit.k8s.io/v1
|
||||
kind: Policy
|
||||
rules:
|
||||
- level: Metadata
|
||||
|
||||
# # Configure the API server authorization config. Node and RBAC authorizers are always added irrespective of the configuration.
|
||||
# authorizationConfig:
|
||||
# - type: Webhook # Type is the name of the authorizer. Allowed values are `Node`, `RBAC`, and `Webhook`.
|
||||
# name: webhook # Name is used to describe the authorizer.
|
||||
# # webhook is the configuration for the webhook authorizer.
|
||||
# webhook:
|
||||
# connectionInfo:
|
||||
# type: InClusterConfig
|
||||
# failurePolicy: Deny
|
||||
# matchConditionSubjectAccessReviewVersion: v1
|
||||
# matchConditions:
|
||||
# - expression: has(request.resourceAttributes)
|
||||
# - expression: '!(\''system:serviceaccounts:kube-system\'' in request.groups)'
|
||||
# subjectAccessReviewVersion: v1
|
||||
# timeout: 3s
|
||||
# - type: Webhook # Type is the name of the authorizer. Allowed values are `Node`, `RBAC`, and `Webhook`.
|
||||
# name: in-cluster-authorizer # Name is used to describe the authorizer.
|
||||
# # webhook is the configuration for the webhook authorizer.
|
||||
# webhook:
|
||||
# connectionInfo:
|
||||
# type: InClusterConfig
|
||||
# failurePolicy: NoOpinion
|
||||
# matchConditionSubjectAccessReviewVersion: v1
|
||||
# subjectAccessReviewVersion: v1
|
||||
# timeout: 3s
|
||||
# Controller manager server specific configuration options.
|
||||
controllerManager:
|
||||
image: registry.k8s.io/kube-controller-manager:v1.33.1 # The container image used in the controller manager manifest.
|
||||
# Kube-proxy server-specific configuration options
|
||||
proxy:
|
||||
image: registry.k8s.io/kube-proxy:v1.33.1 # The container image used in the kube-proxy manifest.
|
||||
|
||||
# # Disable kube-proxy deployment on cluster bootstrap.
|
||||
# disabled: false
|
||||
# Scheduler server specific configuration options.
|
||||
scheduler:
|
||||
image: registry.k8s.io/kube-scheduler:v1.33.1 # The container image used in the scheduler manifest.
|
||||
# Configures cluster member discovery.
|
||||
discovery:
|
||||
enabled: true # Enable the cluster membership discovery feature.
|
||||
# Configure registries used for cluster member discovery.
|
||||
registries:
|
||||
# Kubernetes registry uses Kubernetes API server to discover cluster members and stores additional information
|
||||
kubernetes:
|
||||
disabled: true # Disable Kubernetes discovery registry.
|
||||
# Service registry is using an external service to push and pull information about cluster members.
|
||||
service: {}
|
||||
# # External service endpoint.
|
||||
# endpoint: https://discovery.talos.dev/
|
||||
# Etcd specific configuration options.
|
||||
etcd:
|
||||
# The `ca` is the root certificate authority of the PKI.
|
||||
ca:
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJmakNDQVNTZ0F3SUJBZ0lSQU5pSkxOUTFZZU5ZL1c0V1pnTVR2UFF3Q2dZSUtvWkl6ajBFQXdJd0R6RU4KTUFzR0ExVUVDaE1FWlhSalpEQWVGdzB5TlRBMk1qTXdNak00TVROYUZ3MHpOVEEyTWpFd01qTTRNVE5hTUE4eApEVEFMQmdOVkJBb1RCR1YwWTJRd1dUQVRCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFUUWg0T0N3M2VVClpUajhadllHdnh6Mkd2UFdGN0NMOWFwVElxRTdzZkh5YzJ6UW1Ic1NpcGFabW1zR0kyLzZPaVJWV280V2JLeDUKSnMwRW12bkVUYmFSbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSApBd0VHQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRkdGVlFqQzUxYXFtCjhyT2l6UVJXTDNkc2RvNndNQW9HQ0NxR1NNNDlCQU1DQTBnQU1FVUNJQ2hvcm9JaVJ4b0VDZEN3dE40UFV4MGoKRUwwM1A3UGJTMDFhQWhNVHJPYkpBaUVBL09mb2RweVd0VlNIK1ZBRVBOcjZaanFOdnFEQTNWQmkraXNQY1YybwpiQUk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
|
||||
key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU8walRMSmU1TXNzUDhqK3hxbi9Dd0FxSXk1RHo2V1U4MXg2OW5sVFI4S1NvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFMEllRGdzTjNsR1U0L0diMkJyOGM5aHJ6MWhld2kvV3FVeUtoTzdIeDhuTnMwSmg3RW9xVwptWnByQmlOditqb2tWVnFPRm15c2VTYk5CSnI1eEUyMmtRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
|
||||
|
||||
# # The container image used to create the etcd service.
|
||||
# image: gcr.io/etcd-development/etcd:v3.5.21
|
||||
|
||||
# # The `advertisedSubnets` field configures the networks to pick etcd advertised IP from.
|
||||
# advertisedSubnets:
|
||||
# - 10.0.0.0/8
|
||||
# A list of urls that point to additional manifests.
|
||||
extraManifests: []
|
||||
# - https://www.example.com/manifest1.yaml
|
||||
# - https://www.example.com/manifest2.yaml
|
||||
|
||||
# A list of inline Kubernetes manifests.
|
||||
inlineManifests: []
|
||||
# - name: namespace-ci # Name of the manifest.
|
||||
# contents: |- # Manifest contents as a string.
|
||||
# apiVersion: v1
|
||||
# kind: Namespace
|
||||
# metadata:
|
||||
# name: ci
|
||||
|
||||
|
||||
# # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).
|
||||
|
||||
# # Decryption secret example (do not use in production!).
|
||||
# aescbcEncryptionSecret: z01mye6j16bspJYtTB/5SFX8j7Ph4JXxM2Xuu4vsBPM=
|
||||
|
||||
# # Core DNS specific configuration options.
|
||||
# coreDNS:
|
||||
# image: registry.k8s.io/coredns/coredns:v1.12.1 # The `image` field is an override to the default coredns image.
|
||||
|
||||
# # External cloud provider configuration.
|
||||
# externalCloudProvider:
|
||||
# enabled: true # Enable external cloud provider.
|
||||
# # A list of urls that point to additional manifests for an external cloud provider.
|
||||
# manifests:
|
||||
# - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/rbac.yaml
|
||||
# - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/aws-cloud-controller-manager-daemonset.yaml
|
||||
|
||||
# # A map of key value pairs that will be added while fetching the extraManifests.
|
||||
# extraManifestHeaders:
|
||||
# Token: "1234567"
|
||||
# X-ExtraInfo: info
|
||||
|
||||
# # Settings for admin kubeconfig generation.
|
||||
# adminKubeconfig:
|
||||
# certLifetime: 1h0m0s # Admin kubeconfig certificate lifetime (default is 1 year).
|
||||
|
||||
# # Allows running workload on control-plane nodes.
|
||||
# allowSchedulingOnControlPlanes: true
|

23
setup/cluster-nodes/generated/secrets.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
cluster:
|
||||
id: 1DOt3ZYTVTzEG_Q2IYnScCjz1rxZYwWRHV9hGXBu1UE=
|
||||
secret: qvOKMH5RJtMOPSLBnWCPV4apReFGTd1czZ+tfz11/jI=
|
||||
secrets:
|
||||
bootstraptoken: ed454d.o4jsg75idc817ojs
|
||||
secretboxencryptionsecret: e+8hExoi1Ap4IS6StTsScp72EXKAE2Xi+J7irS7UeG0=
|
||||
trustdinfo:
|
||||
token: t1yf7w.zwevymjw6v0v1q76
|
||||
certs:
|
||||
etcd:
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJmakNDQVNTZ0F3SUJBZ0lSQU5pSkxOUTFZZU5ZL1c0V1pnTVR2UFF3Q2dZSUtvWkl6ajBFQXdJd0R6RU4KTUFzR0ExVUVDaE1FWlhSalpEQWVGdzB5TlRBMk1qTXdNak00TVROYUZ3MHpOVEEyTWpFd01qTTRNVE5hTUE4eApEVEFMQmdOVkJBb1RCR1YwWTJRd1dUQVRCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFUUWg0T0N3M2VVClpUajhadllHdnh6Mkd2UFdGN0NMOWFwVElxRTdzZkh5YzJ6UW1Ic1NpcGFabW1zR0kyLzZPaVJWV280V2JLeDUKSnMwRW12bkVUYmFSbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSApBd0VHQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRkdGVlFqQzUxYXFtCjhyT2l6UVJXTDNkc2RvNndNQW9HQ0NxR1NNNDlCQU1DQTBnQU1FVUNJQ2hvcm9JaVJ4b0VDZEN3dE40UFV4MGoKRUwwM1A3UGJTMDFhQWhNVHJPYkpBaUVBL09mb2RweVd0VlNIK1ZBRVBOcjZaanFOdnFEQTNWQmkraXNQY1YybwpiQUk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
|
||||
key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSU8walRMSmU1TXNzUDhqK3hxbi9Dd0FxSXk1RHo2V1U4MXg2OW5sVFI4S1NvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFMEllRGdzTjNsR1U0L0diMkJyOGM5aHJ6MWhld2kvV3FVeUtoTzdIeDhuTnMwSmg3RW9xVwptWnByQmlOditqb2tWVnFPRm15c2VTYk5CSnI1eEUyMmtRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
|
||||
k8s:
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpakNDQVMrZ0F3SUJBZ0lRRWU5cFdPWEFzd09PNm9NYXNDaXRtakFLQmdncWhrak9QUVFEQWpBVk1STXcKRVFZRFZRUUtFd3ByZFdKbGNtNWxkR1Z6TUI0WERUSTFNRFl5TXpBeU16Z3hNMW9YRFRNMU1EWXlNVEF5TXpneApNMW93RlRFVE1CRUdBMVVFQ2hNS2EzVmlaWEp1WlhSbGN6QlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VICkEwSUFCQ3p0YTA1T3NWOU1NaVg4WDZEdC9xbkhWelkra2tqZ01rcjdsU1kzaERPbmVWYnBhOTJmSHlkS1QyWEgKcWN1L3FJWHpodTg0ckN0VWJuQUsyckJUekFPallUQmZNQTRHQTFVZER3RUIvd1FFQXdJQ2hEQWRCZ05WSFNVRQpGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFCkZnUVVtWEhwMmM5bGRtdFg0Y2RibDlpM0Rwd05GYzB3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQVBwVXVoNmIKYUMwaXdzNTh5WWVlYXVMU1JhbnEveVNUcGo2T0N4UGkvTXJpQWlFQW1DUVdRQ290NkM5b0c5TUlaeDFmMmMxcApBUFRFTHFNQm1vZ1NLSis5dXZBPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUpMVWF4Z2RXR0Flb1ZNRW1CYkZHUjBjbTJMK1ZxNXFsVVZMaE1USHF1ZnVvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFTE8xclRrNnhYMHd5SmZ4Zm9PMytxY2RYTmo2U1NPQXlTdnVWSmplRU02ZDVWdWxyM1o4ZgpKMHBQWmNlcHk3K29oZk9HN3ppc0sxUnVjQXJhc0ZQTUF3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
|
||||
k8saggregator:
|
||||
crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJZRENDQVFhZ0F3SUJBZ0lSQVB2Y2ZReS9pbWkzQUtZdm1GNnExcmd3Q2dZSUtvWkl6ajBFQXdJd0FEQWUKRncweU5UQTJNak13TWpNNE1UTmFGdzB6TlRBMk1qRXdNak00TVROYU1BQXdXVEFUQmdjcWhrak9QUUlCQmdncQpoa2pPUFFNQkJ3TkNBQVI5NjFKWXl4N2ZxSXJHaURhMTUvVFVTc2xoR2xjSWhzandvcGFpTDg0dzNiQVBaOVdQCjliRThKUnJOTUIvVGkxSUJwbm1IbitXZ3pjeFBnbmllYzZnWG8yRXdYekFPQmdOVkhROEJBZjhFQkFNQ0FvUXcKSFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01BOEdBMVVkRXdFQi93UUZNQU1CQWY4dwpIUVlEVlIwT0JCWUVGQ29XYVB4engxL01IanlqcVR1WkhXY2hOeXoxTUFvR0NDcUdTTTQ5QkFNQ0EwZ0FNRVVDCklHNWdQRHhmYVhNVlMwTEJ5bDNLOENLZVRGNHlBQnV0Zk0vT0hKRGR6ZHNsQWlFQW1pVU9tOU5ma2pQY2ducEcKZzdqd0NQbzczNW5zNXV4d2RRdEZpbjdnMEhvPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUxwZzZoSlBhR3A0ZmRPdkQwVGUwZklPSWJvWUdHdUM4OXBHbThWU3NYWE1vQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFZmV0U1dNc2UzNmlLeG9nMnRlZjAxRXJKWVJwWENJYkk4S0tXb2kvT01OMndEMmZWai9XeApQQ1VhelRBZjA0dFNBYVo1aDUvbG9NM01UNEo0bm5Pb0Z3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo=
|
||||
k8sserviceaccount:
|
||||
        key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS0FJQkFBS0NBZ0VBK21FdWh4OWxrbXJXeUdaeWIvcXZCU1hUcys0R2lPRDZ2TlJhRSsrR044WnZxWkI1CktTWFM0Q3pzZXE4V0dKOXV2Yzg0WmdaWk9wY3ZZbXM1T1BKdzE2MjJFSUlaV1FKeXdzZ0F2NWFsTWxUZ1BxLzEKejRtSjlURW5lajZVMUE4cXU1U3FYa3F5dzNZNFdsVUU1TnlhR0d3RE9yMlduTjFTMWI0YXh0V2ZLa1hxUjlFUApvOWVrK1g3UHQwdVhQV0RQQlNiSUE1V3BQRkdqN3dic1lMMTcwcW1GemwvRElUY1Q2S3ROb0lxYTVWVGpqRmRkCkRDY2VKQ3ZldFMvT1F2WG1pcXhtTnBPbW02eFhqcGxRMmNYVUg5b3NsSHREUG4vMUszNlBVWlpaZFJHU2lvQm0KM0RHZHlOU2huN0pCN0J6dmZzaGFqU3pxeExxUmNhaHhMcVdZV2hPUEJiTmVXZ0lTMm5CcDJRYXZhMUR2YkpneQpadGVVaW1EK0VUZlB1QXFZOW9OS0Z0eFVTaS9pTUpYMTM0ZFUvQVZPRXZqaXhPNnlQQjZHVUxpb0ppOHJRVG9TCmVDNStRWXFSU2RSMzhhWFo3R2VSaUlvR3BqMndRY2Y2emVoRHJTUUdabU5BZlpaenV3T1JGei9pRTJrWTBXRGwKV1p2RFlTSFNXbk5UdmZyQk0xN1pEYzNGdTRaSGNaWUpKVGdCMDJGS2kzcS9uRWdudy9zTEhHUEl3SVIvaDlidgpzcVRVMDJYaHRKQlgwYUE2RlFqaG1NTGNvLzF0ci84Y3BPVVcvdVhPM2Y5czZHMW1OY21qeDNVamJqU09xSlRnCmFYVTlGeWZJR2lYei9JcDg0Q2Jsb0wvRXJxQmVXVEQvV2twMWF1QThQcXp6emFmU3NrSzNnd2Rla2NjQ0F3RUEKQVFLQ0FnQWVQUEt4dSs5NE90eWNwWTY5aWx0dElPVTJqanU0elRSSzdpUnJmWnAxT0VzUHJKU05ZbldHRmJiVwpRWTN4YnY1MkFFV0hRTTRHNlpwVE93Vi8rZnVrZUk5aE9BSnRWd0tGYSswaDRPK0ExWEpObW56eWZHR3VaeW9sCmluMmo0ZjhKcjNSOGl4aVlJRG9YRFdjdFovSlk3N2FHSWhQRFRHYkVJZW81bllsVVFYbXFyd3RzcTA3NmJoVVMKUmNLZ0FEQ1FVVFRkQmZhWWc4MldGbEoyMlNZbFNpR1FTODFxUUt6RldwR01uc3RYMWZtSWlmRXNBTG9VVStpdQpIaUM5YlNyVFpaVzU1L2xrNzBWQWJMQ3dmdTFWanNqMzE2NXhLUTFJSEVmeWFsalJ4Q0VHc1dkNndWS1ZIZytLClAxZC9JZndra00yQUk1bG96K3ExSjNWMUtqenNxdGVyY0JKTWhuTVdEYUp5NzhZaGZSZnY0TlNieC9ObjEveW0KanpvWXVjd3pRVEhsd0dLZUhoNG12OWZxM3U5cVJoTlEzNmNjOHowcndmT01BOFpBMVJOOFhkOG82dkxkNitHSQpSbDV6eHpoZ283MXB5V0dNNlZ5L3FqK1F0aWVZVzUrMHdUNVFqbW5WL256bDZLSWZzZGU5Q0xzcG02RnhUWVJlCjE5YzAwemlOWE56V3dPMG4yeTZkaWpKamErZ0lmT0pzVFlFb2dJQ0MxczB0N0orRGU0cHV4anVyalRjMTdZYkcKK1BpejMySmFCVDByYUUxdWlQZ1lhL3Bta1plRjBZTFgzemc4NGhSNHF3WmZsaHdNNTIxKzBJRWRRb29jd2Yycgoyb25xTWlVd2NhaVZzWEVBdjJzRDlwRkd3UEg4MUplY2JBcWNmZkJTcjVPY3VvSmsyUUtDQVFFQS93Nm1EbnFUClliK3dvOEl1SUpUanFjdzYwY1RQZzRnS0tsVzJFRWNqMW5qQitaZ2xzblZheHdMZ0QyaXd1d29BZWg1OTUzWkgKbjFoVk5Eb2VCcGJXcXBRY2VuZjlWVzNUMXpNVjVhZTNYenR1MzkrTExZUlVHakV3NitlMWNrendUSlRBYndnZAp5TnM5TjNDNno0bkhmd0NqRHc2VDhWTVFpRVB6akEveEp3L1RTQzBwRHQ5cTFQZ0hMMHBQMllkdkxvYlpEajJLCkRFb1ErcVE3Tm1XeXlLWGQxWUhZK3VaTDZ1SVlYUDNLSjVWQ0N6ZjlHVHZRUi9XL29DdTgzZzdzdWM3YndCajMKYnN5aElWQUxDTXRXSFhCVDdmNXJJVlhuZHEwdGl5cGQ2NTJDTjBya20xRHZ2L0tsTjZJV01jRkFudjRPV1M0aAphdEt0a3d6SVZCdmdQd0tDQVFFQSswNGJXaDVBVmRreXRzUy9peUlOWDQ1MXZsT0YvQVNIZW5EVjVQUDYxSWpXCll3eXFDNTlSdG0rbEI5MDNvZjVSM0RqQTZXelFvNTdUOFBwSmY2di8wN2RHSzQ2alM5ODRoNEswSzZseWllUHAKUlVlbFpEVDNIbi9WK2hhTUFscnlCUFNyRlFyRkVqdmNOMWN3SmMwTEtDSVBpNGVNeGYwMEdiTHErQ0Fic0szQQpCT3N1cDVxWlNMQWcrRGpIVDdGYnpyOTBMSlN1QnFNNXp0cnJHa1NlbmxQNEtRbGFRMTdEeWlJT2tVZUMvekhFCmg2K1NJMXNla3JHeTNEK3NrQW9HZTlOMVQyL3RPM2lsYVhtdTRIdVkwa3NCckNtZ3EzVTZROXZ0aW8yRmluL1QKQkQ2Y3Z2aUkxN1RJa3lIZkZWZktvRklyOWhIT0RYdEFad2lSQnFsc2VRS0NBUUFyOFQ4a3dYT0E1TUN2QmZaaQpnS1JVamE0WWs5cllvMmgwOEwxa1FvMW5Gdmo4WW4wa0tOblI3YW5pbmJ2TkRhVVZaUWwyQmtmQ3FUcE12REtPCkdoQ3o1TDZmVHVyamUvK0NWUGZSMERwa2V0M1lUakF4VUZvWkJSNlRsaUVKcHozRFErRi9mNXQ2RG1PV21LSm0KdlNzVXMyeGtYTE9hWVNBNUNkUDg3b1l5bjZSY0RBUEYzeklOclFtMzJRcTJ4SUdnTjNWUDRjUlY1N0RUTGRaUgp3ZVd5Y2ZrdEhxamVXU3o5TTZUVTZKaWFoem1RcXoyOHlqUlJJWUs1T3EvWVppUGN3MG5TNTdwQmFabmRIbWc0ClJLZjZmRzdKVXdyci9GdmJjMnlrVEZGUUZadm9vTXVRQXJxN2pEZHd4VWtqbTFMaDBZMXhTZVJSL2lnUGJLVmEKOEU2TEFvSUJBQ1Yxc2h3UDBGVTdxQlNZWlZqdS9ZRlY4ZlVwN0JueDd1UHdkK0hHQUlpMzBRVTR1UXc4ZG1pMApZYXczYkhpSU9WbVRXQ1l6WXpKUWxaVWhLZDJQSFBaSkpudU5xb2Uvd1dScHRrT2Y0WVB1WmpJK2lNZlVJVlg1CmhrTGVJNGFpV2RzbFFXOUVpTFc4R0lwalE3a094Ry82QzhrbnJuTkEyQWhRcERmU1NXNWZwL1RUdmNPY0J1ZFAKNGNvK1pHOWJwNnk4Mnl0ZUNrYlJBK2Z5dUFMVlliT0dIc0szTXk1QnJQdXZjZTV6ODNIbzBEdk5qd0lZTGdsOQoxWVNCTlU3UFA4SXJkaHdlT2dXWWFVZThyTFdubHRNWi9TalZsNjZYTGRVNXJrSHQ4SThCbU1uVUwzZEVBdG5zCmg4MXV5aHNiV0FmbjE4ZTVSYmE2dlpIZU5BZ0RMemtDZ2dFQkFKRXRJemdmdE85UE5RSDlQOHBBTnFaclpqWkMKZGJaaThCUkNTUy83b2tHeWtkQzZsdFJVd0c2ajIxNXZZcjlmVW94ODU5ZkI0RjhEK0FXWHNKSm44YnlQZFBuWQpwU1ZRMEV3a045aTE4WnowTitEbWVZRC9CUmdyVHA1OVU1Mmo5Q1ZyQWNQRlphVk00b0xwaEVSeDZvSURPOEcvCk9wUEZkVnJvMFhyN1lpbENiYVJLVWVWbjZWKzFIZ25zelhiUE9sakhrcGdXSXdKb1RkNkVWVDlvbXNVeFlVejcKRUR5L2RXNmVxVFBMUHR5Q2hKZlo5WDB6M09uUWpLbzcwdHhQa1VRTmw0azhqMU9mMFhMaklacmd6MmVub0FRZgpQYXhSc1lCckhNVnI5eStXaDhZdFdESGx1NUU4NlNaMXNIaHphOHhZZWpoQXRndzdqa0FyNWcxL2dOYz0KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K
    os:
        crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQekNCOHFBREFnRUNBaEVBa2JEQ2VJR09iTlBZZGQxRTBNSUozVEFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qVXdOakl6TURJek9ERXpXaGNOTXpVd05qSXhNREl6T0RFeldqQVFNUTR3REFZRApWUVFLRXdWMFlXeHZjekFxTUFVR0F5dGxjQU1oQVBhbVhHamhnN0FFUmpQZUFJL3dQK21YWVZsYm95M01TUTErCm1CTGh3NmhLbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRk12QnhpY2tXOXVaZWR0ZgppblRzK3p1U2VLK2FNQVVHQXl0bGNBTkJBSEl5Y2ttT3lGMWEvTVJROXp4a1lRcy81clptRjl0YTVsZktCamVlCmRLV0lVbFNRNkY4c1hjZ1orWlhOcXNjSHNwbzFKdStQUVVwa3VocWREdDBRblFjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
        key: LS0tLS1CRUdJTiBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0KTUM0Q0FRQXdCUVlESzJWd0JDSUVJT0hOamQ1blVzdVRGRXpsQmtFOVhkZUJ4b1AxMk9mY2R4a0tjQmZlU0xKbgotLS0tLUVORCBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0K
7
setup/cluster-nodes/generated/talosconfig
Normal file
@@ -0,0 +1,7 @@
context: demo-cluster
contexts:
    demo-cluster:
        endpoints: []
        ca: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQekNCOHFBREFnRUNBaEVBa2JEQ2VJR09iTlBZZGQxRTBNSUozVEFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qVXdOakl6TURJek9ERXpXaGNOTXpVd05qSXhNREl6T0RFeldqQVFNUTR3REFZRApWUVFLRXdWMFlXeHZjekFxTUFVR0F5dGxjQU1oQVBhbVhHamhnN0FFUmpQZUFJL3dQK21YWVZsYm95M01TUTErCm1CTGh3NmhLbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRk12QnhpY2tXOXVaZWR0ZgppblRzK3p1U2VLK2FNQVVHQXl0bGNBTkJBSEl5Y2ttT3lGMWEvTVJROXp4a1lRcy81clptRjl0YTVsZktCamVlCmRLV0lVbFNRNkY4c1hjZ1orWlhOcXNjSHNwbzFKdStQUVVwa3VocWREdDBRblFjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
        crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJLVENCM0tBREFnRUNBaEVBc0xlWW83MXVpVUlEK3RUSEkrbFJjakFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qVXdOakl6TURJek9ERXpXaGNOTWpZd05qSXpNREl6T0RFeldqQVRNUkV3RHdZRApWUVFLRXdodmN6cGhaRzFwYmpBcU1BVUdBeXRsY0FNaEFPek5qd2FncnBYMFc0TWs1OWpoTmtRVU5UTUNobHFoCklnb1lrWnNkWUdhc28wZ3dSakFPQmdOVkhROEJBZjhFQkFNQ0I0QXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0h3WURWUjBqQkJnd0ZvQVV5OEhHSnlSYjI1bDUyMStLZE96N081SjRyNW93QlFZREsyVndBMEVBSmtOcwpGUDZMWUltTDhjR2l3TTRYc0FyZE9XVTdUMzhOaWpvTys2VE80cWRYZTdYSXZwTEZTeXFqRTBuREZtenpmdGVKCk9PM3BiMHlFUmtLcG1rNUJCUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
        key: LS0tLS1CRUdJTiBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0KTUM0Q0FRQXdCUVlESzJWd0JDSUVJQmhtWXY4Wk5kaVVBMG5mbHAvT3VOdHJiM09Rc2xkZUc4cEU2YkFKTStENwotLS0tLUVORCBFRDI1NTE5IFBSSVZBVEUgS0VZLS0tLS0K
606
setup/cluster-nodes/generated/worker.yaml
Normal file
@@ -0,0 +1,606 @@
version: v1alpha1 # Indicates the schema used to decode the contents.
debug: false # Enable verbose logging to the console.
persist: true
# Provides machine specific configuration options.
machine:
    type: worker # Defines the role of the machine within the cluster.
    token: t1yf7w.zwevymjw6v0v1q76 # The `token` is used by a machine to join the PKI of the cluster.
    # The root certificate authority of the PKI.
    ca:
        crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJQekNCOHFBREFnRUNBaEVBa2JEQ2VJR09iTlBZZGQxRTBNSUozVEFGQmdNclpYQXdFREVPTUF3R0ExVUUKQ2hNRmRHRnNiM013SGhjTk1qVXdOakl6TURJek9ERXpXaGNOTXpVd05qSXhNREl6T0RFeldqQVFNUTR3REFZRApWUVFLRXdWMFlXeHZjekFxTUFVR0F5dGxjQU1oQVBhbVhHamhnN0FFUmpQZUFJL3dQK21YWVZsYm95M01TUTErCm1CTGh3NmhLbzJFd1h6QU9CZ05WSFE4QkFmOEVCQU1DQW9Rd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUcKQ0NzR0FRVUZCd01DTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRk12QnhpY2tXOXVaZWR0ZgppblRzK3p1U2VLK2FNQVVHQXl0bGNBTkJBSEl5Y2ttT3lGMWEvTVJROXp4a1lRcy81clptRjl0YTVsZktCamVlCmRLV0lVbFNRNkY4c1hjZ1orWlhOcXNjSHNwbzFKdStQUVVwa3VocWREdDBRblFjPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
        key: ""
    # Extra certificate subject alternative names for the machine's certificate.
    certSANs: []
    # # Uncomment this to enable SANs.
    # - 10.0.0.10
    # - 172.16.0.10
    # - 192.168.0.10

    # Used to provide additional options to the kubelet.
    kubelet:
        image: ghcr.io/siderolabs/kubelet:v1.33.1 # The `image` field is an optional reference to an alternative kubelet image.
        defaultRuntimeSeccompProfileEnabled: true # Enable container runtime default Seccomp profile.
        disableManifestsDirectory: true # The `disableManifestsDirectory` field configures the kubelet to get static pod manifests from the /etc/kubernetes/manifests directory.

        # # The `ClusterDNS` field is an optional reference to an alternative kubelet clusterDNS ip list.
        # clusterDNS:
        # - 10.96.0.10
        # - 169.254.2.53

        # # The `extraArgs` field is used to provide additional flags to the kubelet.
        # extraArgs:
        # key: value

        # # The `extraMounts` field is used to add additional mounts to the kubelet container.
        # extraMounts:
        # - destination: /var/lib/example # Destination is the absolute path where the mount will be placed in the container.
        # type: bind # Type specifies the mount kind.
        # source: /var/lib/example # Source specifies the source path of the mount.
        # # Options are fstab style mount options.
        # options:
        # - bind
        # - rshared
        # - rw

        # # The `extraConfig` field is used to provide kubelet configuration overrides.
        # extraConfig:
        # serverTLSBootstrap: true

        # # The `KubeletCredentialProviderConfig` field is used to provide kubelet credential configuration.
        # credentialProviderConfig:
        # apiVersion: kubelet.config.k8s.io/v1
        # kind: CredentialProviderConfig
        # providers:
        # - apiVersion: credentialprovider.kubelet.k8s.io/v1
        # defaultCacheDuration: 12h
        # matchImages:
        # - '*.dkr.ecr.*.amazonaws.com'
        # - '*.dkr.ecr.*.amazonaws.com.cn'
        # - '*.dkr.ecr-fips.*.amazonaws.com'
        # - '*.dkr.ecr.us-iso-east-1.c2s.ic.gov'
        # - '*.dkr.ecr.us-isob-east-1.sc2s.sgov.gov'
        # name: ecr-credential-provider

        # # The `nodeIP` field is used to configure `--node-ip` flag for the kubelet.
        # nodeIP:
        # # The `validSubnets` field configures the networks to pick kubelet node IP from.
        # validSubnets:
        # - 10.0.0.0/8
        # - '!10.0.0.3/32'
        # - fdc7::/16
    # Provides machine specific network configuration options.
    network: {}
    # # `interfaces` is used to define the network interface configuration.
    # interfaces:
    # - interface: enp0s1 # The interface name.
    # # Assigns static IP addresses to the interface.
    # addresses:
    # - 192.168.2.0/24
    # # A list of routes associated with the interface.
    # routes:
    # - network: 0.0.0.0/0 # The route's network (destination).
    # gateway: 192.168.2.1 # The route's gateway (if empty, creates link scope route).
    # metric: 1024 # The optional metric for the route.
    # mtu: 1500 # The interface's MTU.
    #
    # # # Picks a network device using the selector.

    # # # select a device with bus prefix 00:*.
    # # deviceSelector:
    # # busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
    # # # select a device with mac address matching `*:f0:ab` and `virtio` kernel driver.
    # # deviceSelector:
    # # hardwareAddr: '*:f0:ab' # Device hardware (MAC) address, supports matching by wildcard.
    # # driver: virtio_net # Kernel driver, supports matching by wildcard.
    # # # select a device with bus prefix 00:*, a device with mac address matching `*:f0:ab` and `virtio` kernel driver.
    # # deviceSelector:
    # # - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
    # # - hardwareAddr: '*:f0:ab' # Device hardware (MAC) address, supports matching by wildcard.
    # # driver: virtio_net # Kernel driver, supports matching by wildcard.

    # # # Bond specific options.
    # # bond:
    # # # The interfaces that make up the bond.
    # # interfaces:
    # # - enp2s0
    # # - enp2s1
    # # # Picks a network device using the selector.
    # # deviceSelectors:
    # # - busPath: 00:* # PCI, USB bus prefix, supports matching by wildcard.
    # # - hardwareAddr: '*:f0:ab' # Device hardware (MAC) address, supports matching by wildcard.
    # # driver: virtio_net # Kernel driver, supports matching by wildcard.
    # # mode: 802.3ad # A bond option.
    # # lacpRate: fast # A bond option.

    # # # Bridge specific options.
    # # bridge:
    # # # The interfaces that make up the bridge.
    # # interfaces:
    # # - enxda4042ca9a51
    # # - enxae2a6774c259
    # # # Enable STP on this bridge.
    # # stp:
    # # enabled: true # Whether Spanning Tree Protocol (STP) is enabled.

    # # # Configure this device as a bridge port.
    # # bridgePort:
    # # master: br0 # The name of the bridge master interface

    # # # Indicates if DHCP should be used to configure the interface.
    # # dhcp: true

    # # # DHCP specific options.
    # # dhcpOptions:
    # # routeMetric: 1024 # The priority of all routes received via DHCP.

    # # # Wireguard specific configuration.

    # # # wireguard server example
    # # wireguard:
    # # privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded).
    # # listenPort: 51111 # Specifies a device's listening port.
    # # # Specifies a list of peer configurations to apply to a device.
    # # peers:
    # # - publicKey: ABCDEF... # Specifies the public key of this peer.
    # # endpoint: 192.168.1.3 # Specifies the endpoint of this peer entry.
    # # # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer.
    # # allowedIPs:
    # # - 192.168.1.0/24
    # # # wireguard peer example
    # # wireguard:
    # # privateKey: ABCDEF... # Specifies a private key configuration (base64 encoded).
    # # # Specifies a list of peer configurations to apply to a device.
    # # peers:
    # # - publicKey: ABCDEF... # Specifies the public key of this peer.
    # # endpoint: 192.168.1.2:51822 # Specifies the endpoint of this peer entry.
    # # persistentKeepaliveInterval: 10s # Specifies the persistent keepalive interval for this peer.
    # # # AllowedIPs specifies a list of allowed IP addresses in CIDR notation for this peer.
    # # allowedIPs:
    # # - 192.168.1.0/24

    # # # Virtual (shared) IP address configuration.

    # # # layer2 vip example
    # # vip:
    # # ip: 172.16.199.55 # Specifies the IP address to be used.

    # # Used to statically set the nameservers for the machine.
    # nameservers:
    # - 8.8.8.8
    # - 1.1.1.1

    # # Used to statically set arbitrary search domains.
    # searchDomains:
    # - example.org
    # - example.com

    # # Allows for extra entries to be added to the `/etc/hosts` file
    # extraHostEntries:
    # - ip: 192.168.1.100 # The IP of the host.
    # # The host alias.
    # aliases:
    # - example
    # - example.domain.tld

    # # Configures KubeSpan feature.
    # kubespan:
    # enabled: true # Enable the KubeSpan feature.

    # Used to provide instructions for installations.
    install:
        disk: /dev/sda # The disk used for installations.
        image: ghcr.io/siderolabs/installer:v1.10.3 # Allows for supplying the image used to perform the installation.
        wipe: false # Indicates if the installation disk should be wiped at installation time.

        # # Look up disk using disk attributes like model, size, serial and others.
        # diskSelector:
        # size: 4GB # Disk size.
        # model: WDC* # Disk model `/sys/block/<dev>/device/model`.
        # busPath: /pci0000:00/0000:00:17.0/ata1/host0/target0:0:0/0:0:0:0 # Disk bus path.

        # # Allows for supplying extra kernel args via the bootloader.
        # extraKernelArgs:
        # - talos.platform=metal
        # - reboot=k
    # Used to configure the machine's container image registry mirrors.
    registries: {}
    # # Specifies mirror configuration for each registry host namespace.
    # mirrors:
    # ghcr.io:
    # # List of endpoints (URLs) for registry mirrors to use.
    # endpoints:
    # - https://registry.insecure
    # - https://ghcr.io/v2/

    # # Specifies TLS & auth configuration for HTTPS image registries.
    # config:
    # registry.insecure:
    # # The TLS configuration for the registry.
    # tls:
    # insecureSkipVerify: true # Skip TLS server certificate verification (not recommended).
    #
    # # # Enable mutual TLS authentication with the registry.
    # # clientIdentity:
    # # crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
    # # key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
    #
    # # # The auth configuration for this registry.
    # # auth:
    # # username: username # Optional registry authentication.
    # # password: password # Optional registry authentication.

    # Features describe individual Talos features that can be switched on or off.
    features:
        rbac: true # Enable role-based access control (RBAC).
        stableHostname: true # Enable stable default hostname.
        apidCheckExtKeyUsage: true # Enable checks for extended key usage of client certificates in apid.
        diskQuotaSupport: true # Enable XFS project quota support for EPHEMERAL partition and user disks.
        # KubePrism - local proxy/load balancer on defined port that will distribute requests to all API servers in the cluster.
        kubePrism:
            enabled: true # Enable KubePrism support - will start local load balancing proxy.
            port: 7445 # KubePrism port.
        # Configures host DNS caching resolver.
        hostDNS:
            enabled: true # Enable host DNS caching resolver.
            forwardKubeDNSToHost: true # Use the host DNS resolver as upstream for Kubernetes CoreDNS pods.

        # # Configure Talos API access from Kubernetes pods.
        # kubernetesTalosAPIAccess:
        # enabled: true # Enable Talos API access from Kubernetes pods.
        # # The list of Talos API roles which can be granted for access from Kubernetes pods.
        # allowedRoles:
        # - os:reader
        # # The list of Kubernetes namespaces Talos API access is available from.
        # allowedKubernetesNamespaces:
        # - kube-system

    # # Provides machine specific control plane configuration options.

    # # ControlPlane definition example.
    # controlPlane:
    # # Controller manager machine specific configuration options.
    # controllerManager:
    # disabled: false # Disable kube-controller-manager on the node.
    # # Scheduler machine specific configuration options.
    # scheduler:
    # disabled: true # Disable kube-scheduler on the node.

    # # Used to provide static pod definitions to be run by the kubelet directly bypassing the kube-apiserver.

    # # nginx static pod.
    # pods:
    # - apiVersion: v1
    # kind: pod
    # metadata:
    # name: nginx
    # spec:
    # containers:
    # - image: nginx
    # name: nginx

    # # Allows the addition of user specified files.

    # # MachineFiles usage example.
    # files:
    # - content: '...' # The contents of the file.
    # permissions: 0o666 # The file's permissions in octal.
    # path: /tmp/file.txt # The path of the file.
    # op: append # The operation to use

    # # The `env` field allows for the addition of environment variables.

    # # Environment variables definition examples.
    # env:
    # GRPC_GO_LOG_SEVERITY_LEVEL: info
    # GRPC_GO_LOG_VERBOSITY_LEVEL: "99"
    # https_proxy: http://SERVER:PORT/
    # env:
    # GRPC_GO_LOG_SEVERITY_LEVEL: error
    # https_proxy: https://USERNAME:PASSWORD@SERVER:PORT/
    # env:
    # https_proxy: http://DOMAIN\USERNAME:PASSWORD@SERVER:PORT/

    # # Used to configure the machine's time settings.

    # # Example configuration for cloudflare ntp server.
    # time:
    # disabled: false # Indicates if the time service is disabled for the machine.
    # # description: |
    # servers:
    # - time.cloudflare.com
    # bootTimeout: 2m0s # Specifies the timeout when the node time is considered to be in sync unlocking the boot sequence.

    # # Used to configure the machine's sysctls.

    # # MachineSysctls usage example.
    # sysctls:
    # kernel.domainname: talos.dev
    # net.ipv4.ip_forward: "0"
    # net/ipv6/conf/eth0.100/disable_ipv6: "1"

    # # Used to configure the machine's sysfs.

    # # MachineSysfs usage example.
    # sysfs:
    # devices.system.cpu.cpu0.cpufreq.scaling_governor: performance

    # # Machine system disk encryption configuration.
    # systemDiskEncryption:
    # # Ephemeral partition encryption.
    # ephemeral:
    # provider: luks2 # Encryption provider to use for the encryption.
    # # Defines the encryption keys generation and storage method.
    # keys:
    # - # Deterministically generated key from the node UUID and PartitionLabel.
    # nodeID: {}
    # slot: 0 # Key slot number for LUKS2 encryption.
    #
    # # # KMS managed encryption key.
    # # kms:
    # # endpoint: https://192.168.88.21:4443 # KMS endpoint to Seal/Unseal the key.
    #
    # # # Cipher kind to use for the encryption. Depends on the encryption provider.
    # # cipher: aes-xts-plain64

    # # # Defines the encryption sector size.
    # # blockSize: 4096

    # # # Additional --perf parameters for the LUKS2 encryption.
    # # options:
    # # - no_read_workqueue
    # # - no_write_workqueue

    # # Configures the udev system.
    # udev:
    # # List of udev rules to apply to the udev system
    # rules:
    # - SUBSYSTEM=="drm", KERNEL=="renderD*", GROUP="44", MODE="0660"

    # # Configures the logging system.
    # logging:
    # # Logging destination.
    # destinations:
    # - endpoint: tcp://1.2.3.4:12345 # Where to send logs. Supported protocols are "tcp" and "udp".
    # format: json_lines # Logs format.

    # # Configures the kernel.
    # kernel:
    # # Kernel modules to load.
    # modules:
    # - name: brtfs # Module name.

    # # Configures the seccomp profiles for the machine.
    # seccompProfiles:
    # - name: audit.json # The `name` field is used to provide the file name of the seccomp profile.
    # # The `value` field is used to provide the seccomp profile.
    # value:
    # defaultAction: SCMP_ACT_LOG

    # # Override (patch) settings in the default OCI runtime spec for CRI containers.

    # # override default open file limit
    # baseRuntimeSpecOverrides:
    # process:
    # rlimits:
    # - hard: 1024
    # soft: 1024
    # type: RLIMIT_NOFILE

    # # Configures the node labels for the machine.

    # # node labels example.
    # nodeLabels:
    # exampleLabel: exampleLabelValue

    # # Configures the node annotations for the machine.

    # # node annotations example.
    # nodeAnnotations:
    # customer.io/rack: r13a25

    # # Configures the node taints for the machine. Effect is optional.

    # # node taints example.
    # nodeTaints:
    # exampleTaint: exampleTaintValue:NoSchedule
# Provides cluster specific configuration options.
cluster:
    id: 1DOt3ZYTVTzEG_Q2IYnScCjz1rxZYwWRHV9hGXBu1UE= # Globally unique identifier for this cluster (base64 encoded random 32 bytes).
    secret: qvOKMH5RJtMOPSLBnWCPV4apReFGTd1czZ+tfz11/jI= # Shared secret of cluster (base64 encoded random 32 bytes).
    # Provides control plane specific configuration options.
    controlPlane:
        endpoint: https://192.168.8.30:6443 # Endpoint is the canonical controlplane endpoint, which can be an IP address or a DNS hostname.
    clusterName: demo-cluster # Configures the cluster's name.
    # Provides cluster specific network configuration options.
    network:
        dnsDomain: cluster.local # The domain used by Kubernetes DNS.
        # The pod subnet CIDR.
        podSubnets:
            - 10.244.0.0/16
        # The service subnet CIDR.
        serviceSubnets:
            - 10.96.0.0/12

        # # The CNI used.
        # cni:
        # name: custom # Name of CNI to use.
        # # URLs containing manifests to apply for the CNI.
        # urls:
        # - https://docs.projectcalico.org/archive/v3.20/manifests/canal.yaml
    token: ed454d.o4jsg75idc817ojs # The [bootstrap token](https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/) used to join the cluster.
    # The base64 encoded root certificate authority used by Kubernetes.
    ca:
        crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJpakNDQVMrZ0F3SUJBZ0lRRWU5cFdPWEFzd09PNm9NYXNDaXRtakFLQmdncWhrak9QUVFEQWpBVk1STXcKRVFZRFZRUUtFd3ByZFdKbGNtNWxkR1Z6TUI0WERUSTFNRFl5TXpBeU16Z3hNMW9YRFRNMU1EWXlNVEF5TXpneApNMW93RlRFVE1CRUdBMVVFQ2hNS2EzVmlaWEp1WlhSbGN6QlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VICkEwSUFCQ3p0YTA1T3NWOU1NaVg4WDZEdC9xbkhWelkra2tqZ01rcjdsU1kzaERPbmVWYnBhOTJmSHlkS1QyWEgKcWN1L3FJWHpodTg0ckN0VWJuQUsyckJUekFPallUQmZNQTRHQTFVZER3RUIvd1FFQXdJQ2hEQWRCZ05WSFNVRQpGakFVQmdnckJnRUZCUWNEQVFZSUt3WUJCUVVIQXdJd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFCkZnUVVtWEhwMmM5bGRtdFg0Y2RibDlpM0Rwd05GYzB3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQVBwVXVoNmIKYUMwaXdzNTh5WWVlYXVMU1JhbnEveVNUcGo2T0N4UGkvTXJpQWlFQW1DUVdRQ290NkM5b0c5TUlaeDFmMmMxcApBUFRFTHFNQm1vZ1NLSis5dXZBPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
        key: ""
    # Configures cluster member discovery.
    discovery:
        enabled: true # Enable the cluster membership discovery feature.
        # Configure registries used for cluster member discovery.
        registries:
            # Kubernetes registry uses Kubernetes API server to discover cluster members and stores additional information
            kubernetes:
                disabled: true # Disable Kubernetes discovery registry.
            # Service registry is using an external service to push and pull information about cluster members.
            service: {}
            # # External service endpoint.
            # endpoint: https://discovery.talos.dev/

    # # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).

    # # Decryption secret example (do not use in production!).
    # aescbcEncryptionSecret: z01mye6j16bspJYtTB/5SFX8j7Ph4JXxM2Xuu4vsBPM=

    # # A key used for the [encryption of secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/).

    # # Decryption secret example (do not use in production!).
    # secretboxEncryptionSecret: z01mye6j16bspJYtTB/5SFX8j7Ph4JXxM2Xuu4vsBPM=

    # # The base64 encoded aggregator certificate authority used by Kubernetes for front-proxy certificate generation.

    # # AggregatorCA example.
    # aggregatorCA:
    # crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
    # key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==

    # # The base64 encoded private key for service account token generation.

    # # AggregatorCA example.
    # serviceAccount:
    # key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==

    # # API server specific configuration options.
    # apiServer:
    # image: registry.k8s.io/kube-apiserver:v1.33.1 # The container image used in the API server manifest.
    # # Extra arguments to supply to the API server.
    # extraArgs:
    # feature-gates: ServerSideApply=true
    # http2-max-streams-per-connection: "32"
    # # Extra certificate subject alternative names for the API server's certificate.
    # certSANs:
    # - 1.2.3.4
    # - 4.5.6.7
    # # Configure the API server admission plugins.
    # admissionControl:
    # - name: PodSecurity # Name is the name of the admission controller.
    # # Configuration is an embedded configuration object to be used as the plugin's configuration.
    # configuration:
    # apiVersion: pod-security.admission.config.k8s.io/v1alpha1
    # defaults:
    # audit: restricted
    # audit-version: latest
    # enforce: baseline
    # enforce-version: latest
    # warn: restricted
    # warn-version: latest
    # exemptions:
    # namespaces:
    # - kube-system
    # runtimeClasses: []
    # usernames: []
    # kind: PodSecurityConfiguration
    # # Configure the API server audit policy.
    # auditPolicy:
    # apiVersion: audit.k8s.io/v1
    # kind: Policy
    # rules:
    # - level: Metadata
    # # Configure the API server authorization config. Node and RBAC authorizers are always added irrespective of the configuration.
    # authorizationConfig:
    # - type: Webhook # Type is the name of the authorizer. Allowed values are `Node`, `RBAC`, and `Webhook`.
    # name: webhook # Name is used to describe the authorizer.
    # # webhook is the configuration for the webhook authorizer.
    # webhook:
    # connectionInfo:
    # type: InClusterConfig
    # failurePolicy: Deny
    # matchConditionSubjectAccessReviewVersion: v1
    # matchConditions:
    # - expression: has(request.resourceAttributes)
    # - expression: '!(''system:serviceaccounts:kube-system'' in request.groups)'
    # subjectAccessReviewVersion: v1
    # timeout: 3s
    # - type: Webhook # Type is the name of the authorizer. Allowed values are `Node`, `RBAC`, and `Webhook`.
    # name: in-cluster-authorizer # Name is used to describe the authorizer.
    # # webhook is the configuration for the webhook authorizer.
    # webhook:
    # connectionInfo:
    # type: InClusterConfig
    # failurePolicy: NoOpinion
    # matchConditionSubjectAccessReviewVersion: v1
    # subjectAccessReviewVersion: v1
    # timeout: 3s

    # # Controller manager server specific configuration options.
    # controllerManager:
    # image: registry.k8s.io/kube-controller-manager:v1.33.1 # The container image used in the controller manager manifest.
    # # Extra arguments to supply to the controller manager.
    # extraArgs:
    # feature-gates: ServerSideApply=true

    # # Kube-proxy server-specific configuration options
    # proxy:
    # disabled: false # Disable kube-proxy deployment on cluster bootstrap.
    # image: registry.k8s.io/kube-proxy:v1.33.1 # The container image used in the kube-proxy manifest.
    # mode: ipvs # proxy mode of kube-proxy.
    # # Extra arguments to supply to kube-proxy.
    # extraArgs:
    # proxy-mode: iptables

    # # Scheduler server specific configuration options.
    # scheduler:
    # image: registry.k8s.io/kube-scheduler:v1.33.1 # The container image used in the scheduler manifest.
    # # Extra arguments to supply to the scheduler.
    # extraArgs:
    # feature-gates: AllBeta=true

    # # Etcd specific configuration options.
    # etcd:
    # image: gcr.io/etcd-development/etcd:v3.5.21 # The container image used to create the etcd service.
    # # The `ca` is the root certificate authority of the PKI.
    # ca:
    # crt: LS0tIEVYQU1QTEUgQ0VSVElGSUNBVEUgLS0t
    # key: LS0tIEVYQU1QTEUgS0VZIC0tLQ==
    # # Extra arguments to supply to etcd.
    # extraArgs:
    # election-timeout: "5000"
    # # The `advertisedSubnets` field configures the networks to pick etcd advertised IP from.
    # advertisedSubnets:
    # - 10.0.0.0/8

    # # Core DNS specific configuration options.
    # coreDNS:
    # image: registry.k8s.io/coredns/coredns:v1.12.1 # The `image` field is an override to the default coredns image.

    # # External cloud provider configuration.
    # externalCloudProvider:
    # enabled: true # Enable external cloud provider.
    # # A list of urls that point to additional manifests for an external cloud provider.
    # manifests:
    # - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/rbac.yaml
    # - https://raw.githubusercontent.com/kubernetes/cloud-provider-aws/v1.20.0-alpha.0/manifests/aws-cloud-controller-manager-daemonset.yaml

    # # A list of urls that point to additional manifests.
    # extraManifests:
    # - https://www.example.com/manifest1.yaml
    # - https://www.example.com/manifest2.yaml

    # # A map of key value pairs that will be added while fetching the extraManifests.
    # extraManifestHeaders:
    # Token: "1234567"
    # X-ExtraInfo: info

    # # A list of inline Kubernetes manifests.
    # inlineManifests:
    # - name: namespace-ci # Name of the manifest.
    # contents: |- # Manifest contents as a string.
    # apiVersion: v1
    # kind: Namespace
    # metadata:
    # name: ci

    # # Settings for admin kubeconfig generation.
    # adminKubeconfig:
    # certLifetime: 1h0m0s # Admin kubeconfig certificate lifetime (default is 1 year).

    # # Allows running workload on control-plane nodes.
    # allowSchedulingOnControlPlanes: true
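A generated config like the worker.yaml above can be checked offline before any node sees it; a minimal sketch, assuming talosctl is on the PATH and is run from setup/cluster-nodes/:

```bash
# Validate the generated machine config against the metal platform rules.
# This is purely local; no node is contacted.
talosctl validate --config generated/worker.yaml --mode metal
```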
80
setup/cluster-nodes/init-cluster.sh
Executable file
@@ -0,0 +1,80 @@
#!/bin/bash

# Talos cluster initialization script
# This script performs one-time cluster setup: generates secrets, base configs, and sets up talosctl

set -euo pipefail

# Check if WC_HOME is set
if [ -z "${WC_HOME:-}" ]; then
    echo "Error: WC_HOME environment variable not set. Run \`source ./env.sh\`."
    exit 1
fi

NODE_SETUP_DIR="${WC_HOME}/setup/cluster-nodes"

# Get cluster configuration from config.yaml
CLUSTER_NAME=$(wild-config cluster.name)
VIP=$(wild-config cluster.nodes.control.vip)
TALOS_VERSION=$(wild-config cluster.nodes.talos.version)

echo "Initializing Talos cluster: $CLUSTER_NAME"
echo "VIP: $VIP"
echo "Talos version: $TALOS_VERSION"

# Create directories
mkdir -p generated final patch

# Check if cluster secrets already exist
if [ -f "generated/secrets.yaml" ]; then
    echo ""
    echo "⚠️  Cluster secrets already exist!"
    echo "This will regenerate ALL cluster certificates and invalidate existing nodes."
    echo ""
    read -p "Do you want to continue? (y/N): " -r
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        echo "Cancelled."
        exit 0
    fi
    echo ""
fi

# Generate fresh cluster secrets
echo "Generating cluster secrets..."
cd generated
talosctl gen secrets -o secrets.yaml --force

echo "Generating base machine configs..."
talosctl gen config --with-secrets secrets.yaml "$CLUSTER_NAME" "https://$VIP:6443" --force
cd ..

# Setup talosctl context
echo "Setting up talosctl context..."

# Remove existing context if it exists
talosctl config context "$CLUSTER_NAME" --remove 2>/dev/null || true

# Merge new configuration
talosctl config merge ./generated/talosconfig
talosctl config endpoint "$VIP"

echo ""
echo "✅ Cluster initialization complete!"
echo ""
echo "Cluster details:"
echo "  - Name: $CLUSTER_NAME"
echo "  - VIP: $VIP"
echo "  - Secrets: generated/secrets.yaml"
echo "  - Base configs: generated/controlplane.yaml, generated/worker.yaml"
echo ""
echo "Talosctl context configured:"
talosctl config info
echo ""
echo "Next steps:"
echo "1. Register nodes with hardware detection:"
echo "   ./detect-node-hardware.sh <maintenance-ip> <node-number>"
echo ""
echo "2. Generate machine configurations:"
echo "   ./generate-machine-configs.sh"
echo ""
echo "3. Apply configurations to nodes"
@@ -1,21 +0,0 @@
#!/bin/bash
set -e

apt-get update

# Longhorn requirements

# Install iscsi on all nodes.
# apt-get install open-iscsi
# modprobe iscsi_tcp
# systemctl restart open-iscsi
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.8.1/deploy/prerequisite/longhorn-iscsi-installation.yaml

# Install NFSv4 client on all nodes.
# apt-get install nfs-common
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.8.1/deploy/prerequisite/longhorn-nfs-installation.yaml

apt-get install cryptsetup

# To check longhorn requirements:
# curl -sSfL https://raw.githubusercontent.com/longhorn/longhorn/v1.8.1/scripts/environment_check.sh | bash
22
setup/cluster-nodes/patch.templates/controlplane-node-1.yaml
Normal file
@@ -0,0 +1,22 @@
machine:
  install:
    disk: {{ .cluster.nodes.control.node1.disk }}
    image: factory.talos.dev/metal-installer/{{ .cluster.nodes.talos.schematicId }}:{{ .cluster.nodes.talos.version }}
  network:
    interfaces:
      - interface: {{ .cluster.nodes.control.node1.interface }}
        dhcp: false
        addresses:
          - {{ .cluster.nodes.control.node1.ip }}/24
        routes:
          - network: 0.0.0.0/0
            gateway: {{ .cloud.router.ip }}
        vip:
          ip: {{ .cluster.nodes.control.vip }}
cluster:
  discovery:
    enabled: true
    registries:
      service:
        disabled: true
  allowSchedulingOnControlPlanes: true
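For reference, once wild-compile-template-dir substitutes config values, a compiled patch might look like the following; the disk, schematic id, interface, node IP, and gateway here are hypothetical, while the VIP matches the controlplane endpoint used elsewhere in this commit:

```yaml
machine:
  install:
    disk: /dev/nvme0n1
    image: factory.talos.dev/metal-installer/<schematic-id>:v1.10.3
  network:
    interfaces:
      - interface: enp4s0
        dhcp: false
        addresses:
          - 192.168.8.31/24
        routes:
          - network: 0.0.0.0/0
            gateway: 192.168.8.1
        vip:
          ip: 192.168.8.30
```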
22
setup/cluster-nodes/patch.templates/controlplane-node-2.yaml
Normal file
@@ -0,0 +1,22 @@
machine:
  install:
    disk: {{ .cluster.nodes.control.node2.disk }}
    image: factory.talos.dev/metal-installer/{{ .cluster.nodes.talos.schematicId }}:{{ .cluster.nodes.talos.version }}
  network:
    interfaces:
      - interface: {{ .cluster.nodes.control.node2.interface }}
        dhcp: false
        addresses:
          - {{ .cluster.nodes.control.node2.ip }}/24
        routes:
          - network: 0.0.0.0/0
            gateway: {{ .cloud.router.ip }}
        vip:
          ip: {{ .cluster.nodes.control.vip }}
cluster:
  discovery:
    enabled: true
    registries:
      service:
        disabled: true
  allowSchedulingOnControlPlanes: true
22
setup/cluster-nodes/patch.templates/controlplane-node-3.yaml
Normal file
@@ -0,0 +1,22 @@
machine:
  install:
    disk: {{ .cluster.nodes.control.node3.disk }}
    image: factory.talos.dev/metal-installer/{{ .cluster.nodes.talos.schematicId }}:{{ .cluster.nodes.talos.version }}
  network:
    interfaces:
      - interface: {{ .cluster.nodes.control.node3.interface }}
        dhcp: false
        addresses:
          - {{ .cluster.nodes.control.node3.ip }}/24
        routes:
          - network: 0.0.0.0/0
            gateway: {{ .cloud.router.ip }}
        vip:
          ip: {{ .cluster.nodes.control.vip }}
cluster:
  discovery:
    enabled: true
    registries:
      service:
        disabled: true
  allowSchedulingOnControlPlanes: true
22
setup/cluster-nodes/patch.templates/worker.yaml
Normal file
@@ -0,0 +1,22 @@
machine:
  install:
    disk: /dev/sdc
  network:
    interfaces:
      - interface: enp4s0
        dhcp: true
  kubelet:
    extraMounts:
      - destination: /var/lib/longhorn
        type: bind
        source: /var/lib/longhorn
        options:
          - bind
          - rshared
          - rw
# NOTE: System extensions need to be added via Talos Image Factory
# customization:
#   systemExtensions:
#     officialExtensions:
#       - siderolabs/iscsi-tools
#       - siderolabs/util-linux-tools
0
setup/cluster-nodes/patch/.gitkeep
Normal file
@@ -1,17 +0,0 @@
machine:
  install:
    disk: /dev/sdc
  network:
    interfaces:
      - interface: eth0
        vip:
          ip: 192.168.8.20
      - interface: eth1
        dhcp: true
cluster:
  discovery:
    enabled: true
    registries:
      service:
        disabled: true
  allowSchedulingOnControlPlanes: true
@@ -1,3 +0,0 @@
machine:
  install:
    disk: /dev/sdc
@@ -19,22 +19,16 @@ Internet → External DNS → MetalLB LoadBalancer → Traefik → Kubernetes Se

## Key Components

- **MetalLB** - Provides load balancing for bare metal clusters
- **Traefik** - Handles ingress traffic, TLS termination, and routing
- **cert-manager** - Manages TLS certificates
- **CoreDNS** - Provides DNS resolution for services
- **Longhorn** - Distributed storage system for persistent volumes
- **NFS** - Network file system for shared media storage (optional)
- **Kubernetes Dashboard** - Web UI for cluster management (accessible via https://dashboard.internal.${DOMAIN})
- **Docker Registry** - Private container registry for custom images

## Configuration Approach

All infrastructure components use a consistent configuration approach:

1. **Environment Variables** - All configuration settings are managed using environment variables loaded by running `source load-env.sh`
2. **Template Files** - Configuration files use templates with `${VARIABLE}` syntax
3. **Setup Scripts** - Each component has a dedicated script in `infrastructure_setup/` for installation and configuration
- **[MetalLB](metallb/README.md)** - Provides load balancing for bare metal clusters
- **[Traefik](traefik/README.md)** - Handles ingress traffic, TLS termination, and routing
- **[cert-manager](cert-manager/README.md)** - Manages TLS certificates
- **[CoreDNS](coredns/README.md)** - Provides DNS resolution for services
- **[ExternalDNS](externaldns/README.md)** - Automatic DNS record management
- **[Longhorn](longhorn/README.md)** - Distributed storage system for persistent volumes
- **[NFS](nfs/README.md)** - Network file system for shared media storage (optional)
- **[Kubernetes Dashboard](kubernetes-dashboard/README.md)** - Web UI for cluster management (accessible via https://dashboard.internal.${DOMAIN})
- **[Docker Registry](docker-registry/README.md)** - Private container registry for custom images
- **[Utils](utils/README.md)** - Cluster utilities and debugging tools

## Idempotent Design

@@ -47,55 +41,3 @@ All setup scripts are designed to be idempotent:
- Changes to configuration will be properly applied on subsequent runs

This idempotent approach ensures consistent, reliable infrastructure setup and allows for incremental changes without requiring a complete teardown and rebuild.

## NFS Setup (Optional)

The infrastructure supports optional NFS (Network File System) for shared media storage across the cluster:

### Host Setup

First, set up the NFS server on your chosen host:

```bash
# Set required environment variables
export NFS_HOST=box-01              # Hostname or IP of NFS server
export NFS_MEDIA_PATH=/data/media   # Path to media directory
export NFS_STORAGE_CAPACITY=1Ti     # Optional: PV size (default: 250Gi)

# Run host setup script on the NFS server
./infrastructure_setup/setup-nfs-host.sh
```

### Cluster Integration

Then integrate NFS with your Kubernetes cluster:

```bash
# Run cluster setup (part of setup-all.sh or standalone)
./infrastructure_setup/setup-nfs.sh
```

### Features

- **Automatic IP detection** - Uses network IP even when hostname resolves to localhost
- **Cluster-wide access** - Any pod can mount the NFS share regardless of node placement
- **Configurable capacity** - Set PersistentVolume size via `NFS_STORAGE_CAPACITY`
- **ReadWriteMany** - Multiple pods can simultaneously access the same storage

### Usage

Applications can use NFS storage by setting `storageClassName: nfs` in their PVCs:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: media-pvc
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  resources:
    requests:
      storage: 100Gi
```
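A pod can then mount the claim like any other volume; a minimal sketch using the media-pvc from the example above:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: media-reader
spec:
  containers:
    - name: reader
      image: busybox
      command: ["sleep", "infinity"]
      volumeMounts:
        - name: media
          mountPath: /media # the NFS share appears here inside the container
  volumes:
    - name: media
      persistentVolumeClaim:
        claimName: media-pvc
```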
0
setup/cluster/cert-manager/README.md
Normal file
@@ -1,20 +1,19 @@
#!/bin/bash
set -e

# Navigate to script directory
SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")"
SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
cd "$SCRIPT_DIR"

# Source environment variables
if [[ -f "../load-env.sh" ]]; then
    source ../load-env.sh
if [ -z "${WC_HOME}" ]; then
    echo "Please source the wildcloud environment first. (e.g., \`source ./env.sh\`)"
    exit 1
fi

echo "Setting up cert-manager..."
CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster"
CERT_MANAGER_DIR="${CLUSTER_SETUP_DIR}/cert-manager"

# Create cert-manager namespace
kubectl create namespace cert-manager --dry-run=client -o yaml | kubectl apply -f -
# Process templates with wild-compile-template-dir
echo "Processing cert-manager templates..."
wild-compile-template-dir --clean ${CERT_MANAGER_DIR}/kustomize.template ${CERT_MANAGER_DIR}/kustomize

echo "Setting up cert-manager..."

# Install cert-manager using the official installation method
# This installs CRDs, controllers, and webhook components
@@ -34,23 +33,12 @@ echo "Waiting additional time for cert-manager webhook to be fully operational..
sleep 30

# Setup Cloudflare API token for DNS01 challenges
if [[ -n "${CLOUDFLARE_API_TOKEN}" ]]; then
    echo "Creating Cloudflare API token secret in cert-manager namespace..."
    kubectl create secret generic cloudflare-api-token \
        --namespace cert-manager \
        --from-literal=api-token="${CLOUDFLARE_API_TOKEN}" \
        --dry-run=client -o yaml | kubectl apply -f -
else
    echo "Warning: CLOUDFLARE_API_TOKEN not set. DNS01 challenges will not work."
fi

echo "Creating Let's Encrypt issuers..."
cat ${SCRIPT_DIR}/cert-manager/letsencrypt-staging-dns01.yaml | envsubst | kubectl apply -f -
cat ${SCRIPT_DIR}/cert-manager/letsencrypt-prod-dns01.yaml | envsubst | kubectl apply -f -

# Wait for issuers to be ready
echo "Waiting for Let's Encrypt issuers to be ready..."
sleep 10
echo "Creating Cloudflare API token secret..."
CLOUDFLARE_API_TOKEN=$(wild-secret cluster.certManager.cloudflare.apiToken) || exit 1
kubectl create secret generic cloudflare-api-token \
    --namespace cert-manager \
    --from-literal=api-token="${CLOUDFLARE_API_TOKEN}" \
    --dry-run=client -o yaml | kubectl apply -f -

# Configure cert-manager to use external DNS for challenge verification
echo "Configuring cert-manager to use external DNS servers..."
@@ -75,10 +63,13 @@ spec:
echo "Waiting for cert-manager to restart with new DNS configuration..."
kubectl rollout status deployment/cert-manager -n cert-manager --timeout=120s

# Apply wildcard certificates
echo "Creating wildcard certificates..."
cat ${SCRIPT_DIR}/cert-manager/internal-wildcard-certificate.yaml | envsubst | kubectl apply -f -
cat ${SCRIPT_DIR}/cert-manager/wildcard-certificate.yaml | envsubst | kubectl apply -f -
# Apply Let's Encrypt issuers and certificates using kustomize
echo "Creating Let's Encrypt issuers and certificates..."
kubectl apply -k ${CERT_MANAGER_DIR}/kustomize

# Wait for issuers to be ready
echo "Waiting for Let's Encrypt issuers to be ready..."
sleep 10
echo "Wildcard certificate creation initiated. This may take some time to complete depending on DNS propagation."

# Wait for the certificates to be issued (with a timeout)
@@ -91,3 +82,4 @@ echo ""
echo "To verify the installation:"
echo "  kubectl get pods -n cert-manager"
echo "  kubectl get clusterissuers"
echo "  kubectl get certificates -n cert-manager"
@@ -0,0 +1,19 @@
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: wildcard-internal-wild-cloud
  namespace: cert-manager
spec:
  secretName: wildcard-internal-wild-cloud-tls
  dnsNames:
    - "*.{{ .cloud.internalDomain }}"
    - "{{ .cloud.internalDomain }}"
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
  duration: 2160h # 90 days
  renewBefore: 360h # 15 days
  privateKey:
    algorithm: RSA
    size: 2048
@@ -0,0 +1,12 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - namespace.yaml
  - letsencrypt-staging-dns01.yaml
  - letsencrypt-prod-dns01.yaml
  - internal-wildcard-certificate.yaml
  - wildcard-certificate.yaml

# Note: cert-manager.yaml contains the main installation manifests
# but is applied separately via URL in the install script
@@ -0,0 +1,26 @@
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    email: {{ .operator.email }}
    privateKeySecretRef:
      name: letsencrypt-prod
    server: https://acme-v02.api.letsencrypt.org/directory
    solvers:
      # DNS-01 solver for wildcard certificates
      - dns01:
          cloudflare:
            email: {{ .operator.email }}
            apiTokenSecretRef:
              name: cloudflare-api-token
              key: api-token
        selector:
          dnsZones:
            - "{{ .cluster.certManager.cloudflare.domain }}"
      # Keep the HTTP-01 solver for non-wildcard certificates
      - http01:
          ingress:
            class: traefik
@@ -0,0 +1,26 @@
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    email: {{ .operator.email }}
    privateKeySecretRef:
      name: letsencrypt-staging
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    solvers:
      # DNS-01 solver for wildcard certificates
      - dns01:
          cloudflare:
            email: {{ .operator.email }}
            apiTokenSecretRef:
              name: cloudflare-api-token
              key: api-token
        selector:
          dnsZones:
            - "{{ .cluster.certManager.cloudflare.domain }}"
      # Keep the HTTP-01 solver for non-wildcard certificates
      - http01:
          ingress:
            class: traefik
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: cert-manager
@@ -0,0 +1,19 @@
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: wildcard-wild-cloud
  namespace: cert-manager
spec:
  secretName: wildcard-wild-cloud-tls
  dnsNames:
    - "*.{{ .cloud.domain }}"
    - "{{ .cloud.domain }}"
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
  duration: 2160h # 90 days
  renewBefore: 360h # 15 days
  privateKey:
    algorithm: RSA
    size: 2048
5623
setup/cluster/cert-manager/kustomize/cert-manager.yaml
Normal file
File diff suppressed because it is too large
@@ -7,8 +7,8 @@ metadata:
spec:
  secretName: wildcard-internal-wild-cloud-tls
  dnsNames:
    - "*.internal.${DOMAIN}"
    - "internal.${DOMAIN}"
    - "*.internal.cloud2.payne.io"
    - "internal.cloud2.payne.io"
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
12
setup/cluster/cert-manager/kustomize/kustomization.yaml
Normal file
@@ -0,0 +1,12 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - namespace.yaml
  - letsencrypt-staging-dns01.yaml
  - letsencrypt-prod-dns01.yaml
  - internal-wildcard-certificate.yaml
  - wildcard-certificate.yaml

# Note: cert-manager.yaml contains the main installation manifests
# but is applied separately via URL in the install script
@@ -5,7 +5,7 @@ metadata:
  name: letsencrypt-prod
spec:
  acme:
    email: ${EMAIL}
    email: paul@payne.io
    privateKeySecretRef:
      name: letsencrypt-prod
    server: https://acme-v02.api.letsencrypt.org/directory
@@ -13,13 +13,13 @@ spec:
      # DNS-01 solver for wildcard certificates
      - dns01:
          cloudflare:
            email: ${EMAIL}
            email: paul@payne.io
            apiTokenSecretRef:
              name: cloudflare-api-token
              key: api-token
        selector:
          dnsZones:
            - "${CLOUDFLARE_DOMAIN}"
            - "payne.io"
      # Keep the HTTP-01 solver for non-wildcard certificates
      - http01:
          ingress:
@@ -5,7 +5,7 @@ metadata:
  name: letsencrypt-staging
spec:
  acme:
    email: ${EMAIL}
    email: paul@payne.io
    privateKeySecretRef:
      name: letsencrypt-staging
    server: https://acme-staging-v02.api.letsencrypt.org/directory
@@ -13,13 +13,13 @@ spec:
      # DNS-01 solver for wildcard certificates
      - dns01:
          cloudflare:
            email: ${EMAIL}
            email: paul@payne.io
            apiTokenSecretRef:
              name: cloudflare-api-token
              key: api-token
        selector:
          dnsZones:
            - "${CLOUDFLARE_DOMAIN}"
            - "payne.io"
      # Keep the HTTP-01 solver for non-wildcard certificates
      - http01:
          ingress:
|
4
setup/cluster/cert-manager/kustomize/namespace.yaml
Normal file
4
setup/cluster/cert-manager/kustomize/namespace.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: cert-manager
|
@@ -7,8 +7,8 @@ metadata:
|
||||
spec:
|
||||
secretName: wildcard-wild-cloud-tls
|
||||
dnsNames:
|
||||
- "*.${DOMAIN}"
|
||||
- "${DOMAIN}"
|
||||
- "*.cloud2.payne.io"
|
||||
- "cloud2.payne.io"
|
||||
issuerRef:
|
||||
name: letsencrypt-prod
|
||||
kind: ClusterIssuer
|
@@ -19,31 +19,27 @@ Any query for a resource in the `internal.$DOMAIN` domain will be given the IP o

## Default CoreDNS Configuration

Found at: https://github.com/k3s-io/k3s/blob/master/manifests/coredns.yaml

This is k3s default CoreDNS configuration, for reference:
This is the default CoreDNS configuration, for reference:

```txt
.:53 {
    errors
    health
    health { lameduck 5s }
    ready
    kubernetes %{CLUSTER_DOMAIN}% in-addr.arpa ip6.arpa {
        pods insecure
        fallthrough in-addr.arpa ip6.arpa
    }
    hosts /etc/coredns/NodeHosts {
        ttl 60
        reload 15s
        fallthrough
    }
    log . { class error }
    prometheus :9153
    forward . /etc/resolv.conf
    cache 30
    kubernetes cluster.local in-addr.arpa ip6.arpa {
        pods insecure
        fallthrough in-addr.arpa ip6.arpa
        ttl 30
    }
    forward . /etc/resolv.conf { max_concurrent 1000 }
    cache 30 {
        disable success cluster.local
        disable denial cluster.local
    }
    loop
    reload
    loadbalance
    import /etc/coredns/custom/*.override
}
import /etc/coredns/custom/*.server
```

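Resolution through the custom server block can be spot-checked from any machine on the LAN; this assumes the 192.168.8.20 LoadBalancer IP used in the compiled config below:

```bash
# Any name under the internal domain should answer with the load balancer IP.
dig +short test.internal.cloud2.payne.io @192.168.8.20
```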
37
setup/cluster/coredns/install.sh
Executable file
@@ -0,0 +1,37 @@
#!/bin/bash
set -e

if [ -z "${WC_HOME}" ]; then
    echo "Please source the wildcloud environment first. (e.g., \`source ./env.sh\`)"
    exit 1
fi

CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster"
COREDNS_DIR="${CLUSTER_SETUP_DIR}/coredns"

echo "Setting up CoreDNS for k3s..."

# Process templates with wild-compile-template-dir
echo "Processing CoreDNS templates..."
wild-compile-template-dir --clean ${COREDNS_DIR}/kustomize.template ${COREDNS_DIR}/kustomize

# Apply the k3s-compatible custom DNS override (k3s will preserve this)
echo "Applying CoreDNS custom override configuration..."
kubectl apply -f "${COREDNS_DIR}/kustomize/coredns-custom-config.yaml"

# Apply the LoadBalancer service for external access to CoreDNS
echo "Applying CoreDNS service configuration..."
kubectl apply -f "${COREDNS_DIR}/kustomize/coredns-lb-service.yaml"

# Restart CoreDNS pods to apply the changes
echo "Restarting CoreDNS pods to apply changes..."
kubectl rollout restart deployment/coredns -n kube-system
kubectl rollout status deployment/coredns -n kube-system

echo "CoreDNS setup complete!"
echo
echo "To verify the installation:"
echo "  kubectl get pods -n kube-system"
echo "  kubectl get svc -n kube-system coredns"
echo "  kubectl describe svc -n kube-system coredns"
echo "  kubectl logs -n kube-system -l k8s-app=kube-dns -f"
@@ -0,0 +1,28 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns-custom
  namespace: kube-system
data:
  # Custom server block for internal domains. All internal domains should
  # resolve to the cluster proxy.
  internal.server: |
    {{ .cloud.internalDomain }} {
        errors
        cache 30
        reload
        template IN A {
            match (.*)\.{{ .cloud.internalDomain | strings.ReplaceAll "." "\\." }}\.
            answer "{{`{{ .Name }}`}} 60 IN A {{ .cluster.loadBalancerIp }}"
        }
        template IN AAAA {
            match (.*)\.{{ .cloud.internalDomain | strings.ReplaceAll "." "\\." }}\.
            rcode NXDOMAIN
        }
    }
  # Custom override to set external resolvers.
  external.override: |
    forward . {{ .cloud.dns.externalResolver }} {
        max_concurrent 1000
    }
@@ -0,0 +1,25 @@
---
apiVersion: v1
kind: Service
metadata:
  name: coredns-lb
  namespace: kube-system
  annotations:
    metallb.universe.tf/loadBalancerIPs: "{{ .cluster.loadBalancerIp }}"
spec:
  type: LoadBalancer
  ports:
    - name: dns
      port: 53
      protocol: UDP
      targetPort: 53
    - name: dns-tcp
      port: 53
      protocol: TCP
      targetPort: 53
    - name: metrics
      port: 9153
      protocol: TCP
      targetPort: 9153
  selector:
    k8s-app: kube-dns
@@ -8,21 +8,21 @@ data:
  # Custom server block for internal domains. All internal domains should
  # resolve to the cluster proxy.
  internal.server: |
    internal.cloud.payne.io {
    internal.cloud2.payne.io {
        errors
        cache 30
        reload
        template IN A {
            match (.*)\.internal\.cloud\.payne\.io\.
            answer "{{ .Name }} 60 IN A 192.168.8.240"
            match (.*)\.internal\.cloud2\.payne\.io\.
            answer "{{ .Name }} 60 IN A 192.168.8.20"
        }
        template IN AAAA {
            match (.*)\.internal\.cloud\.payne\.io\.
            match (.*)\.internal\.cloud2\.payne\.io\.
            rcode NXDOMAIN
        }
    }
  # Custom override to set external resolvers.
  external.override: |
    forward . 1.1.1.1 8.8.8.8 {
    forward . 1.1.1.1 {
        max_concurrent 1000
    }
@@ -5,7 +5,7 @@ metadata:
  name: coredns-lb
  namespace: kube-system
  annotations:
    metallb.universe.tf/loadBalancerIPs: "192.168.8.241"
    metallb.universe.tf/loadBalancerIPs: "192.168.8.20"
spec:
  type: LoadBalancer
  ports:
0
setup/cluster/docker-registry/README.md
Normal file
@@ -1,2 +0,0 @@
DOCKER_REGISTRY_STORAGE=10Gi
DOCKER_REGISTRY_HOST=docker-registry.$INTERNAL_DOMAIN
28
setup/cluster/docker-registry/install.sh
Executable file
@@ -0,0 +1,28 @@
#!/bin/bash
set -e

if [ -z "${WC_HOME}" ]; then
    echo "Please source the wildcloud environment first. (e.g., \`source ./env.sh\`)"
    exit 1
fi

CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster"
DOCKER_REGISTRY_DIR="${CLUSTER_SETUP_DIR}/docker-registry"

echo "Setting up Docker Registry..."

# Process templates with wild-compile-template-dir
echo "Processing Docker Registry templates..."
wild-compile-template-dir --clean "${DOCKER_REGISTRY_DIR}/kustomize.template" "${DOCKER_REGISTRY_DIR}/kustomize"

# Apply the docker registry manifests using kustomize
kubectl apply -k "${DOCKER_REGISTRY_DIR}/kustomize"

echo "Waiting for Docker Registry to be ready..."
kubectl wait --for=condition=available --timeout=300s deployment/docker-registry -n docker-registry

echo "Docker Registry setup complete!"

# Show deployment status
kubectl get pods -n docker-registry
kubectl get services -n docker-registry
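Once the registry and its ingress (below) are live, a quick smoke test from a workstation (hostname taken from the compiled manifests; the Docker client must trust the internal wildcard certificate):

    docker pull busybox:latest
    docker tag busybox:latest docker-registry.internal.cloud2.payne.io/busybox
    docker push docker-registry.internal.cloud2.payne.io/busybox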
@@ -1,40 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: docker-registry
labels:
  - includeSelectors: true
    pairs:
      app: docker-registry
      managedBy: wild-cloud
resources:
  - deployment.yaml
  - ingress.yaml
  - service.yaml
  - namespace.yaml
  - pvc.yaml
configMapGenerator:
  - name: docker-registry-config
    envs:
      - config/config.env
replacements:
  - source:
      kind: ConfigMap
      name: docker-registry-config
      fieldPath: data.DOCKER_REGISTRY_STORAGE
    targets:
      - select:
          kind: PersistentVolumeClaim
          name: docker-registry-pvc
        fieldPaths:
          - spec.resources.requests.storage
  - source:
      kind: ConfigMap
      name: docker-registry-config
      fieldPath: data.DOCKER_REGISTRY_HOST
    targets:
      - select:
          kind: Ingress
          name: docker-registry
        fieldPaths:
          - spec.rules.0.host
          - spec.tls.0.hosts.0
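What got deleted here: the replacements block was the old wiring, copying DOCKER_REGISTRY_STORAGE and DOCKER_REGISTRY_HOST from the generated ConfigMap into the PVC size and the Ingress host at kustomize build time. Those values are now substituted when the templates are compiled (see storage: {{ .cluster.dockerRegistry.storage }} in pvc.yaml below), so the configMapGenerator and config/config.env are no longer needed.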
@@ -4,7 +4,7 @@ metadata:
  name: docker-registry
spec:
  rules:
    - host: docker-registry.internal.${DOMAIN}
    - host: {{ .cloud.dockerRegistryHost }}
      http:
        paths:
          - path: /
@@ -16,5 +16,5 @@ spec:
                  number: 5000
  tls:
    - hosts:
        - docker-registry.internal.${DOMAIN}
        - {{ .cloud.dockerRegistryHost }}
      secretName: wildcard-internal-wild-cloud-tls
@@ -0,0 +1,14 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: docker-registry
labels:
  - includeSelectors: true
    pairs:
      app: docker-registry
      managedBy: wild-cloud
resources:
  - deployment.yaml
  - ingress.yaml
  - service.yaml
  - namespace.yaml
  - pvc.yaml
12
setup/cluster/docker-registry/kustomize.template/pvc.yaml
Normal file
@@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: docker-registry-pvc
spec:
  storageClassName: longhorn
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: {{ .cluster.dockerRegistry.storage }}
36
setup/cluster/docker-registry/kustomize/deployment.yaml
Normal file
@@ -0,0 +1,36 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: docker-registry
  labels:
    app: docker-registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: docker-registry
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: docker-registry
    spec:
      containers:
        - image: registry:3.0.0
          name: docker-registry
          ports:
            - containerPort: 5000
              protocol: TCP
          volumeMounts:
            - mountPath: /var/lib/registry
              name: docker-registry-storage
              readOnly: false
      volumes:
        - name: docker-registry-storage
          persistentVolumeClaim:
            claimName: docker-registry-pvc
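The update strategy above is deliberate: the registry's volume is ReadWriteOnce, so only one pod can mount it at a time. maxSurge: 0 with maxUnavailable: 1 makes a rollout stop the old pod (releasing the volume) before starting its replacement, trading a brief outage for an update that can actually complete.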
20
setup/cluster/docker-registry/kustomize/ingress.yaml
Normal file
@@ -0,0 +1,20 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: docker-registry
spec:
  rules:
    - host: docker-registry.internal.cloud2.payne.io
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: docker-registry
                port:
                  number: 5000
  tls:
    - hosts:
        - docker-registry.internal.cloud2.payne.io
      secretName: wildcard-internal-wild-cloud-tls
14
setup/cluster/docker-registry/kustomize/kustomization.yaml
Normal file
@@ -0,0 +1,14 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: docker-registry
labels:
  - includeSelectors: true
    pairs:
      app: docker-registry
      managedBy: wild-cloud
resources:
  - deployment.yaml
  - ingress.yaml
  - service.yaml
  - namespace.yaml
  - pvc.yaml
4
setup/cluster/docker-registry/kustomize/namespace.yaml
Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: docker-registry
13
setup/cluster/docker-registry/kustomize/service.yaml
Normal file
@@ -0,0 +1,13 @@
---
apiVersion: v1
kind: Service
metadata:
  name: docker-registry
  labels:
    app: docker-registry
spec:
  ports:
    - port: 5000
      targetPort: 5000
  selector:
    app: docker-registry
42
setup/cluster/externaldns/install.sh
Executable file
@@ -0,0 +1,42 @@
#!/bin/bash
set -e

if [ -z "${WC_HOME}" ]; then
    echo "Please source the wildcloud environment first. (e.g., \`source ./env.sh\`)"
    exit 1
fi

CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster"
EXTERNALDNS_DIR="${CLUSTER_SETUP_DIR}/externaldns"

# Process templates with wild-compile-template-dir
echo "Processing ExternalDNS templates..."
wild-compile-template-dir --clean "${EXTERNALDNS_DIR}/kustomize.template" "${EXTERNALDNS_DIR}/kustomize"

echo "Setting up ExternalDNS..."

# Apply ExternalDNS manifests using kustomize
echo "Deploying ExternalDNS..."
kubectl apply -k "${EXTERNALDNS_DIR}/kustomize"

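# Note: a client-side dry run piped into `kubectl apply` keeps the secret
# step below idempotent: re-running install.sh updates the token in place
# instead of failing because the secret already exists.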
# Setup Cloudflare API token secret
echo "Creating Cloudflare API token secret..."
CLOUDFLARE_API_TOKEN=$(wild-secret cluster.certManager.cloudflare.apiToken) || exit 1
kubectl create secret generic cloudflare-api-token \
    --namespace externaldns \
    --from-literal=api-token="${CLOUDFLARE_API_TOKEN}" \
    --dry-run=client -o yaml | kubectl apply -f -

# Wait for ExternalDNS to be ready
echo "Waiting for Cloudflare ExternalDNS to be ready..."
kubectl rollout status deployment/external-dns -n externaldns --timeout=60s

# echo "Waiting for CoreDNS ExternalDNS to be ready..."
# kubectl rollout status deployment/external-dns-coredns -n externaldns --timeout=60s

echo "ExternalDNS setup complete!"
echo ""
echo "To verify the installation:"
echo "  kubectl get pods -n externaldns"
echo "  kubectl logs -n externaldns -l app=external-dns -f"
echo "  kubectl logs -n externaldns -l app=external-dns-coredns -f"
@@ -0,0 +1,39 @@
---
# CloudFlare provider for ExternalDNS
apiVersion: apps/v1
kind: Deployment
metadata:
  name: external-dns
  namespace: externaldns
spec:
  selector:
    matchLabels:
      app: external-dns
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: external-dns
    spec:
      serviceAccountName: external-dns
      containers:
        - name: external-dns
          image: registry.k8s.io/external-dns/external-dns:v0.13.4
          args:
            - --source=service
            - --source=ingress
            - --txt-owner-id={{ .cluster.externalDns.ownerId }}
            - --provider=cloudflare
            - --domain-filter=payne.io
            #- --exclude-domains=internal.${DOMAIN}
            - --cloudflare-dns-records-per-page=5000
            - --publish-internal-services
            - --no-cloudflare-proxied
            - --log-level=debug
          env:
            - name: CF_API_TOKEN
              valueFrom:
                secretKeyRef:
                  name: cloudflare-api-token
                  key: api-token
@@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - namespace.yaml
  - externaldns-rbac.yaml
  - externaldns-cloudflare.yaml
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: externaldns
@@ -23,7 +23,7 @@ spec:
          args:
            - --source=service
            - --source=ingress
            - --txt-owner-id=${OWNER_ID}
            - --txt-owner-id=cloud-payne-io-cluster
            - --provider=cloudflare
            - --domain-filter=payne.io
            #- --exclude-domains=internal.${DOMAIN}
35
setup/cluster/externaldns/kustomize/externaldns-rbac.yaml
Normal file
@@ -0,0 +1,35 @@
---
# Common RBAC resources for all ExternalDNS deployments
apiVersion: v1
kind: ServiceAccount
metadata:
  name: external-dns
  namespace: externaldns
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: external-dns
rules:
  - apiGroups: [""]
    resources: ["services", "endpoints", "pods"]
    verbs: ["get", "watch", "list"]
  - apiGroups: ["extensions", "networking.k8s.io"]
    resources: ["ingresses"]
    verbs: ["get", "watch", "list"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: external-dns-viewer
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-dns
subjects:
  - kind: ServiceAccount
    name: external-dns
    namespace: externaldns
7
setup/cluster/externaldns/kustomize/kustomization.yaml
Normal file
@@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - namespace.yaml
  - externaldns-rbac.yaml
  - externaldns-cloudflare.yaml
4
setup/cluster/externaldns/kustomize/namespace.yaml
Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: externaldns
34
setup/cluster/install-all.sh
Executable file
@@ -0,0 +1,34 @@
#!/bin/bash
set -e

# Navigate to script directory
SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")"
SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
cd "$SCRIPT_DIR"

echo "Setting up your wild-cloud cluster services..."
echo

./metallb/install.sh
./longhorn/install.sh
./traefik/install.sh
./coredns/install.sh
./cert-manager/install.sh
./externaldns/install.sh
./kubernetes-dashboard/install.sh
./nfs/install.sh
./docker-registry/install.sh

echo "Infrastructure setup complete!"
echo
echo "Next steps:"
echo "1. Install Helm charts for non-infrastructure components"
INTERNAL_DOMAIN=$(wild-config cloud.internalDomain)
echo "2. Access the dashboard at: https://dashboard.${INTERNAL_DOMAIN}"
echo "3. Get the dashboard token with: ./bin/dashboard-token"
echo
echo "To verify components, run:"
echo "- kubectl get pods -n cert-manager"
echo "- kubectl get pods -n externaldns"
echo "- kubectl get pods -n kubernetes-dashboard"
echo "- kubectl get clusterissuers"
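For reference, a typical end-to-end invocation (a sketch; it assumes env.sh sits at the wild-cloud root, as the per-component scripts' error message suggests):

    source ./env.sh
    ./setup/cluster/install-all.sh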
0
setup/cluster/kubernetes-dashboard/README.md
Normal file
60
setup/cluster/kubernetes-dashboard/install.sh
Executable file
@@ -0,0 +1,60 @@
#!/bin/bash
set -e

if [ -z "${WC_HOME}" ]; then
    echo "Please source the wildcloud environment first. (e.g., \`source ./env.sh\`)"
    exit 1
fi

CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster"
KUBERNETES_DASHBOARD_DIR="${CLUSTER_SETUP_DIR}/kubernetes-dashboard"

echo "Setting up Kubernetes Dashboard..."

# Process templates with wild-compile-template-dir
echo "Processing Dashboard templates..."
wild-compile-template-dir --clean "${KUBERNETES_DASHBOARD_DIR}/kustomize.template" "${KUBERNETES_DASHBOARD_DIR}/kustomize"

NAMESPACE="kubernetes-dashboard"

# Apply the official dashboard installation
echo "Installing Kubernetes Dashboard core components..."
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml

# Wait for cert-manager certificates to be ready
echo "Waiting for cert-manager certificates to be ready..."
kubectl wait --for=condition=Ready certificate wildcard-internal-wild-cloud -n cert-manager --timeout=300s || echo "Warning: Internal wildcard certificate not ready yet"
kubectl wait --for=condition=Ready certificate wildcard-wild-cloud -n cert-manager --timeout=300s || echo "Warning: Wildcard certificate not ready yet"

# Copy cert-manager secrets to the dashboard namespace (if available)
echo "Copying cert-manager secrets to dashboard namespace..."
if kubectl get secret wildcard-internal-wild-cloud-tls -n cert-manager >/dev/null 2>&1; then
    copy-secret cert-manager:wildcard-internal-wild-cloud-tls $NAMESPACE
else
    echo "Warning: wildcard-internal-wild-cloud-tls secret not yet available"
fi

if kubectl get secret wildcard-wild-cloud-tls -n cert-manager >/dev/null 2>&1; then
    copy-secret cert-manager:wildcard-wild-cloud-tls $NAMESPACE
else
    echo "Warning: wildcard-wild-cloud-tls secret not yet available"
fi

# Apply dashboard customizations using kustomize
echo "Applying dashboard customizations..."
kubectl apply -k "${KUBERNETES_DASHBOARD_DIR}/kustomize"

# Restart CoreDNS to pick up the changes
kubectl delete pods -n kube-system -l k8s-app=kube-dns
echo "Restarted CoreDNS to pick up DNS changes"

# Wait for dashboard to be ready
echo "Waiting for Kubernetes Dashboard to be ready..."
kubectl rollout status deployment/kubernetes-dashboard -n $NAMESPACE --timeout=60s

echo "Kubernetes Dashboard setup complete!"
INTERNAL_DOMAIN=$(wild-config cloud.internalDomain) || exit 1
echo "Access the dashboard at: https://dashboard.${INTERNAL_DOMAIN}"
echo ""
echo "To get the authentication token, run:"
echo "wild-dashboard-token"
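If wild-dashboard-token is not available, the token can be read straight from the dashboard-admin-token Secret defined in dashboard-admin-rbac.yaml below:

    kubectl get secret dashboard-admin-token -n kubernetes-dashboard \
      -o jsonpath='{.data.token}' | base64 -d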
@@ -1,6 +1,6 @@
---
# Internal-only middleware
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: internal-only
@@ -16,7 +16,7 @@ spec:

---
# HTTPS redirect middleware
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: dashboard-redirect-scheme
@@ -28,7 +28,7 @@ spec:

---
# IngressRoute for Dashboard
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: kubernetes-dashboard-https
@@ -37,7 +37,7 @@ spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`dashboard.internal.${DOMAIN}`)
    - match: Host(`dashboard.{{ .cloud.internalDomain }}`)
      kind: Rule
      middlewares:
        - name: internal-only
@@ -52,7 +52,7 @@ spec:
---
# HTTP to HTTPS redirect.
# FIXME: Is this needed?
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: kubernetes-dashboard-http
@@ -61,7 +61,7 @@ spec:
  entryPoints:
    - web
  routes:
    - match: Host(`dashboard.internal.${DOMAIN}`)
    - match: Host(`dashboard.{{ .cloud.internalDomain }}`)
      kind: Rule
      middlewares:
        - name: dashboard-redirect-scheme
@@ -74,11 +74,11 @@ spec:
---
# ServersTransport for HTTPS backend with skip verify.
# FIXME: Is this needed?
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: ServersTransport
metadata:
  name: dashboard-transport
  namespace: kubernetes-dashboard
spec:
  insecureSkipVerify: true
  serverName: dashboard.internal.${DOMAIN}
  serverName: dashboard.{{ .cloud.internalDomain }}
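The apiVersion changes above track Traefik's CRD migration: the traefik.containo.us group is deprecated and dropped in Traefik v3, with traefik.io/v1alpha1 as the replacement. To confirm the installed Traefik actually serves the new group:

    kubectl api-resources --api-group=traefik.io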
@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - dashboard-admin-rbac.yaml
  - dashboard-kube-system.yaml
@@ -0,0 +1,32 @@
---
# Service Account and RBAC for Dashboard admin access
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kubernetes-dashboard

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kubernetes-dashboard
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io

---
# Token for dashboard-admin
apiVersion: v1
kind: Secret
metadata:
  name: dashboard-admin-token
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: dashboard-admin
type: kubernetes.io/service-account-token
@@ -0,0 +1,84 @@
---
# Internal-only middleware
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: internal-only
  namespace: kubernetes-dashboard
spec:
  ipWhiteList:
    # Restrict to local private network ranges
    sourceRange:
      - 127.0.0.1/32 # localhost
      - 10.0.0.0/8 # Private network
      - 172.16.0.0/12 # Private network
      - 192.168.0.0/16 # Private network

---
# HTTPS redirect middleware
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: dashboard-redirect-scheme
  namespace: kubernetes-dashboard
spec:
  redirectScheme:
    scheme: https
    permanent: true

---
# IngressRoute for Dashboard
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: kubernetes-dashboard-https
  namespace: kubernetes-dashboard
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`dashboard.internal.cloud2.payne.io`)
      kind: Rule
      middlewares:
        - name: internal-only
          namespace: kubernetes-dashboard
      services:
        - name: kubernetes-dashboard
          port: 443
          serversTransport: dashboard-transport
  tls:
    secretName: wildcard-internal-wild-cloud-tls

---
# HTTP to HTTPS redirect.
# FIXME: Is this needed?
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: kubernetes-dashboard-http
  namespace: kubernetes-dashboard
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`dashboard.internal.cloud2.payne.io`)
      kind: Rule
      middlewares:
        - name: dashboard-redirect-scheme
          namespace: kubernetes-dashboard
      services:
        - name: kubernetes-dashboard
          port: 443
          serversTransport: dashboard-transport

---
# ServersTransport for HTTPS backend with skip verify.
# FIXME: Is this needed?
apiVersion: traefik.io/v1alpha1
kind: ServersTransport
metadata:
  name: dashboard-transport
  namespace: kubernetes-dashboard
spec:
  insecureSkipVerify: true
  serverName: dashboard.internal.cloud2.payne.io
@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - dashboard-admin-rbac.yaml
  - dashboard-kube-system.yaml
21
setup/cluster/longhorn/install.sh
Executable file
@@ -0,0 +1,21 @@
#!/bin/bash
set -e

if [ -z "${WC_HOME}" ]; then
    echo "Please source the wildcloud environment first. (e.g., \`source ./env.sh\`)"
    exit 1
fi

CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster"
LONGHORN_DIR="${CLUSTER_SETUP_DIR}/longhorn"

echo "Setting up Longhorn..."

# Process templates with wild-compile-template-dir
echo "Processing Longhorn templates..."
wild-compile-template-dir --clean "${LONGHORN_DIR}/kustomize.template" "${LONGHORN_DIR}/kustomize"

# Apply Longhorn with kustomize to apply our customizations
kubectl apply -k "${LONGHORN_DIR}/kustomize/"

echo "Longhorn setup complete!"
@@ -4,6 +4,10 @@ apiVersion: v1
kind: Namespace
metadata:
  name: longhorn-system
  labels:
    pod-security.kubernetes.io/enforce: privileged
    pod-security.kubernetes.io/audit: privileged
    pod-security.kubernetes.io/warn: privileged
---
# Source: longhorn/templates/priorityclass.yaml
apiVersion: scheduling.k8s.io/v1
5
setup/cluster/longhorn/kustomize/kustomization.yaml
Normal file
@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - longhorn.yaml
5189
setup/cluster/longhorn/kustomize/longhorn.yaml
Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff