Split out wild-setup into three phase scripts.
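The intended top-level flow after this split, sketched from the help text of the new scripts (the node IP below is a placeholder):

    # Run everything in sequence:
    wild-setup
    # Or drive the pieces individually:
    wild-cluster-config-generate                               # cluster secrets and base Talos configs
    wild-cluster-node-machine-config-generate 192.168.1.91     # per-node machine config
    wild-cluster-node-up 192.168.1.91 --insecure               # apply config to a maintenance-mode node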
212  bin/wild-cluster-config-generate  Executable file
@@ -0,0 +1,212 @@
#!/bin/bash

set -e
set -o pipefail

# Get WC_ROOT (where this script and templates live)
WC_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.."; pwd)"
export WC_ROOT

# Set up cloud directory (WC_HOME is where user's cloud will be)
WC_HOME="$(pwd)"
export WC_HOME

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Helper functions
print_header() {
    echo -e "\n${BLUE}=== $1 ===${NC}\n"
}

print_info() {
    echo -e "${BLUE}INFO:${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}WARNING:${NC} $1"
}

print_success() {
    echo -e "${GREEN}SUCCESS:${NC} $1"
}

print_error() {
    echo -e "${RED}ERROR:${NC} $1"
}

# Function to get current config value safely
get_current_config() {
    local key="$1"
    if [ -f "${WC_HOME}/config.yaml" ]; then
        set +e
        result=$(wild-config "${key}" 2>/dev/null)
        set -e
        echo "${result}"
    else
        echo ""
    fi
}

# Usage function
usage() {
    echo "Usage: wild-cluster-config-generate [options]"
    echo ""
    echo "Generate initial Talos cluster configuration using talosctl gen config."
    echo ""
    echo "Options:"
    echo "  -h, --help    Show this help message"
    echo "  --force       Force regeneration even if config already exists"
    echo ""
    echo "This script will:"
    echo "  - Generate initial cluster secrets and configurations"
    echo "  - Create base controlplane.yaml and worker.yaml templates"
    echo "  - Set up the foundation for node-specific machine configs"
    echo ""
    echo "Requirements:"
    echo "  - Must be run from a wild-cloud directory"
    echo "  - Cluster name and VIP must be configured"
    echo "  - talosctl must be available in PATH"
}

# Parse arguments
FORCE=false
while [[ $# -gt 0 ]]; do
    case $1 in
        -h|--help)
            usage
            exit 0
            ;;
        --force)
            FORCE=true
            shift
            ;;
        -*)
            echo "Unknown option $1"
            usage
            exit 1
            ;;
        *)
            echo "Unexpected argument: $1"
            usage
            exit 1
            ;;
    esac
done

# Check if we're in a wild-cloud directory
if [ ! -d ".wildcloud" ]; then
    print_error "You must run this script from a wild-cloud directory"
    print_info "Run 'wild-setup' or 'wild-init' first to initialize a wild-cloud project"
    exit 1
fi

# Check if talosctl is available
if ! command -v talosctl >/dev/null 2>&1; then
    print_error "talosctl not found in PATH"
    print_info "Please install talosctl to generate cluster configurations"
    exit 1
fi

# =============================================================================
# CLUSTER CONFIGURATION GENERATION
# =============================================================================

print_header "Talos Cluster Configuration Generation"

# Ensure required directories exist
NODE_SETUP_DIR="${WC_HOME}/setup/cluster-nodes"
mkdir -p "${NODE_SETUP_DIR}/generated"

# Check if cluster configuration already exists
if [ -f "${NODE_SETUP_DIR}/generated/secrets.yaml" ] && [ "$FORCE" = false ]; then
    print_success "Cluster configuration already exists"
    print_info "Generated files:"
    for file in "${NODE_SETUP_DIR}/generated"/*.yaml; do
        if [ -f "$file" ]; then
            print_info "  - $(basename "$file")"
        fi
    done
    echo ""
    print_info "Use --force to regenerate cluster configuration"
    exit 0
fi

# Get cluster configuration
CLUSTER_NAME=$(get_current_config "cluster.name")
VIP=$(get_current_config "cluster.nodes.control.vip")

# Validate required configuration
if [ -z "$CLUSTER_NAME" ] || [ "$CLUSTER_NAME" = "null" ]; then
    print_error "Cluster name not configured"
    print_info "Please run 'wild-setup' first to configure cluster.name"
    exit 1
fi

if [ -z "$VIP" ] || [ "$VIP" = "null" ]; then
    print_error "Control plane VIP not configured"
    print_info "Please run 'wild-setup' first to configure cluster.nodes.control.vip"
    exit 1
fi

# Generate cluster configuration
print_info "Generating initial cluster configuration..."
print_info "Cluster name: $CLUSTER_NAME"
print_info "Control plane endpoint: https://$VIP:6443"

if [ "$FORCE" = true ] && [ -d "${NODE_SETUP_DIR}/generated" ]; then
    print_warning "Removing existing cluster configuration..."
    rm -rf "${NODE_SETUP_DIR}/generated"
    mkdir -p "${NODE_SETUP_DIR}/generated"
fi

cd "${NODE_SETUP_DIR}/generated"
talosctl gen config "$CLUSTER_NAME" "https://$VIP:6443"
cd - >/dev/null

# Verify generated files
REQUIRED_FILES=("secrets.yaml" "controlplane.yaml" "worker.yaml" "talosconfig")
MISSING_FILES=()

for file in "${REQUIRED_FILES[@]}"; do
    if [ ! -f "${NODE_SETUP_DIR}/generated/$file" ]; then
        MISSING_FILES+=("$file")
    fi
done

if [ ${#MISSING_FILES[@]} -gt 0 ]; then
    print_error "Some required files were not generated:"
    for file in "${MISSING_FILES[@]}"; do
        print_error "  - $file"
    done
    exit 1
fi

print_success "Cluster configuration generated successfully!"
echo ""
print_info "Generated files:"
for file in "${NODE_SETUP_DIR}/generated"/*.yaml "${NODE_SETUP_DIR}/generated/talosconfig"; do
    if [ -f "$file" ]; then
        filesize=$(du -h "$file" | cut -f1)
        print_success "  ✓ $(basename "$file") ($filesize)"
    fi
done

echo ""
print_info "Configuration details:"
print_info "  - Cluster name: $CLUSTER_NAME"
print_info "  - Control plane endpoint: https://$VIP:6443"
print_info "  - Generated in: ${NODE_SETUP_DIR}/generated/"

echo ""
print_info "Next steps:"
echo "  1. Node-specific machine configs can now be generated"
echo "  2. Use wild-cluster-node-machine-config-generate <ip> for each node"
echo "  3. Apply configs to nodes with talosctl apply-config"
echo "  4. Bootstrap the first control plane node"

print_success "Cluster configuration generation completed!"
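For reference, the script wraps a single talosctl invocation; a minimal manual equivalent, assuming placeholder values for the cluster name and VIP, is:

    cd setup/cluster-nodes/generated
    talosctl gen config my-cluster "https://192.168.1.90:6443"   # writes secrets.yaml, controlplane.yaml, worker.yaml, talosconfig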
@@ -154,33 +154,9 @@ NODE_SETUP_DIR="${WC_HOME}/setup/cluster-nodes"
 
 # Check if cluster has been initialized
 if [ ! -f "${NODE_SETUP_DIR}/generated/secrets.yaml" ]; then
-    print_error "Cluster not initialized. You need to run cluster initialization first."
-    print_info "This typically involves running talosctl gen config to generate initial secrets."
-
-    read -p "Do you want to generate initial cluster secrets now? (y/N): " -r generate_secrets
-    if [[ $generate_secrets =~ ^[Yy]$ ]]; then
-        # Generate cluster secrets
-        CLUSTER_NAME=$(wild-config cluster.name)
-        VIP=$(wild-config cluster.nodes.control.vip)
-
-        if [ -z "$CLUSTER_NAME" ] || [ -z "$VIP" ]; then
-            print_error "Missing cluster configuration. cluster.name and cluster.nodes.control.vip are required."
-            print_info "Run 'wild-setup' or 'wild-init' first to configure your cluster"
-            exit 1
-        fi
-
-        print_info "Generating initial cluster configuration..."
-        mkdir -p "${NODE_SETUP_DIR}/generated"
-
-        cd "${NODE_SETUP_DIR}/generated"
-        talosctl gen config "$CLUSTER_NAME" "https://$VIP:6443"
-        cd - >/dev/null
-
-        print_success "Initial cluster configuration generated"
-    else
-        print_warning "Skipping machine config generation - cluster secrets required"
-        exit 1
-    fi
+    print_error "Cluster not initialized. Base cluster configuration is required."
+    print_info "Run 'wild-cluster-config-generate' first to generate cluster secrets and base configs"
+    exit 1
 fi
 
 # Get cluster configuration from config.yaml
@@ -223,13 +199,15 @@ print_info "Compiling patch template for $NODE_TYPE node $NODE_IP..."
 if [ "$NODE_TYPE" = "control" ]; then
     TEMPLATE_FILE="${TEMPLATE_SOURCE_DIR}/patch.templates/controlplane.yaml"
     BASE_CONFIG="${NODE_SETUP_DIR}/generated/controlplane.yaml"
-    OUTPUT_CONFIG="${NODE_SETUP_DIR}/final/controlplane-${NODE_IP}.yaml"
 else
     TEMPLATE_FILE="${TEMPLATE_SOURCE_DIR}/patch.templates/worker.yaml"
     BASE_CONFIG="${NODE_SETUP_DIR}/generated/worker.yaml"
-    OUTPUT_CONFIG="${NODE_SETUP_DIR}/final/worker-${NODE_IP}.yaml"
 fi
 
+# Use IP as the patch name and output config name
+PATCH_FILE="${NODE_SETUP_DIR}/patch/${NODE_IP}.yaml"
+OUTPUT_CONFIG="${NODE_SETUP_DIR}/final/${NODE_IP}.yaml"
+
 # Check if the patch template exists
 if [ ! -f "$TEMPLATE_FILE" ]; then
     print_error "Patch template not found: $TEMPLATE_FILE"
@@ -238,14 +216,14 @@ if [ ! -f "$TEMPLATE_FILE" ]; then
 fi
 
 # Create a temporary template with the node IP for gomplate processing
-TEMP_TEMPLATE="/tmp/${NODE_TYPE}-${NODE_IP}-$(date +%s).yaml"
+TEMP_TEMPLATE="/tmp/${NODE_IP//\//_}-$(date +%s).yaml"
 sed "s/{{NODE_IP}}/${NODE_IP}/g" "$TEMPLATE_FILE" > "$TEMP_TEMPLATE"
-cat "$TEMP_TEMPLATE" | wild-compile-template > "${NODE_SETUP_DIR}/patch/${NODE_TYPE}-${NODE_IP}.yaml"
+cat "$TEMP_TEMPLATE" | wild-compile-template > "$PATCH_FILE"
 rm -f "$TEMP_TEMPLATE"
 
 # Generate final machine config for the specified node
 print_info "Generating final machine configuration..."
-talosctl machineconfig patch "$BASE_CONFIG" --patch @"${NODE_SETUP_DIR}/patch/${NODE_TYPE}-${NODE_IP}.yaml" -o "$OUTPUT_CONFIG"
+talosctl machineconfig patch "$BASE_CONFIG" --patch @"$PATCH_FILE" -o "$OUTPUT_CONFIG"
 
 # Update talosctl context with this node
 print_info "Updating talosctl context..."
@@ -254,7 +232,7 @@ talosctl config node "$NODE_IP"
 print_success "Machine configuration generated successfully!"
 echo ""
 print_info "Generated files:"
-print_info "  - Patch: ${NODE_SETUP_DIR}/patch/${NODE_TYPE}-${NODE_IP}.yaml"
+print_info "  - Patch: $PATCH_FILE"
 print_info "  - Final config: $OUTPUT_CONFIG"
 echo ""
 print_info "Template used: ${TEMPLATE_FILE}"
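With this change the per-node artifacts are keyed by IP alone rather than by node type; a run such as the following (hypothetical IP) writes both files under setup/cluster-nodes/:

    wild-cluster-node-machine-config-generate 192.168.1.91
    # -> setup/cluster-nodes/patch/192.168.1.91.yaml
    # -> setup/cluster-nodes/final/192.168.1.91.yaml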
245  bin/wild-cluster-node-up  Executable file
@@ -0,0 +1,245 @@
#!/bin/bash

set -e
set -o pipefail

# Get WC_ROOT (where this script and templates live)
WC_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
export WC_ROOT

# Set up cloud directory (WC_HOME is where user's cloud will be)
WC_HOME="$(pwd)"
export WC_HOME

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Helper functions
print_header() {
    echo -e "\n${BLUE}=== $1 ===${NC}\n"
}

print_info() {
    echo -e "${BLUE}INFO:${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}WARNING:${NC} $1"
}

print_success() {
    echo -e "${GREEN}SUCCESS:${NC} $1"
}

print_error() {
    echo -e "${RED}ERROR:${NC} $1"
}

# Function to get current config value safely
get_current_config() {
    local key="$1"
    if [ -f "${WC_HOME}/config.yaml" ]; then
        set +e
        result=$(wild-config "${key}" 2>/dev/null)
        set -e
        echo "${result}"
    else
        echo ""
    fi
}

# Usage function
usage() {
    echo "Usage: wild-cluster-node-up <node-ip> [options]"
    echo ""
    echo "Apply Talos machine configuration to a registered node."
    echo ""
    echo "Arguments:"
    echo "  node-ip           IP address of the registered node"
    echo ""
    echo "Options:"
    echo "  -i, --insecure    Apply configuration in insecure mode (for maintenance mode nodes)"
    echo "  --dry-run         Show the command that would be executed without running it"
    echo "  -h, --help        Show this help message"
    echo ""
    echo "Examples:"
    echo "  wild-cluster-node-up 192.168.1.91"
    echo "  wild-cluster-node-up 192.168.1.100 --insecure"
    echo "  wild-cluster-node-up 192.168.1.100 --dry-run"
    echo ""
    echo "This script will:"
    echo "  - Verify the node is registered in config.yaml"
    echo "  - Check that a machine configuration exists for the node"
    echo "  - Apply the configuration using talosctl apply-config"
    echo "  - Use insecure mode for nodes in maintenance mode"
    echo ""
    echo "Requirements:"
    echo "  - Must be run from a wild-cloud directory"
    echo "  - Node must be registered (hardware detected) first"
    echo "  - Machine configuration must exist for the node"
}

# Parse arguments
NODE_IP=""
INSECURE_MODE=false
DRY_RUN=false

while [[ $# -gt 0 ]]; do
    case $1 in
        -i|--insecure)
            INSECURE_MODE=true
            shift
            ;;
        --dry-run)
            DRY_RUN=true
            shift
            ;;
        -h|--help)
            usage
            exit 0
            ;;
        -*)
            echo "Unknown option $1"
            usage
            exit 1
            ;;
        *)
            if [ -z "$NODE_IP" ]; then
                NODE_IP="$1"
            else
                echo "Unexpected argument: $1"
                usage
                exit 1
            fi
            shift
            ;;
    esac
done

# Check if node IP was provided
if [ -z "$NODE_IP" ]; then
    echo "Error: Node IP address is required"
    usage
    exit 1
fi

# Check if we're in a wild-cloud directory
if [ ! -d ".wildcloud" ]; then
    print_error "You must run this script from a wild-cloud directory"
    print_info "Run 'wild-setup' or 'wild-init' first to initialize a wild-cloud project"
    exit 1
fi

# Check required configuration
if [ -z "$(get_current_config "cluster.name")" ]; then
    print_error "Basic cluster configuration is missing"
    print_info "Run 'wild-setup' or 'wild-init' first to configure your cluster"
    exit 1
fi

print_header "Talos Node Configuration Application"

# Check if the specified node is registered
NODE_INTERFACE=$(yq eval ".cluster.nodes.active.\"${NODE_IP}\".interface" "${WC_HOME}/config.yaml" 2>/dev/null)
NODE_DISK=$(yq eval ".cluster.nodes.active.\"${NODE_IP}\".disk" "${WC_HOME}/config.yaml" 2>/dev/null)
IS_CONTROL=$(yq eval ".cluster.nodes.active.\"${NODE_IP}\".control" "${WC_HOME}/config.yaml" 2>/dev/null)

if [ -z "$NODE_INTERFACE" ] || [ "$NODE_INTERFACE" = "null" ]; then
    print_error "Node $NODE_IP is not registered in config.yaml"
    print_info "Please register the node first by running:"
    print_info "  wild-node-detect $NODE_IP"
    print_info "Or run 'wild-setup' to register nodes interactively"
    exit 1
fi

# Determine node type
if [ "$IS_CONTROL" = "true" ]; then
    NODE_TYPE="control plane"
else
    NODE_TYPE="worker"
fi

print_info "Applying configuration to $NODE_TYPE node: $NODE_IP"
print_info "Node details:"
print_info "  - Interface: $NODE_INTERFACE"
print_info "  - Disk: $NODE_DISK"
print_info "  - Type: $NODE_TYPE"

# Check if machine config exists
NODE_SETUP_DIR="${WC_HOME}/setup/cluster-nodes"
CONFIG_FILE="${NODE_SETUP_DIR}/final/${NODE_IP}.yaml"

if [ ! -f "$CONFIG_FILE" ]; then
    print_error "Machine configuration not found: $CONFIG_FILE"
    print_info "Generate the machine configuration first:"
    print_info "  wild-cluster-node-machine-config-generate $NODE_IP"
    exit 1
fi

print_success "Found machine configuration: $CONFIG_FILE"

# Build talosctl command
TALOSCTL_CMD="talosctl apply-config"

if [ "$INSECURE_MODE" = true ]; then
    TALOSCTL_CMD="$TALOSCTL_CMD --insecure"
    print_info "Using insecure mode (for maintenance mode nodes)"
fi

TALOSCTL_CMD="$TALOSCTL_CMD --nodes $NODE_IP --file $CONFIG_FILE"

# Show the command
echo ""
print_info "Command to execute:"
echo "  $TALOSCTL_CMD"
echo ""

if [ "$DRY_RUN" = true ]; then
    print_info "Dry run mode - command shown above but not executed"
    exit 0
fi

# Apply the configuration
print_info "Applying machine configuration..."
echo ""

if eval "$TALOSCTL_CMD"; then
    print_success "Machine configuration applied successfully!"
    echo ""

    if [ "$IS_CONTROL" = "true" ]; then
        print_info "Next steps for control plane node:"
        echo "  1. Wait for the node to reboot and come up with the new configuration"
        echo "  2. If this is your first control plane node, bootstrap it:"
        echo "     talosctl bootstrap --nodes $NODE_IP"
        echo "  3. Get kubeconfig when cluster is ready:"
        echo "     talosctl kubeconfig"
    else
        print_info "Next steps for worker node:"
        echo "  1. Wait for the node to reboot and come up with the new configuration"
        echo "  2. Node will join the cluster automatically"
        echo "  3. Verify the node appears in the cluster:"
        echo "     kubectl get nodes"
    fi

    echo ""
    print_info "Monitor node status with:"
    echo "  talosctl --nodes $NODE_IP dmesg"
    echo "  talosctl --nodes $NODE_IP get members"

else
    print_error "Failed to apply machine configuration"
    echo ""
    print_info "Troubleshooting tips:"
    echo "  - Ensure the node is accessible at $NODE_IP"
    echo "  - For nodes in maintenance mode, use --insecure flag"
    echo "  - Check network connectivity and firewall settings"
    echo "  - Verify the machine configuration file is valid"
    exit 1
fi

print_success "Node configuration completed!"
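Typical usage, following the examples in the script's own help text (IPs are placeholders): preview the command, apply to a node still in maintenance mode, then bootstrap only the first control plane node.

    wild-cluster-node-up 192.168.1.100 --dry-run    # print the talosctl apply-config command only
    wild-cluster-node-up 192.168.1.100 --insecure   # apply to a maintenance-mode node
    talosctl bootstrap --nodes 192.168.1.100        # first control plane node only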
804  bin/wild-setup
@@ -8,7 +8,7 @@ WC_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
 export WC_ROOT
 
 # =============================================================================
-# HELPER FUNCTIONS (used by all phases)
+# HELPER FUNCTIONS
 # =============================================================================
 
 # Colors for output
@@ -39,140 +39,62 @@ print_error() {
     echo -e "${RED}ERROR:${NC} $1"
 }
 
-# Function to prompt for input with default value
-prompt_with_default() {
-    local prompt="$1"
-    local default="$2"
-    local current_value="$3"
-    local result
-
-    if [ -n "${current_value}" ] && [ "${current_value}" != "null" ]; then
-        printf "%s [current: %s]: " "${prompt}" "${current_value}" >&2
-        read -r result
-        if [ -z "${result}" ]; then
-            result="${current_value}"
-        fi
-    elif [ -n "${default}" ]; then
-        printf "%s [default: %s]: " "${prompt}" "${default}" >&2
-        read -r result
-        if [ -z "${result}" ]; then
-            result="${default}"
-        fi
-    else
-        printf "%s: " "${prompt}" >&2
-        read -r result
-        while [ -z "${result}" ]; do
-            printf "This value is required. Please enter a value: " >&2
-            read -r result
-        done
-    fi
-
-    echo "${result}"
-}
-
-# Function to get current config value safely
-get_current_config() {
-    local key="$1"
-    if [ -f "${WC_HOME}/config.yaml" ]; then
-        set +e
-        result=$(wild-config "${key}" 2>/dev/null)
-        set -e
-        echo "${result}"
-    else
-        echo ""
-    fi
-}
-
-# Function to get current secret value safely
-get_current_secret() {
-    local key="$1"
-    if [ -f "${WC_HOME}/secrets.yaml" ]; then
-        set +e
-        result=$(wild-secret "${key}" 2>/dev/null)
-        set -e
-        echo "${result}"
-    else
-        echo ""
-    fi
-}
-
-UPDATE=false
-
 # Phase tracking variables
-SKIP_INSTALLER=false
-SKIP_HARDWARE=false
-SKIP_CONFIGS=false
-SKIP_INSTALL=false
+SKIP_SCAFFOLD=false
+SKIP_CLUSTER=false
+SKIP_SERVICES=false
 
 # Parse arguments
 while [[ $# -gt 0 ]]; do
     case $1 in
-        --update)
-            UPDATE=true
+        --skip-scaffold)
+            SKIP_SCAFFOLD=true
             shift
             ;;
-        --skip-installer)
-            SKIP_INSTALLER=true
+        --skip-cluster)
+            SKIP_CLUSTER=true
             shift
             ;;
-        --skip-hardware)
-            SKIP_HARDWARE=true
-            shift
-            ;;
-        --skip-configs)
-            SKIP_CONFIGS=true
-            shift
-            ;;
-        --skip-install)
-            SKIP_INSTALL=true
-            shift
-            ;;
-        --skip-all-phases)
-            SKIP_INSTALLER=true
-            SKIP_HARDWARE=true
-            SKIP_CONFIGS=true
-            SKIP_INSTALL=true
+        --skip-services)
+            SKIP_SERVICES=true
             shift
             ;;
         -h|--help)
-            echo "Usage: $0 [--update] [phase-options]"
+            echo "Usage: $0 [component-options]"
             echo ""
-            echo "Initialize and set up a complete Wild-Cloud cluster deployment."
+            echo "Complete Wild-Cloud setup - runs all components in sequence."
             echo ""
-            echo "Cloud Options:"
-            echo "  --update             Update existing cloud files (overwrite)"
-            echo ""
-            echo "Phase Control Options:"
-            echo "  --skip-installer     Skip Phase 1 (Installer image generation)"
-            echo "  --skip-hardware      Skip Phase 2 (Node hardware detection)"
-            echo "  --skip-configs       Skip Phase 3 (Machine config generation)"
-            echo "  --skip-install       Skip Phase 4 (Cluster services installation)"
-            echo "  --skip-all-phases    Skip all phases (cloud setup only)"
-            echo ""
-            echo "General Options:"
+            echo "Component Control Options:"
+            echo "  --skip-scaffold      Skip scaffold setup (cloud initialization)"
+            echo "  --skip-cluster       Skip cluster setup (Phases 1-3)"
+            echo "  --skip-services      Skip services setup (Phase 4)"
             echo "  -h, --help           Show this help message"
             echo ""
-            echo "Phases:"
-            echo "  1. Installer image - Generate custom Talos installer URLs"
-            echo "  2. Hardware detection - Discover node interfaces and disks"
-            echo "  3. Machine configs - Generate Talos machine configurations"
-            echo "  4. Cluster services - Install MetalLB, Traefik, cert-manager, etc."
+            echo "This script runs:"
+            echo "  1. wild-setup-scaffold    # Cloud initialization and basic config"
+            echo "  2. wild-setup-cluster     # Cluster infrastructure (Phases 1-3)"
+            echo "  3. wild-setup-services    # Cluster services (Phase 4)"
             echo ""
-            echo "Configuration is done automatically when needed by each phase."
+            echo "You can also run these components individually:"
+            echo "  - wild-setup-scaffold [--update]"
+            echo "  - wild-setup-cluster [--skip-installer] [--skip-hardware] [--skip-configs]"
+            echo "  - wild-setup-services [--skip-install]"
             echo ""
-            echo "By default, this script will only run in an empty directory."
-            echo "Use --update to overwrite existing cloud files while preserving other files."
+            echo "For detailed options for each component, use:"
+            echo "  wild-setup-scaffold --help"
+            echo "  wild-setup-cluster --help"
+            echo "  wild-setup-services --help"
             exit 0
             ;;
         -*)
             echo "Unknown option $1"
-            echo "Usage: $0 [--update] [phase-options]"
+            echo "Usage: $0 [component-options]"
             echo "Use --help for full usage information"
             exit 1
             ;;
         *)
             echo "Unexpected argument: $1"
-            echo "Usage: $0 [--update] [phase-options]"
+            echo "Usage: $0 [component-options]"
             echo "Use --help for full usage information"
             exit 1
             ;;
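A sketch of how the reworked flags combine, based on the new help text (component flags live on wild-setup; the old phase flags move to the sub-commands):

    wild-setup --skip-scaffold                  # reuse an existing cloud dir, still run cluster + services
    wild-setup --skip-cluster --skip-services   # scaffold only
    wild-setup-cluster --skip-hardware          # run the cluster component alone, skipping node detection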
@@ -183,645 +105,111 @@ done
 WC_HOME="$(pwd)"
 export WC_HOME
 
-# Template directory (in WC_ROOT, never written to)
-TEMPLATE_DIR="${WC_ROOT}/setup/home-scaffold"
-
-if [ ! -d "${TEMPLATE_DIR}" ]; then
-    echo "Error: Template directory not found at ${TEMPLATE_DIR}"
-    exit 1
-fi
-
-# Check if cloud already exists
-if [ -d ".wildcloud" ]; then
-    echo "Wild-Cloud already exists in this directory."
-    echo ""
-    read -p "Do you want to update cloud files? (y/N): " -n 1 -r
-    echo
-    if [[ $REPLY =~ ^[Yy]$ ]]; then
-        UPDATE=true
-        echo "Updating cloud files..."
-    else
-        echo "Skipping cloud update."
-        echo ""
-    fi
-else
-    # Check if current directory is empty for new cloud
-    if [ "${UPDATE}" = false ]; then
-        # Check if directory has any files (including hidden files, excluding . and .. and .git)
-        if [ -n "$(find . -maxdepth 1 -name ".*" -o -name "*" | grep -v "^\.$" | grep -v "^\.\.$" | grep -v "^\./\.git$" | head -1)" ]; then
-            echo "Error: Current directory is not empty"
-            echo "Use --update flag to overwrite existing cloud files while preserving other files"
-            exit 1
-        fi
-    fi
-
-    echo "Initializing Wild-Cloud in $(pwd)"
-    UPDATE=false
-fi
-
-# Initialize cloud files if needed
-if [ ! -d ".wildcloud" ] || [ "${UPDATE}" = true ]; then
-    if [ "${UPDATE}" = true ]; then
-        echo "Updating cloud files (preserving existing custom files)"
-    else
-        echo "Creating cloud files"
-    fi
-
-    # Function to copy files and directories
-    copy_cloud_files() {
-        local src_dir="$1"
-        local dest_dir="$2"
-
-        # Create destination directory if it doesn't exist
-        mkdir -p "${dest_dir}"
-
-        # Copy directory structure
-        find "${src_dir}" -type d | while read -r src_subdir; do
-            rel_path="${src_subdir#${src_dir}}"
-            rel_path="${rel_path#/}" # Remove leading slash if present
-            if [ -n "${rel_path}" ]; then
-                mkdir -p "${dest_dir}/${rel_path}"
-            fi
-        done
-
-        # Copy files
-        find "${src_dir}" -type f | while read -r src_file; do
-            rel_path="${src_file#${src_dir}}"
-            rel_path="${rel_path#/}" # Remove leading slash if present
-            dest_file="${dest_dir}/${rel_path}"
-
-            # Ensure destination directory exists
-            dest_file_dir=$(dirname "${dest_file}")
-            mkdir -p "${dest_file_dir}"
-
-            if [ "${UPDATE}" = true ] && [ -f "${dest_file}" ]; then
-                echo "Updating: ${rel_path}"
-            else
-                echo "Creating: ${rel_path}"
-            fi
-
-            cp "${src_file}" "${dest_file}"
-        done
-    }
-
-    # Copy cloud files to current directory
-    copy_cloud_files "${TEMPLATE_DIR}" "."
-
-    echo ""
-    echo "Wild-Cloud initialized successfully!"
-    echo ""
-fi
-
-# =============================================================================
-# CONFIGURATION HELPERS: Configure settings when needed by phases
-# =============================================================================
-
-configure_basic_settings() {
-    if [ ! -f "${WC_HOME}/config.yaml" ] || [ -z "$(get_current_config "operator.email")" ]; then
-        print_header "Basic Configuration"
-
-        # Detect current network for suggestions
-        CURRENT_IP=$(ip route get 8.8.8.8 | awk '{print $7; exit}' 2>/dev/null || echo "192.168.1.100")
-        GATEWAY_IP=$(ip route | grep default | awk '{print $3; exit}' 2>/dev/null || echo "192.168.1.1")
-        SUBNET_PREFIX=$(echo "${CURRENT_IP}" | cut -d. -f1-3)
-        print_info "Detected network: ${SUBNET_PREFIX}.x (gateway: ${GATEWAY_IP})"
-
-        echo "This will configure basic settings for your wild-cloud deployment."
-        echo ""
-
-        # Basic Information
-        current_email=$(get_current_config "operator.email")
-        email=$(prompt_with_default "Your email address (for Let's Encrypt certificates)" "" "${current_email}")
-        wild-config-set "operator.email" "${email}"
-
-        # Domain Configuration
-        current_base_domain=$(get_current_config "cloud.baseDomain")
-        base_domain=$(prompt_with_default "Your base domain name (e.g., example.com)" "" "${current_base_domain}")
-        wild-config-set "cloud.baseDomain" "${base_domain}"
-
-        current_domain=$(get_current_config "cloud.domain")
-        domain=$(prompt_with_default "Your public cloud domain" "cloud.${base_domain}" "${current_domain}")
-        wild-config-set "cloud.domain" "${domain}"
-
-        current_internal_domain=$(get_current_config "cloud.internalDomain")
-        internal_domain=$(prompt_with_default "Your internal cloud domain" "internal.${domain}" "${current_internal_domain}")
-        wild-config-set "cloud.internalDomain" "${internal_domain}"
-
-        # Derive cluster name from domain
-        cluster_name=$(echo "${domain}" | tr '.' '-' | tr '[:upper:]' '[:lower:]')
-        wild-config-set "cluster.name" "${cluster_name}"
-        print_info "Set cluster name to: ${cluster_name}"
-
-        print_success "Basic configuration completed"
-        echo ""
-    fi
-}
-
-configure_dns_and_certificates() {
-    if [ -z "$(get_current_config "cluster.certManager.cloudflare.domain")" ]; then
-        print_header "DNS and Certificate Configuration"
-        echo "For automatic SSL certificates and DNS management, we use Cloudflare."
-        echo ""
-
-        base_domain=$(get_current_config "cloud.baseDomain")
-        domain=$(get_current_config "cloud.domain")
-
-        echo "Is your domain '${base_domain}' registered and managed through Cloudflare? (y/n)"
-        read -r use_cloudflare
-
-        if [[ "${use_cloudflare}" =~ ^[Yy]$ ]]; then
-            wild-config-set "cluster.certManager.cloudflare.domain" "${domain}"
-
-            current_cf_token=$(get_current_secret "cloudflare.token")
-            if [ -z "${current_cf_token}" ]; then
-                echo ""
-                print_info "You'll need a Cloudflare API token with the following permissions:"
-                echo "  - Zone:Zone:Read"
-                echo "  - Zone:DNS:Edit"
-                echo "  - Include:All zones"
-                echo ""
-                echo "Create one at: https://dash.cloudflare.com/profile/api-tokens"
-                echo ""
-            fi
-
-            cf_token=$(prompt_with_default "Cloudflare API token" "" "${current_cf_token}")
-            wild-secret-set "cloudflare.token" "${cf_token}"
-        else
-            print_warning "You'll need to configure DNS and SSL certificates manually."
-            print_info "Consider transferring your domain to Cloudflare for easier management."
-        fi
-
-        print_success "DNS and certificate configuration completed"
-        echo ""
-    fi
-}
-
-configure_network_settings() {
-    if [ -z "$(get_current_config "cloud.router.ip")" ]; then
-        print_header "Network Configuration"
-
-        CURRENT_IP=$(ip route get 8.8.8.8 | awk '{print $7; exit}' 2>/dev/null || echo "192.168.1.100")
-        GATEWAY_IP=$(ip route | grep default | awk '{print $3; exit}' 2>/dev/null || echo "192.168.1.1")
-        SUBNET_PREFIX=$(echo "${CURRENT_IP}" | cut -d. -f1-3)
-
-        current_router_ip=$(get_current_config "cloud.router.ip")
-        router_ip=$(prompt_with_default "Router/Gateway IP" "${GATEWAY_IP}" "${current_router_ip}")
-        wild-config-set "cloud.router.ip" "${router_ip}"
-
-        current_dns_ip=$(get_current_config "cloud.dns.ip")
-        dns_ip=$(prompt_with_default "DNS server IP (dnsmasq machine)" "${SUBNET_PREFIX}.50" "${current_dns_ip}")
-        wild-config-set "cloud.dns.ip" "${dns_ip}"
-
-        current_dhcp_range=$(get_current_config "cloud.dhcpRange")
-        dhcp_range=$(prompt_with_default "DHCP range for dnsmasq" "${SUBNET_PREFIX}.100,${SUBNET_PREFIX}.200" "${current_dhcp_range}")
-        wild-config-set "cloud.dhcpRange" "${dhcp_range}"
-
-        current_interface=$(get_current_config "cloud.dnsmasq.interface")
-        interface=$(prompt_with_default "Network interface for dnsmasq" "eth0" "${current_interface}")
-        wild-config-set "cloud.dnsmasq.interface" "${interface}"
-
-        current_external_resolver=$(get_current_config "cloud.dns.externalResolver")
-        external_resolver=$(prompt_with_default "External DNS resolver" "1.1.1.1" "${current_external_resolver}")
-        wild-config-set "cloud.dns.externalResolver" "${external_resolver}"
-
-        print_success "Network configuration completed"
-        echo ""
-    fi
-}
-
-configure_cluster_settings() {
-    if [ -z "$(get_current_config "cluster.nodes.talos.version")" ]; then
-        print_header "Kubernetes Cluster Configuration"
-
-        CURRENT_IP=$(ip route get 8.8.8.8 | awk '{print $7; exit}' 2>/dev/null || echo "192.168.1.100")
-        SUBNET_PREFIX=$(echo "${CURRENT_IP}" | cut -d. -f1-3)
-
-        current_talos_version=$(get_current_config "cluster.nodes.talos.version")
-        talos_version=$(prompt_with_default "Talos version" "v1.6.1" "${current_talos_version}")
-        wild-config-set "cluster.nodes.talos.version" "${talos_version}"
-
-        current_ip_pool=$(get_current_config "cluster.ipAddressPool")
-        ip_pool=$(prompt_with_default "MetalLB IP address pool" "${SUBNET_PREFIX}.80-${SUBNET_PREFIX}.89" "${current_ip_pool}")
-        wild-config-set "cluster.ipAddressPool" "${ip_pool}"
-
-        # Automatically set load balancer IP to first address in the pool
-        lb_ip=$(echo "${ip_pool}" | cut -d'-' -f1)
-        wild-config-set "cluster.loadBalancerIp" "${lb_ip}"
-        print_info "Set load balancer IP to: ${lb_ip} (first IP in MetalLB pool)"
-
-        # Control plane nodes
-        echo ""
-        print_info "Configure control plane nodes (you need at least 3 for HA):"
-
-        current_vip=$(get_current_config "cluster.nodes.control.vip")
-        vip=$(prompt_with_default "Control plane virtual IP" "${SUBNET_PREFIX}.90" "${current_vip}")
-        wild-config-set "cluster.nodes.control.vip" "${vip}"
-
-        for i in 1 2 3; do
-            current_node_ip=$(get_current_config "cluster.nodes.control.node${i}.ip")
-            node_ip=$(prompt_with_default "Control plane node ${i} IP address" "${SUBNET_PREFIX}.$(( 90 + i ))" "${current_node_ip}")
-            wild-config-set "cluster.nodes.control.node${i}.ip" "${node_ip}"
-        done
-
-        # Talos schematic ID
-        current_schematic_id=$(get_current_config "cluster.nodes.talos.schematicId")
-        echo ""
-        print_info "Get your Talos schematic ID from: https://factory.talos.dev/"
-        print_info "This customizes Talos with the drivers needed for your hardware."
-
-        # Look up default schematic ID from talos-schemas.yaml
-        default_schematic_id=""
-        schemas_file="${WC_ROOT}/setup/cluster-nodes/talos-schemas.yaml"
-        if [ -f "$schemas_file" ]; then
-            default_schematic_id=$(yq eval ".talos-schemas.\"${talos_version}\"" "$schemas_file" 2>/dev/null)
-            if [ -n "$default_schematic_id" ] && [ "$default_schematic_id" != "null" ]; then
-                print_info "Default schematic ID available for Talos $talos_version"
-            else
-                default_schematic_id=""
-            fi
-        fi
-
-        schematic_id=$(prompt_with_default "Talos schematic ID" "${default_schematic_id}" "${current_schematic_id}")
-        wild-config-set "cluster.nodes.talos.schematicId" "${schematic_id}"
-
-        # External DNS
-        cluster_name=$(get_current_config "cluster.name")
-        current_owner_id=$(get_current_config "cluster.externalDns.ownerId")
-        owner_id=$(prompt_with_default "External DNS owner ID" "external-dns-${cluster_name}" "${current_owner_id}")
-        wild-config-set "cluster.externalDns.ownerId" "${owner_id}"
-
-        print_success "Cluster configuration completed"
-        echo ""
-    fi
-}
-
-configure_storage_settings() {
-    if [ -z "$(get_current_config "cloud.nfs.host")" ]; then
-        print_header "Storage Configuration"
-
-        dns_ip=$(get_current_config "cloud.dns.ip")
-        internal_domain=$(get_current_config "cloud.internalDomain")
-
-        current_nfs_host=$(get_current_config "cloud.nfs.host")
-        nfs_host=$(prompt_with_default "NFS server host" "${dns_ip}" "${current_nfs_host}")
-        wild-config-set "cloud.nfs.host" "${nfs_host}"
-
-        current_media_path=$(get_current_config "cloud.nfs.mediaPath")
-        media_path=$(prompt_with_default "NFS media path" "/mnt/storage/media" "${current_media_path}")
-        wild-config-set "cloud.nfs.mediaPath" "${media_path}"
-
-        current_storage_capacity=$(get_current_config "cloud.nfs.storageCapacity")
-        storage_capacity=$(prompt_with_default "Storage capacity for NFS PV" "1Ti" "${current_storage_capacity}")
-        wild-config-set "cloud.nfs.storageCapacity" "${storage_capacity}"
-
-        # Docker Registry
-        current_registry_host=$(get_current_config "cloud.dockerRegistryHost")
-        registry_host=$(prompt_with_default "Docker registry hostname" "registry.${internal_domain}" "${current_registry_host}")
-        wild-config-set "cloud.dockerRegistryHost" "${registry_host}"
-
-        print_success "Storage configuration completed"
-        echo ""
-    fi
-}
-
||||||
|
|
||||||
# =============================================================================
|
|
||||||
# PHASE 1: Talos asset download
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
if [ "${SKIP_INSTALLER}" = false ]; then
|
|
||||||
print_header "Phase 1: Installer Image Generation"
|
|
||||||
|
|
||||||
print_info "Running wild-cluster-node-image-create..."
|
|
||||||
wild-cluster-node-image-create
|
|
||||||
|
|
||||||
print_success "Phase 1 completed: Installer image generated"
|
|
||||||
echo ""
|
echo ""
|
||||||
else
|
else
|
||||||
print_info "Skipping Phase 1: Installer Image Generation"
|
print_info "Skipping Component 1: Scaffold Setup"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
# PHASE 2: Node Hardware Detection
|
# COMPONENT 2: CLUSTER SETUP
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
|
|
||||||
if [ "${SKIP_HARDWARE}" = false ]; then
|
if [ "${SKIP_CLUSTER}" = false ]; then
|
||||||
print_header "Phase 2: Node Hardware Detection"
|
print_header "Component 2: Cluster Setup"
|
||||||
|
print_info "Running wild-setup-cluster..."
|
||||||
|
|
||||||
# Configure basic settings, network, and cluster settings before node detection
|
if wild-setup-cluster; then
|
||||||
configure_basic_settings
|
print_success "Component 2 completed: Cluster setup"
|
||||||
configure_network_settings
|
|
||||||
configure_cluster_settings
|
|
||||||
|
|
||||||
print_info "This phase will help you register Talos nodes by discovering their hardware."
|
|
||||||
print_info "You'll need nodes booted in maintenance mode and accessible via IP."
|
|
||||||
echo ""
|
|
||||||
|
|
||||||
# Register up to 3 control plane nodes
|
|
||||||
for i in 1 2 3; do
|
|
||||||
echo ""
|
|
||||||
print_info "Configure control plane node $i:"
|
|
||||||
read -p "Do you want to register control plane node $i now? (y/N): " -r register_node
|
|
||||||
|
|
||||||
if [[ $register_node =~ ^[Yy]$ ]]; then
|
|
||||||
read -p "Enter maintenance IP for node $i: " -r NODE_IP
|
|
||||||
|
|
||||||
if [ -z "$NODE_IP" ]; then
|
|
||||||
print_warning "Skipping node $i registration"
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
print_info "Running wild-node-detect for node $i..."
|
|
||||||
NODE_INFO=$(wild-node-detect "$NODE_IP")
|
|
||||||
|
|
||||||
if [ $? -eq 0 ] && [ -n "$NODE_INFO" ]; then
|
|
||||||
# Parse JSON response
|
|
||||||
INTERFACE=$(echo "$NODE_INFO" | jq -r '.interface')
|
|
||||||
SELECTED_DISK=$(echo "$NODE_INFO" | jq -r '.selected_disk')
|
|
||||||
AVAILABLE_DISKS=$(echo "$NODE_INFO" | jq -r '.disks | join(", ")')
|
|
||||||
|
|
||||||
print_success "Hardware detected for node $i:"
|
|
||||||
print_info " - Interface: $INTERFACE"
|
|
||||||
print_info " - Available disks: $AVAILABLE_DISKS"
|
|
||||||
print_info " - Selected disk: $SELECTED_DISK"
|
|
||||||
|
|
||||||
# Allow user to override disk selection
|
|
||||||
echo ""
|
|
||||||
read -p "Use selected disk '$SELECTED_DISK'? (Y/n): " -r use_disk
|
|
||||||
if [[ $use_disk =~ ^[Nn]$ ]]; then
|
|
||||||
echo "Available disks:"
|
|
||||||
echo "$NODE_INFO" | jq -r '.disks[]' | nl -w2 -s') '
|
|
||||||
read -p "Enter disk number: " -r disk_num
|
|
||||||
SELECTED_DISK=$(echo "$NODE_INFO" | jq -r ".disks[$((disk_num-1))]")
|
|
||||||
if [ "$SELECTED_DISK" = "null" ] || [ -z "$SELECTED_DISK" ]; then
|
|
||||||
print_error "Invalid disk selection"
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
print_info "Selected disk: $SELECTED_DISK"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Update config.yaml with hardware info
|
|
||||||
print_info "Updating config.yaml for node $i..."
|
|
||||||
|
|
||||||
# Get the target IP for this node from existing config
|
|
||||||
TARGET_IP=$(wild-config "cluster.nodes.control.node${i}.ip")
|
|
||||||
|
|
||||||
# Update the unified node configuration
|
|
||||||
wild-config-set "cluster.nodes.active.${TARGET_IP}.interface" "$INTERFACE"
|
|
||||||
wild-config-set "cluster.nodes.active.${TARGET_IP}.disk" "$SELECTED_DISK"
|
|
||||||
wild-config-set "cluster.nodes.active.${TARGET_IP}.control" "true"
|
|
||||||
|
|
||||||
print_success "Node $i registered successfully:"
|
|
||||||
print_info " - Target IP: $TARGET_IP"
|
|
||||||
print_info " - Interface: $INTERFACE"
|
|
||||||
print_info " - Disk: $SELECTED_DISK"
|
|
||||||
else
|
else
|
||||||
print_error "Failed to detect hardware for node $i"
|
print_error "Component 2 failed: Cluster setup"
|
||||||
continue
|
exit 1
|
||||||
fi
|
|
||||||
else
|
|
||||||
print_info "Skipping node $i registration"
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
# Register worker nodes
|
|
||||||
echo ""
|
|
||||||
print_info "Configure worker nodes (optional):"
|
|
||||||
while true; do
|
|
||||||
echo ""
|
|
||||||
read -p "Do you want to register a worker node? (y/N): " -r register_worker
|
|
||||||
|
|
||||||
if [[ $register_worker =~ ^[Yy]$ ]]; then
|
|
||||||
read -p "Enter maintenance IP for worker node: " -r WORKER_IP
|
|
||||||
|
|
||||||
if [ -z "$WORKER_IP" ]; then
|
|
||||||
print_warning "No IP provided, skipping worker node"
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
print_info "Running wild-node-detect for worker node $WORKER_IP..."
|
|
||||||
WORKER_INFO=$(wild-node-detect "$WORKER_IP")
|
|
||||||
|
|
||||||
if [ $? -eq 0 ] && [ -n "$WORKER_INFO" ]; then
|
|
||||||
# Parse JSON response
|
|
||||||
INTERFACE=$(echo "$WORKER_INFO" | jq -r '.interface')
|
|
||||||
SELECTED_DISK=$(echo "$WORKER_INFO" | jq -r '.selected_disk')
|
|
||||||
AVAILABLE_DISKS=$(echo "$WORKER_INFO" | jq -r '.disks | join(", ")')
|
|
||||||
|
|
||||||
print_success "Hardware detected for worker node $WORKER_IP:"
|
|
||||||
print_info " - Interface: $INTERFACE"
|
|
||||||
print_info " - Available disks: $AVAILABLE_DISKS"
|
|
||||||
print_info " - Selected disk: $SELECTED_DISK"
|
|
||||||
|
|
||||||
# Allow user to override disk selection
|
|
||||||
echo ""
|
|
||||||
read -p "Use selected disk '$SELECTED_DISK'? (Y/n): " -r use_disk
|
|
||||||
if [[ $use_disk =~ ^[Nn]$ ]]; then
|
|
||||||
echo "Available disks:"
|
|
||||||
echo "$WORKER_INFO" | jq -r '.disks[]' | nl -w2 -s') '
|
|
||||||
read -p "Enter disk number: " -r disk_num
|
|
||||||
SELECTED_DISK=$(echo "$WORKER_INFO" | jq -r ".disks[$((disk_num-1))]")
|
|
||||||
if [ "$SELECTED_DISK" = "null" ] || [ -z "$SELECTED_DISK" ]; then
|
|
||||||
print_error "Invalid disk selection"
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
print_info "Selected disk: $SELECTED_DISK"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Update config.yaml with worker hardware info
|
|
||||||
print_info "Updating config.yaml for worker node $WORKER_IP..."
|
|
||||||
|
|
||||||
# Store under unified cluster.nodes.active.<ip-address>
|
|
||||||
wild-config-set "cluster.nodes.active.${WORKER_IP}.interface" "$INTERFACE"
|
|
||||||
wild-config-set "cluster.nodes.active.${WORKER_IP}.disk" "$SELECTED_DISK"
|
|
||||||
wild-config-set "cluster.nodes.active.${WORKER_IP}.control" "false"
|
|
||||||
|
|
||||||
print_success "Worker node $WORKER_IP registered successfully:"
|
|
||||||
print_info " - IP: $WORKER_IP"
|
|
||||||
print_info " - Interface: $INTERFACE"
|
|
||||||
print_info " - Disk: $SELECTED_DISK"
|
|
||||||
else
|
|
||||||
print_error "Failed to detect hardware for worker node $WORKER_IP"
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
print_success "Phase 2 completed: Node hardware detection"
|
|
||||||
echo ""
|
|
||||||
else
|
|
||||||
print_info "Skipping Phase 2: Node Hardware Detection"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# =============================================================================
|
|
||||||
# PHASE 3: Machine Config Generation
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
if [ "${SKIP_CONFIGS}" = false ]; then
|
|
||||||
print_header "Phase 3: Machine Config Generation"
|
|
||||||
|
|
||||||
# Configure basic settings and cluster settings if needed
|
|
||||||
configure_basic_settings
|
|
||||||
configure_cluster_settings
|
|
||||||
|
|
||||||
# Get all registered nodes from cluster.nodes.active
|
|
||||||
REGISTERED_NODES=()
|
|
||||||
if yq eval '.cluster.nodes.active // {}' "${WC_HOME}/config.yaml" | grep -q "interface"; then
|
|
||||||
ALL_NODE_IPS=$(yq eval '.cluster.nodes.active | keys | .[]' "${WC_HOME}/config.yaml" 2>/dev/null || echo "")
|
|
||||||
|
|
||||||
for NODE_IP in $ALL_NODE_IPS; do
|
|
||||||
# Remove quotes from yq output
|
|
||||||
NODE_IP=$(echo "$NODE_IP" | tr -d '"')
|
|
||||||
REGISTERED_NODES+=("$NODE_IP")
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ ${#REGISTERED_NODES[@]} -eq 0 ]; then
|
|
||||||
print_warning "No nodes have been registered yet."
|
|
||||||
print_info "Run Phase 2 (Hardware Detection) first to register nodes"
|
|
||||||
else
|
|
||||||
print_info "Generating machine configs for ${#REGISTERED_NODES[@]} registered nodes..."
|
|
||||||
|
|
||||||
# Generate config for each registered node
|
|
||||||
for NODE_IP in "${REGISTERED_NODES[@]}"; do
|
|
||||||
echo ""
|
|
||||||
print_info "Generating config for node $NODE_IP..."
|
|
||||||
wild-cluster-node-machine-config-generate "$NODE_IP"
|
|
||||||
done
|
|
||||||
|
|
||||||
echo ""
|
|
||||||
print_success "All machine configurations generated successfully!"
|
|
||||||
fi
|
|
||||||
|
|
||||||
print_success "Phase 3 completed: Machine config generation"
|
|
||||||
echo ""
|
|
||||||
else
|
|
||||||
print_info "Skipping Phase 3: Machine Config Generation"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# =============================================================================
|
|
||||||
# PHASE 4: Cluster Services Installation
|
|
||||||
# =============================================================================
|
|
||||||
|
|
||||||
if [ "${SKIP_INSTALL}" = false ]; then
|
|
||||||
print_header "Phase 4: Cluster Services Installation"
|
|
||||||
|
|
||||||
# Configure settings needed for cluster services
|
|
||||||
configure_basic_settings
|
|
||||||
configure_dns_and_certificates
|
|
||||||
configure_network_settings
|
|
||||||
configure_storage_settings
|
|
||||||
|
|
||||||
print_info "This phase prepares and installs core cluster services (MetalLB, Traefik, cert-manager, etc.)"
|
|
||||||
print_warning "Make sure your cluster is running and kubectl is configured!"
|
|
||||||
|
|
||||||
# Generate cluster services setup files
|
|
||||||
print_info "Generating cluster services setup files..."
|
|
||||||
wild-cluster-services-generate --force
|
|
||||||
|
|
||||||
read -p "Do you want to install cluster services now? (y/N): " -r install_services
|
|
||||||
|
|
||||||
if [[ $install_services =~ ^[Yy]$ ]]; then
|
|
||||||
print_info "Installing cluster services..."
|
|
||||||
wild-cluster-services-up
|
|
||||||
SERVICES_INSTALLED=true
|
|
||||||
else
|
|
||||||
print_info "Skipping cluster services installation"
|
|
||||||
print_info "You can install them later with: wild-cluster-services-up"
|
|
||||||
SKIP_INSTALL=true
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "${SKIP_INSTALL}" = false ] && [ "${SERVICES_INSTALLED:-false}" = true ]; then
|
|
||||||
print_success "Phase 4 completed: Cluster services installation"
|
|
||||||
fi
|
fi
echo ""
else
print_info "Skipping Component 2: Cluster Setup"
fi

# =============================================================================
# COMPONENT 3: SERVICES SETUP
# =============================================================================

if [ "${SKIP_SERVICES}" = false ]; then
print_header "Component 3: Services Setup"
print_info "Running wild-setup-services..."

if wild-setup-services; then
print_success "Component 3 completed: Services setup"
else
print_error "Component 3 failed: Services setup"
exit 1
fi
echo ""
else
print_info "Skipping Component 3: Services Setup"
fi
# =============================================================================
# FINAL SUMMARY
# =============================================================================

print_header "Wild-Cloud Complete Setup Finished!"

print_success "All components completed successfully!"
echo ""

print_info "What was accomplished:"
if [ "${SKIP_SCAFFOLD}" = false ]; then
print_info "✅ Component 1: Scaffold setup (cloud initialization)"
else
print_info "⏸️ Component 1: Scaffold setup (skipped)"
fi

if [ "${SKIP_CLUSTER}" = false ]; then
print_info "✅ Component 2: Cluster setup (Phases 1-3)"
else
print_info "⏸️ Component 2: Cluster setup (skipped)"
fi

if [ "${SKIP_SERVICES}" = false ]; then
print_info "✅ Component 3: Services setup (Phase 4)"
else
print_info "⏸️ Component 3: Services setup (skipped)"
fi

echo ""
print_info "Configuration files:"
echo " - ${WC_HOME}/config.yaml"
echo " - ${WC_HOME}/secrets.yaml"

if [ -d "${WC_HOME}/setup/cluster-nodes/final" ] && [ "$(ls -A ${WC_HOME}/setup/cluster-nodes/final 2>/dev/null)" ]; then
echo ""
print_info "Machine configurations:"
for config_file in "${WC_HOME}/setup/cluster-nodes/final"/*.yaml; do
if [ -f "$config_file" ]; then
echo " - $config_file"
fi
done
fi

echo ""
print_info "Individual components can be run separately:"
echo " - wild-setup-scaffold # Cloud initialization"
echo " - wild-setup-cluster # Cluster infrastructure"
echo " - wild-setup-services # Cluster services"
echo ""

if [ "${SKIP_SERVICES}" = false ] && command -v kubectl >/dev/null 2>&1; then
if [ -f "${WC_HOME}/config.yaml" ]; then
INTERNAL_DOMAIN=$(wild-config cloud.internalDomain 2>/dev/null || echo "your-internal-domain")
print_info "Your Wild-Cloud is ready!"
echo " Dashboard: https://dashboard.${INTERNAL_DOMAIN}"
echo " Get token: ./bin/dashboard-token"
fi
else
print_info "Complete the remaining setup steps to finalize your Wild-Cloud deployment"
fi

echo ""
print_success "Wild-Cloud setup completed!"
|
728
bin/wild-setup-cluster
Executable file
728
bin/wild-setup-cluster
Executable file
@@ -0,0 +1,728 @@
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
# Get WC_ROOT (where this script and templates live)
|
||||||
|
WC_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
|
export WC_ROOT
|
||||||
|
|
||||||
|
# Set up cloud directory (WC_HOME is where user's cloud will be)
|
||||||
|
WC_HOME="$(pwd)"
|
||||||
|
export WC_HOME
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# HELPER FUNCTIONS
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Helper functions
|
||||||
|
print_header() {
|
||||||
|
echo -e "\n${BLUE}=== $1 ===${NC}\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_info() {
|
||||||
|
echo -e "${BLUE}INFO:${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_warning() {
|
||||||
|
echo -e "${YELLOW}WARNING:${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_success() {
|
||||||
|
echo -e "${GREEN}SUCCESS:${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_error() {
|
||||||
|
echo -e "${RED}ERROR:${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to prompt for input with default value
|
||||||
|
prompt_with_default() {
|
||||||
|
local prompt="$1"
|
||||||
|
local default="$2"
|
||||||
|
local current_value="$3"
|
||||||
|
local result
|
||||||
|
|
||||||
|
if [ -n "${current_value}" ] && [ "${current_value}" != "null" ]; then
|
||||||
|
printf "%s [current: %s]: " "${prompt}" "${current_value}" >&2
|
||||||
|
read -r result
|
||||||
|
if [ -z "${result}" ]; then
|
||||||
|
result="${current_value}"
|
||||||
|
fi
|
||||||
|
elif [ -n "${default}" ]; then
|
||||||
|
printf "%s [default: %s]: " "${prompt}" "${default}" >&2
|
||||||
|
read -r result
|
||||||
|
if [ -z "${result}" ]; then
|
||||||
|
result="${default}"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
printf "%s: " "${prompt}" >&2
|
||||||
|
read -r result
|
||||||
|
while [ -z "${result}" ]; do
|
||||||
|
printf "This value is required. Please enter a value: " >&2
|
||||||
|
read -r result
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "${result}"
|
||||||
|
}
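# Illustrative usage note (not part of the original script): prompt_with_default
# writes its prompt to stderr and echoes the chosen value on stdout, so it is meant
# to be captured with command substitution, mirroring how it is called below:
#
#   current_vip=$(get_current_config "cluster.nodes.control.vip")
#   vip=$(prompt_with_default "Control plane virtual IP" "192.168.1.90" "${current_vip}")
#   wild-config-set "cluster.nodes.control.vip" "${vip}"
#
# Keeping prompts on stderr leaves stdout clean for the captured value.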
|
||||||
|
|
||||||
|
# Function to get current config value safely
|
||||||
|
get_current_config() {
|
||||||
|
local key="$1"
|
||||||
|
if [ -f "${WC_HOME}/config.yaml" ]; then
|
||||||
|
set +e
|
||||||
|
result=$(wild-config "${key}" 2>/dev/null)
|
||||||
|
set -e
|
||||||
|
echo "${result}"
|
||||||
|
else
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to get current secret value safely
|
||||||
|
get_current_secret() {
|
||||||
|
local key="$1"
|
||||||
|
if [ -f "${WC_HOME}/secrets.yaml" ]; then
|
||||||
|
set +e
|
||||||
|
result=$(wild-secret "${key}" 2>/dev/null)
|
||||||
|
set -e
|
||||||
|
echo "${result}"
|
||||||
|
else
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Phase tracking variables
|
||||||
|
SKIP_INSTALLER=false
|
||||||
|
SKIP_HARDWARE=false
|
||||||
|
SKIP_CONFIGS=false
|
||||||
|
|
||||||
|
# Parse arguments
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case $1 in
|
||||||
|
--skip-installer)
|
||||||
|
SKIP_INSTALLER=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
--skip-hardware)
|
||||||
|
SKIP_HARDWARE=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
--skip-configs)
|
||||||
|
SKIP_CONFIGS=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-h|--help)
echo "Usage: $0 [phase-options]"
echo ""
echo "Set up Kubernetes cluster infrastructure (Phases 1-3)."
echo ""
echo "Phase Control Options:"
echo " --skip-installer Skip Phase 1 (Installer image generation)"
echo " --skip-hardware Skip Phase 2 (Node hardware detection)"
echo " --skip-configs Skip Phase 3 (Machine config generation)"
echo " -h, --help Show this help message"
echo ""
echo "Phases:"
echo " 1. Installer image - Generate custom Talos installer URLs"
echo " 2. Hardware detection - Discover node interfaces and disks"
echo " 3. Machine configs - Generate Talos machine configurations"
echo ""
echo "Prerequisites:"
echo " - Run 'wild-setup-scaffold' first to initialize the cloud"
echo ""
echo "After completion:"
echo " - Run 'wild-setup-services' to install cluster services"
exit 0
;;
|
||||||
|
-*)
|
||||||
|
echo "Unknown option $1"
|
||||||
|
echo "Usage: $0 [phase-options]"
|
||||||
|
echo "Use --help for full usage information"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Unexpected argument: $1"
|
||||||
|
echo "Usage: $0 [phase-options]"
|
||||||
|
echo "Use --help for full usage information"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
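# Usage sketch (using only the flags defined above): re-running the later phases
# once the installer image already exists:
#
#   cd ~/my-cloud
#   wild-setup-cluster --skip-installer
#
# Each --skip-* flag only flips the corresponding SKIP_* variable that is checked
# before its phase block further down.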
|
||||||
|
|
||||||
|
# Check if we're in a wild-cloud directory
|
||||||
|
if [ ! -d ".wildcloud" ]; then
|
||||||
|
print_error "You must run this script from a wild-cloud directory"
|
||||||
|
print_info "Run 'wild-setup-scaffold' first to initialize a wild-cloud project"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check basic configuration
|
||||||
|
if [ -z "$(get_current_config "operator.email")" ]; then
|
||||||
|
print_error "Basic configuration is missing"
|
||||||
|
print_info "Run 'wild-setup-scaffold' first to configure basic settings"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
print_header "Wild-Cloud Cluster Setup"
|
||||||
|
print_info "Setting up Kubernetes cluster infrastructure (Phases 1-3)"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# PHASE 1: Talos asset download
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
if [ "${SKIP_INSTALLER}" = false ]; then
|
||||||
|
print_header "Phase 1: Installer Image Generation"
|
||||||
|
|
||||||
|
print_info "Running wild-cluster-node-image-create..."
|
||||||
|
wild-cluster-node-image-create
|
||||||
|
|
||||||
|
print_success "Phase 1 completed: Installer image generated"
|
||||||
|
echo ""
|
||||||
|
else
|
||||||
|
print_info "Skipping Phase 1: Installer Image Generation"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# PHASE 2: Node Hardware Detection
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
if [ "${SKIP_HARDWARE}" = false ]; then
|
||||||
|
print_header "Phase 2: Node Hardware Detection"
|
||||||
|
|
||||||
|
# Configure network settings
|
||||||
|
if [ -z "$(get_current_config "cloud.router.ip")" ]; then
|
||||||
|
print_header "Network Configuration"
|
||||||
|
|
||||||
|
CURRENT_IP=$(ip route get 8.8.8.8 | awk '{print $7; exit}' 2>/dev/null || echo "192.168.1.100")
|
||||||
|
GATEWAY_IP=$(ip route | grep default | awk '{print $3; exit}' 2>/dev/null || echo "192.168.1.1")
|
||||||
|
SUBNET_PREFIX=$(echo "${CURRENT_IP}" | cut -d. -f1-3)
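# A worked example of the detection above (values are illustrative): `ip route get 8.8.8.8`
# prints something like
#   8.8.8.8 via 192.168.1.1 dev eth0 src 192.168.1.42 uid 1000
# so awk '{print $7; exit}' picks the local source address (192.168.1.42 here), the
# default-route line yields the gateway (192.168.1.1), and cut -d. -f1-3 reduces the
# address to its /24 prefix (192.168.1) used for the suggested defaults below.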
|
||||||
|
|
||||||
|
current_router_ip=$(get_current_config "cloud.router.ip")
|
||||||
|
router_ip=$(prompt_with_default "Router/Gateway IP" "${GATEWAY_IP}" "${current_router_ip}")
|
||||||
|
wild-config-set "cloud.router.ip" "${router_ip}"
|
||||||
|
|
||||||
|
current_dns_ip=$(get_current_config "cloud.dns.ip")
|
||||||
|
dns_ip=$(prompt_with_default "DNS server IP (dnsmasq machine)" "${SUBNET_PREFIX}.50" "${current_dns_ip}")
|
||||||
|
wild-config-set "cloud.dns.ip" "${dns_ip}"
|
||||||
|
|
||||||
|
current_dhcp_range=$(get_current_config "cloud.dhcpRange")
|
||||||
|
dhcp_range=$(prompt_with_default "DHCP range for dnsmasq" "${SUBNET_PREFIX}.100,${SUBNET_PREFIX}.200" "${current_dhcp_range}")
|
||||||
|
wild-config-set "cloud.dhcpRange" "${dhcp_range}"
|
||||||
|
|
||||||
|
current_interface=$(get_current_config "cloud.dnsmasq.interface")
|
||||||
|
interface=$(prompt_with_default "Network interface for dnsmasq" "eth0" "${current_interface}")
|
||||||
|
wild-config-set "cloud.dnsmasq.interface" "${interface}"
|
||||||
|
|
||||||
|
current_external_resolver=$(get_current_config "cloud.dns.externalResolver")
|
||||||
|
external_resolver=$(prompt_with_default "External DNS resolver" "1.1.1.1" "${current_external_resolver}")
|
||||||
|
wild-config-set "cloud.dns.externalResolver" "${external_resolver}"
|
||||||
|
|
||||||
|
print_success "Network configuration completed"
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Configure cluster settings
|
||||||
|
print_header "Kubernetes Cluster Configuration"
|
||||||
|
|
||||||
|
CURRENT_IP=$(ip route get 8.8.8.8 | awk '{print $7; exit}' 2>/dev/null || echo "192.168.1.100")
|
||||||
|
SUBNET_PREFIX=$(echo "${CURRENT_IP}" | cut -d. -f1-3)
|
||||||
|
|
||||||
|
# Talos version
|
||||||
|
current_talos_version=$(get_current_config "cluster.nodes.talos.version")
|
||||||
|
if [ -z "$current_talos_version" ] || [ "$current_talos_version" = "null" ]; then
|
||||||
|
talos_version=$(prompt_with_default "Talos version" "v1.10.4" "${current_talos_version}")
|
||||||
|
wild-config-set "cluster.nodes.talos.version" "${talos_version}"
|
||||||
|
else
|
||||||
|
talos_version="$current_talos_version"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# MetalLB IP address pool
|
||||||
|
current_ip_pool=$(get_current_config "cluster.ipAddressPool")
|
||||||
|
if [ -z "$current_ip_pool" ] || [ "$current_ip_pool" = "null" ]; then
|
||||||
|
ip_pool=$(prompt_with_default "MetalLB IP address pool" "${SUBNET_PREFIX}.80-${SUBNET_PREFIX}.89" "${current_ip_pool}")
|
||||||
|
wild-config-set "cluster.ipAddressPool" "${ip_pool}"
|
||||||
|
else
|
||||||
|
ip_pool="$current_ip_pool"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Load balancer IP (automatically set to first address in the pool)
|
||||||
|
current_lb_ip=$(get_current_config "cluster.loadBalancerIp")
|
||||||
|
if [ -z "$current_lb_ip" ] || [ "$current_lb_ip" = "null" ]; then
|
||||||
|
lb_ip=$(echo "${ip_pool}" | cut -d'-' -f1)
|
||||||
|
wild-config-set "cluster.loadBalancerIp" "${lb_ip}"
|
||||||
|
print_info "Set load balancer IP to: ${lb_ip} (first IP in MetalLB pool)"
|
||||||
|
fi
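# Example (illustrative): with cluster.ipAddressPool set to "192.168.1.80-192.168.1.89",
# `cut -d'-' -f1` takes the text before the dash, so cluster.loadBalancerIp defaults to
# 192.168.1.80. If the pool were given in CIDR form rather than a range, the cut would
# just return the whole string, so the value would need to be set manually.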
|
||||||
|
|
||||||
|
# Talos schematic ID
|
||||||
|
current_schematic_id=$(get_current_config "cluster.nodes.talos.schematicId")
|
||||||
|
if [ -z "$current_schematic_id" ] || [ "$current_schematic_id" = "null" ]; then
|
||||||
|
echo ""
|
||||||
|
print_info "Get your Talos schematic ID from: https://factory.talos.dev/"
|
||||||
|
print_info "This customizes Talos with the drivers needed for your hardware."
|
||||||
|
|
||||||
|
# Look up default schematic ID from talos-schemas.yaml
|
||||||
|
default_schematic_id=""
|
||||||
|
schemas_file="${WC_ROOT}/setup/cluster-nodes/talos-schemas.yaml"
|
||||||
|
if [ -f "$schemas_file" ]; then
|
||||||
|
default_schematic_id=$(yq eval ".talos-schemas.\"${talos_version}\"" "$schemas_file" 2>/dev/null)
|
||||||
|
if [ -n "$default_schematic_id" ] && [ "$default_schematic_id" != "null" ]; then
|
||||||
|
print_info "Default schematic ID available for Talos $talos_version"
|
||||||
|
else
|
||||||
|
default_schematic_id=""
|
||||||
|
fi
|
||||||
|
fi
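# The contents of setup/cluster-nodes/talos-schemas.yaml are not shown in this commit;
# based on the yq lookup above (.talos-schemas."<version>"), it is presumably a simple
# version-to-schematic-ID map, for example (hypothetical values):
#
#   talos-schemas:
#     "v1.10.4": "<64-hex-character-schematic-id>"
#
# yq eval '.talos-schemas."v1.10.4"' setup/cluster-nodes/talos-schemas.yaml would then
# print the schematic ID offered as the default below.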
|
||||||
|
|
||||||
|
schematic_id=$(prompt_with_default "Talos schematic ID" "${default_schematic_id}" "${current_schematic_id}")
|
||||||
|
wild-config-set "cluster.nodes.talos.schematicId" "${schematic_id}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# External DNS
|
||||||
|
current_owner_id=$(get_current_config "cluster.externalDns.ownerId")
|
||||||
|
if [ -z "$current_owner_id" ] || [ "$current_owner_id" = "null" ]; then
|
||||||
|
cluster_name=$(get_current_config "cluster.name")
|
||||||
|
owner_id=$(prompt_with_default "External DNS owner ID" "external-dns-${cluster_name}" "${current_owner_id}")
|
||||||
|
wild-config-set "cluster.externalDns.ownerId" "${owner_id}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
print_success "Cluster configuration completed"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
print_info "This phase will help you register Talos nodes by discovering their hardware."
|
||||||
|
print_info "You'll need nodes booted in maintenance mode and accessible via IP."
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Configure control plane network topology first
|
||||||
|
if [ -z "$(get_current_config "cluster.nodes.control.vip")" ]; then
|
||||||
|
print_header "Control Plane Network Configuration"
|
||||||
|
|
||||||
|
# Detect current network for suggestions
|
||||||
|
CURRENT_IP=$(ip route get 8.8.8.8 | awk '{print $7; exit}' 2>/dev/null || echo "192.168.1.100")
|
||||||
|
SUBNET_PREFIX=$(echo "${CURRENT_IP}" | cut -d. -f1-3)
|
||||||
|
|
||||||
|
print_info "Configure control plane nodes (you need at least 3 for HA):"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
current_vip=$(get_current_config "cluster.nodes.control.vip")
|
||||||
|
vip=$(prompt_with_default "Control plane virtual IP" "${SUBNET_PREFIX}.90" "${current_vip}")
|
||||||
|
wild-config-set "cluster.nodes.control.vip" "${vip}"
|
||||||
|
|
||||||
|
# Automatically configure the first three IPs after VIP for control plane nodes
|
||||||
|
vip_last_octet=$(echo "$vip" | cut -d. -f4)
|
||||||
|
vip_prefix=$(echo "$vip" | cut -d. -f1-3)
|
||||||
|
|
||||||
|
print_info "Configuring control plane nodes using consecutive IPs after VIP:"
|
||||||
|
for i in 1 2 3; do
|
||||||
|
node_ip="${vip_prefix}.$(( vip_last_octet + i ))"
|
||||||
|
print_info " Control plane node $i: $node_ip"
|
||||||
|
|
||||||
|
# Initialize the node in cluster.nodes.active if not already present
|
||||||
|
if [ -z "$(get_current_config "cluster.nodes.active.\"${node_ip}\".control")" ]; then
|
||||||
|
wild-config-set "cluster.nodes.active.\"${node_ip}\".control" "true"
|
||||||
|
fi
|
||||||
|
done
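# Example (illustrative): with a VIP of 192.168.1.90, vip_last_octet is 90 and the loop
# registers 192.168.1.91, 192.168.1.92 and 192.168.1.93 as control plane entries under
# cluster.nodes.active. With the defaults suggested elsewhere in this script, the MetalLB
# pool (.80-.89) and DHCP range (.100-.200) stay clear of these addresses.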
|
||||||
|
|
||||||
|
print_success "Control plane network configuration completed"
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Generate initial cluster configuration
|
||||||
|
print_header "Cluster Configuration Generation"
|
||||||
|
print_info "Generating base cluster configuration with talosctl gen config..."
|
||||||
|
wild-cluster-config-generate
|
||||||
|
|
||||||
|
# Detect and register control plane nodes
|
||||||
|
print_header "Control Plane Node Registration"
|
||||||
|
|
||||||
|
# Get VIP to determine control plane IPs
|
||||||
|
vip=$(get_current_config "cluster.nodes.control.vip")
|
||||||
|
if [ -z "$vip" ]; then
|
||||||
|
print_error "VIP not configured. Run control plane network configuration first."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
vip_last_octet=$(echo "$vip" | cut -d. -f4)
|
||||||
|
vip_prefix=$(echo "$vip" | cut -d. -f1-3)
|
||||||
|
|
||||||
|
# Process each control plane node IP
|
||||||
|
for i in 1 2 3; do
|
||||||
|
TARGET_IP="${vip_prefix}.$(( vip_last_octet + i ))"
|
||||||
|
echo ""
|
||||||
|
print_info "Registering control plane node: $TARGET_IP"
|
||||||
|
|
||||||
|
# Check if node is already configured
|
||||||
|
existing_interface=$(get_current_config "cluster.nodes.active.\"${TARGET_IP}\".interface")
|
||||||
|
if [ -n "$existing_interface" ] && [ "$existing_interface" != "null" ]; then
|
||||||
|
print_success "Node $TARGET_IP already configured"
|
||||||
|
print_info " - Interface: $existing_interface"
|
||||||
|
print_info " - Disk: $(get_current_config "cluster.nodes.active.\"${TARGET_IP}\".disk")"
|
||||||
|
|
||||||
|
# Still generate machine config if it doesn't exist
|
||||||
|
NODE_SETUP_DIR="${WC_HOME}/setup/cluster-nodes"
|
||||||
|
CONFIG_FILE="${NODE_SETUP_DIR}/final/${TARGET_IP}.yaml"
|
||||||
|
if [ ! -f "$CONFIG_FILE" ]; then
|
||||||
|
print_info "Generating missing machine configuration for $TARGET_IP..."
|
||||||
|
if wild-cluster-node-machine-config-generate "$TARGET_IP"; then
|
||||||
|
print_success "Machine configuration generated for $TARGET_IP"
|
||||||
|
else
|
||||||
|
print_warning "Failed to generate machine configuration for $TARGET_IP"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
print_info " ✓ Machine config exists: $CONFIG_FILE"
|
||||||
|
fi
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
read -p "Do you want to register control plane node $TARGET_IP now? (y/N): " -r register_node
|
||||||
|
if [[ ! $register_node =~ ^[Yy]$ ]]; then
|
||||||
|
print_info "Skipping node $TARGET_IP registration"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
# First try to detect at target IP
|
||||||
|
print_info "Attempting detection at target IP $TARGET_IP..."
|
||||||
|
DETECTION_IP="$TARGET_IP"
|
||||||
|
NODE_INFO=""
|
||||||
|
|
||||||
|
if wild-node-detect "$TARGET_IP" >/dev/null 2>&1; then
|
||||||
|
NODE_INFO=$(wild-node-detect "$TARGET_IP")
|
||||||
|
print_success "Node detected at target IP $TARGET_IP"
|
||||||
|
else
|
||||||
|
# Fall back to maintenance IP
|
||||||
|
print_warning "Node not accessible at target IP $TARGET_IP"
|
||||||
|
read -p "Enter maintenance IP for this node: " -r MAINTENANCE_IP
|
||||||
|
|
||||||
|
if [ -z "$MAINTENANCE_IP" ]; then
|
||||||
|
print_warning "Skipping node $TARGET_IP registration"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
print_info "Attempting detection at maintenance IP $MAINTENANCE_IP..."
|
||||||
|
if wild-node-detect "$MAINTENANCE_IP" >/dev/null 2>&1; then
|
||||||
|
NODE_INFO=$(wild-node-detect "$MAINTENANCE_IP")
|
||||||
|
DETECTION_IP="$MAINTENANCE_IP"
|
||||||
|
|
||||||
|
# Store maintenance IP for reference
|
||||||
|
wild-config-set "cluster.nodes.active.\"${TARGET_IP}\".maintenanceIp" "$MAINTENANCE_IP"
|
||||||
|
print_success "Node detected at maintenance IP $MAINTENANCE_IP"
|
||||||
|
else
|
||||||
|
print_error "Failed to detect node at $MAINTENANCE_IP"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "$NODE_INFO" ]; then
|
||||||
|
# Parse JSON response
|
||||||
|
INTERFACE=$(echo "$NODE_INFO" | jq -r '.interface')
|
||||||
|
SELECTED_DISK=$(echo "$NODE_INFO" | jq -r '.selected_disk')
|
||||||
|
AVAILABLE_DISKS=$(echo "$NODE_INFO" | jq -r '.disks | join(", ")')
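# From the jq queries above, wild-node-detect is expected to emit JSON with at least an
# interface, a selected_disk and a disks array; a plausible (illustrative) payload:
#
#   {
#     "interface": "enp3s0",
#     "selected_disk": "/dev/nvme0n1",
#     "disks": ["/dev/nvme0n1", "/dev/sda"]
#   }
#
# jq -r '.disks | join(", ")' then renders the array as "/dev/nvme0n1, /dev/sda".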
|
||||||
|
|
||||||
|
print_success "Hardware detected:"
|
||||||
|
print_info " - Interface: $INTERFACE"
|
||||||
|
print_info " - Available disks: $AVAILABLE_DISKS"
|
||||||
|
print_info " - Selected disk: $SELECTED_DISK"
|
||||||
|
|
||||||
|
# Allow user to override disk selection
|
||||||
|
echo ""
|
||||||
|
read -p "Use selected disk '$SELECTED_DISK'? (Y/n): " -r use_disk
|
||||||
|
if [[ $use_disk =~ ^[Nn]$ ]]; then
|
||||||
|
echo "Available disks:"
|
||||||
|
echo "$NODE_INFO" | jq -r '.disks[]' | nl -w2 -s') '
|
||||||
|
read -p "Enter disk number: " -r disk_num
|
||||||
|
SELECTED_DISK=$(echo "$NODE_INFO" | jq -r ".disks[$((disk_num-1))]")
|
||||||
|
if [ "$SELECTED_DISK" = "null" ] || [ -z "$SELECTED_DISK" ]; then
|
||||||
|
print_error "Invalid disk selection"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
print_info "Selected disk: $SELECTED_DISK"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Update config.yaml with hardware info
|
||||||
|
print_info "Updating configuration for $TARGET_IP..."
|
||||||
|
wild-config-set "cluster.nodes.active.\"${TARGET_IP}\".interface" "$INTERFACE"
|
||||||
|
wild-config-set "cluster.nodes.active.\"${TARGET_IP}\".disk" "$SELECTED_DISK"
|
||||||
|
wild-config-set "cluster.nodes.active.\"${TARGET_IP}\".control" "true"
|
||||||
|
|
||||||
|
print_success "Node $TARGET_IP registered successfully"
|
||||||
|
|
||||||
|
# Generate machine config immediately
|
||||||
|
print_info "Generating machine configuration for $TARGET_IP..."
|
||||||
|
if wild-cluster-node-machine-config-generate "$TARGET_IP"; then
|
||||||
|
print_success "Machine configuration generated for $TARGET_IP"
|
||||||
|
|
||||||
|
# Ask if user wants to apply the configuration now
|
||||||
|
echo ""
|
||||||
|
read -p "Apply configuration to node $TARGET_IP now? (y/N): " -r apply_config
|
||||||
|
if [[ $apply_config =~ ^[Yy]$ ]]; then
|
||||||
|
if [ "$DETECTION_IP" != "$TARGET_IP" ]; then
|
||||||
|
# Node is in maintenance mode, use insecure flag
|
||||||
|
print_info "Applying configuration in insecure mode (maintenance mode)..."
|
||||||
|
wild-cluster-node-up "$TARGET_IP" --insecure
|
||||||
|
else
|
||||||
|
# Node is already configured, use secure mode
|
||||||
|
print_info "Applying configuration..."
|
||||||
|
wild-cluster-node-up "$TARGET_IP"
|
||||||
|
fi
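# Note on the flag (an assumption, since wild-cluster-node-up is defined elsewhere): it
# presumably wraps talosctl apply-config. A node still in Talos maintenance mode has no
# cluster PKI yet, so the config must be pushed with --insecure (no mutual TLS); once the
# node has been configured, the same command is expected to work without the flag.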
|
||||||
|
else
|
||||||
|
print_info "Configuration not applied. You can apply it later with:"
|
||||||
|
print_info " wild-cluster-node-up $TARGET_IP --insecure"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
print_warning "Failed to generate machine configuration for $TARGET_IP"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Register worker nodes
|
||||||
|
echo ""
|
||||||
|
print_info "Configure worker nodes (optional):"
|
||||||
|
while true; do
|
||||||
|
echo ""
|
||||||
|
read -p "Do you want to register a worker node? (y/N): " -r register_worker
|
||||||
|
|
||||||
|
if [[ $register_worker =~ ^[Yy]$ ]]; then
|
||||||
|
read -p "Enter maintenance IP for worker node: " -r WORKER_IP
|
||||||
|
|
||||||
|
if [ -z "$WORKER_IP" ]; then
|
||||||
|
print_warning "No IP provided, skipping worker node"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
print_info "Running wild-node-detect for worker node $WORKER_IP..."
|
||||||
|
# Run detection and capture both output and stderr for debugging
|
||||||
|
DETECTION_OUTPUT=$(mktemp)
|
||||||
|
DETECTION_ERROR=$(mktemp)
|
||||||
|
if wild-node-detect "$WORKER_IP" >"$DETECTION_OUTPUT" 2>"$DETECTION_ERROR"; then
|
||||||
|
WORKER_INFO=$(cat "$DETECTION_OUTPUT")
|
||||||
|
print_success "Worker node detected at IP $WORKER_IP"
|
||||||
|
rm -f "$DETECTION_OUTPUT" "$DETECTION_ERROR"
|
||||||
|
else
|
||||||
|
print_error "Failed to detect hardware for worker node $WORKER_IP"
|
||||||
|
print_info "Detection error output:"
|
||||||
|
cat "$DETECTION_ERROR" >&2
|
||||||
|
print_info "Make sure the node is running in maintenance mode and accessible"
|
||||||
|
rm -f "$DETECTION_OUTPUT" "$DETECTION_ERROR"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "$WORKER_INFO" ]; then
|
||||||
|
# Parse JSON response
|
||||||
|
INTERFACE=$(echo "$WORKER_INFO" | jq -r '.interface')
|
||||||
|
SELECTED_DISK=$(echo "$WORKER_INFO" | jq -r '.selected_disk')
|
||||||
|
AVAILABLE_DISKS=$(echo "$WORKER_INFO" | jq -r '.disks | join(", ")')
|
||||||
|
|
||||||
|
print_success "Hardware detected for worker node $WORKER_IP:"
|
||||||
|
print_info " - Interface: $INTERFACE"
|
||||||
|
print_info " - Available disks: $AVAILABLE_DISKS"
|
||||||
|
print_info " - Selected disk: $SELECTED_DISK"
|
||||||
|
|
||||||
|
# Allow user to override disk selection
|
||||||
|
echo ""
|
||||||
|
read -p "Use selected disk '$SELECTED_DISK'? (Y/n): " -r use_disk
|
||||||
|
if [[ $use_disk =~ ^[Nn]$ ]]; then
|
||||||
|
echo "Available disks:"
|
||||||
|
echo "$WORKER_INFO" | jq -r '.disks[]' | nl -w2 -s') '
|
||||||
|
read -p "Enter disk number: " -r disk_num
|
||||||
|
SELECTED_DISK=$(echo "$WORKER_INFO" | jq -r ".disks[$((disk_num-1))]")
|
||||||
|
if [ "$SELECTED_DISK" = "null" ] || [ -z "$SELECTED_DISK" ]; then
|
||||||
|
print_error "Invalid disk selection"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
print_info "Selected disk: $SELECTED_DISK"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Update config.yaml with worker hardware info
|
||||||
|
print_info "Updating config.yaml for worker node $WORKER_IP..."
|
||||||
|
|
||||||
|
# Store under unified cluster.nodes.active.<ip-address>
|
||||||
|
wild-config-set "cluster.nodes.active.\"${WORKER_IP}\".interface" "$INTERFACE"
|
||||||
|
wild-config-set "cluster.nodes.active.\"${WORKER_IP}\".disk" "$SELECTED_DISK"
|
||||||
|
wild-config-set "cluster.nodes.active.\"${WORKER_IP}\".control" "false"
|
||||||
|
|
||||||
|
print_success "Worker node $WORKER_IP registered successfully:"
|
||||||
|
print_info " - IP: $WORKER_IP"
|
||||||
|
print_info " - Interface: $INTERFACE"
|
||||||
|
print_info " - Disk: $SELECTED_DISK"
|
||||||
|
|
||||||
|
# Generate machine config immediately
|
||||||
|
print_info "Generating machine configuration for $WORKER_IP..."
|
||||||
|
if wild-cluster-node-machine-config-generate "$WORKER_IP"; then
|
||||||
|
print_success "Machine configuration generated for $WORKER_IP"
|
||||||
|
|
||||||
|
# Ask if user wants to apply the configuration now
|
||||||
|
echo ""
|
||||||
|
read -p "Apply configuration to worker node $WORKER_IP now? (y/N): " -r apply_config
|
||||||
|
if [[ $apply_config =~ ^[Yy]$ ]]; then
|
||||||
|
# Worker nodes are typically in maintenance mode during setup
|
||||||
|
print_info "Applying configuration in insecure mode (maintenance mode)..."
|
||||||
|
wild-cluster-node-up "$WORKER_IP" --insecure
|
||||||
|
else
|
||||||
|
print_info "Configuration not applied. You can apply it later with:"
|
||||||
|
print_info " wild-cluster-node-up $WORKER_IP --insecure"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
print_warning "Failed to generate machine configuration for $WORKER_IP"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
print_error "Failed to detect hardware for worker node $WORKER_IP"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
print_success "Phase 2 completed: Node hardware detection"
|
||||||
|
echo ""
|
||||||
|
else
|
||||||
|
print_info "Skipping Phase 2: Node Hardware Detection"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# PHASE 3: Machine Config Summary and Verification
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
if [ "${SKIP_CONFIGS}" = false ]; then
|
||||||
|
print_header "Phase 3: Machine Config Summary and Verification"
|
||||||
|
|
||||||
|
# Get all registered nodes from cluster.nodes.active
|
||||||
|
REGISTERED_NODES=()
|
||||||
|
CONTROL_NODES=()
|
||||||
|
WORKER_NODES=()
|
||||||
|
|
||||||
|
if yq eval '.cluster.nodes.active // {}' "${WC_HOME}/config.yaml" | grep -q "interface"; then
|
||||||
|
ALL_NODE_IPS=$(yq eval '.cluster.nodes.active | keys | .[]' "${WC_HOME}/config.yaml" 2>/dev/null || echo "")
|
||||||
|
|
||||||
|
for NODE_IP in $ALL_NODE_IPS; do
|
||||||
|
# Remove quotes from yq output
|
||||||
|
NODE_IP=$(echo "$NODE_IP" | tr -d '"')
|
||||||
|
REGISTERED_NODES+=("$NODE_IP")
|
||||||
|
|
||||||
|
# Check if it's a control node
|
||||||
|
IS_CONTROL=$(yq eval ".cluster.nodes.active.\"${NODE_IP}\".control" "${WC_HOME}/config.yaml" 2>/dev/null)
|
||||||
|
if [ "$IS_CONTROL" = "true" ]; then
|
||||||
|
CONTROL_NODES+=("$NODE_IP")
|
||||||
|
else
|
||||||
|
WORKER_NODES+=("$NODE_IP")
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ${#REGISTERED_NODES[@]} -eq 0 ]; then
|
||||||
|
print_warning "No nodes have been registered yet."
|
||||||
|
print_info "Run Phase 2 (Hardware Detection) first to register nodes"
|
||||||
|
else
|
||||||
|
print_success "Machine configuration summary:"
|
||||||
|
echo ""
|
||||||
|
print_info "Registered nodes: ${#REGISTERED_NODES[@]}"
|
||||||
|
print_info " Control plane nodes: ${#CONTROL_NODES[@]}"
|
||||||
|
print_info " Worker nodes: ${#WORKER_NODES[@]}"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Check for any nodes that might need machine config generation
|
||||||
|
MISSING_CONFIGS=()
|
||||||
|
NODE_SETUP_DIR="${WC_HOME}/setup/cluster-nodes"
|
||||||
|
|
||||||
|
if [ -d "$NODE_SETUP_DIR/final" ]; then
|
||||||
|
for NODE_IP in "${REGISTERED_NODES[@]}"; do
|
||||||
|
CONFIG_FILE="$NODE_SETUP_DIR/final/${NODE_IP}.yaml"
|
||||||
|
|
||||||
|
if [ ! -f "$CONFIG_FILE" ]; then
|
||||||
|
MISSING_CONFIGS+=("$NODE_IP")
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
else
|
||||||
|
MISSING_CONFIGS=("${REGISTERED_NODES[@]}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ${#MISSING_CONFIGS[@]} -gt 0 ]; then
|
||||||
|
print_warning "Some nodes are missing machine configurations:"
|
||||||
|
for NODE_IP in "${MISSING_CONFIGS[@]}"; do
|
||||||
|
print_info "Generating missing config for $NODE_IP..."
|
||||||
|
wild-cluster-node-machine-config-generate "$NODE_IP"
|
||||||
|
done
|
||||||
|
else
|
||||||
|
print_success "All registered nodes have machine configurations"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
print_info "Machine configuration files:"
|
||||||
|
for NODE_IP in "${REGISTERED_NODES[@]}"; do
|
||||||
|
CONFIG_FILE="$NODE_SETUP_DIR/final/${NODE_IP}.yaml"
|
||||||
|
if [ -f "$CONFIG_FILE" ]; then
|
||||||
|
NODE_TYPE=$(yq eval ".cluster.nodes.active.\"${NODE_IP}\".control" "${WC_HOME}/config.yaml" 2>/dev/null)
|
||||||
|
if [ "$NODE_TYPE" = "true" ]; then
|
||||||
|
print_success " ✓ $CONFIG_FILE (control plane)"
|
||||||
|
else
|
||||||
|
print_success " ✓ $CONFIG_FILE (worker)"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
print_success "Phase 3 completed: Machine config summary and verification"
|
||||||
|
echo ""
|
||||||
|
else
|
||||||
|
print_info "Skipping Phase 3: Machine Config Summary and Verification"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# COMPLETION
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
print_header "Wild-Cloud Cluster Setup Complete!"
|
||||||
|
|
||||||
|
print_success "Cluster infrastructure setup completed!"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
print_info "What was accomplished:"
|
||||||
|
if [ "${SKIP_INSTALLER}" = false ]; then
|
||||||
|
print_info "✅ Phase 1: Installer image generated"
|
||||||
|
else
|
||||||
|
print_info "⏸️ Phase 1: Installer image generation (skipped)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${SKIP_HARDWARE}" = false ]; then
|
||||||
|
print_info "✅ Phase 2: Node hardware detection completed"
|
||||||
|
else
|
||||||
|
print_info "⏸️ Phase 2: Node hardware detection (skipped)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${SKIP_CONFIGS}" = false ]; then
|
||||||
|
print_info "✅ Phase 3: Machine configs generated"
|
||||||
|
else
|
||||||
|
print_info "⏸️ Phase 3: Machine config generation (skipped)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
print_info "Configuration files:"
|
||||||
|
echo " - ${WC_HOME}/config.yaml"
|
||||||
|
echo " - ${WC_HOME}/secrets.yaml"
|
||||||
|
|
||||||
|
if [ -d "${WC_HOME}/setup/cluster-nodes/final" ] && [ "$(ls -A ${WC_HOME}/setup/cluster-nodes/final 2>/dev/null)" ]; then
|
||||||
|
echo ""
|
||||||
|
print_info "Machine configurations:"
|
||||||
|
for config_file in "${WC_HOME}/setup/cluster-nodes/final"/*.yaml; do
|
||||||
|
if [ -f "$config_file" ]; then
|
||||||
|
echo " - $config_file"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
print_info "Next steps:"
|
||||||
|
echo " 1. Apply machine configurations to your nodes"
|
||||||
|
echo " 2. Bootstrap your cluster and verify it's running"
|
||||||
|
echo " 3. Install cluster services:"
|
||||||
|
echo " wild-setup-services"
|
||||||
|
|
||||||
|
print_success "Ready for cluster services installation!"
|
303
bin/wild-setup-scaffold
Executable file
303
bin/wild-setup-scaffold
Executable file
@@ -0,0 +1,303 @@
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
# Get WC_ROOT (where this script and templates live)
|
||||||
|
WC_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
|
export WC_ROOT
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# HELPER FUNCTIONS
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Helper functions
|
||||||
|
print_header() {
|
||||||
|
echo -e "\n${BLUE}=== $1 ===${NC}\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_info() {
|
||||||
|
echo -e "${BLUE}INFO:${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_warning() {
|
||||||
|
echo -e "${YELLOW}WARNING:${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_success() {
|
||||||
|
echo -e "${GREEN}SUCCESS:${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_error() {
|
||||||
|
echo -e "${RED}ERROR:${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to prompt for input with default value
|
||||||
|
prompt_with_default() {
|
||||||
|
local prompt="$1"
|
||||||
|
local default="$2"
|
||||||
|
local current_value="$3"
|
||||||
|
local result
|
||||||
|
|
||||||
|
if [ -n "${current_value}" ] && [ "${current_value}" != "null" ]; then
|
||||||
|
printf "%s [current: %s]: " "${prompt}" "${current_value}" >&2
|
||||||
|
read -r result
|
||||||
|
if [ -z "${result}" ]; then
|
||||||
|
result="${current_value}"
|
||||||
|
fi
|
||||||
|
elif [ -n "${default}" ]; then
|
||||||
|
printf "%s [default: %s]: " "${prompt}" "${default}" >&2
|
||||||
|
read -r result
|
||||||
|
if [ -z "${result}" ]; then
|
||||||
|
result="${default}"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
printf "%s: " "${prompt}" >&2
|
||||||
|
read -r result
|
||||||
|
while [ -z "${result}" ]; do
|
||||||
|
printf "This value is required. Please enter a value: " >&2
|
||||||
|
read -r result
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "${result}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to get current config value safely
|
||||||
|
get_current_config() {
|
||||||
|
local key="$1"
|
||||||
|
if [ -f "${WC_HOME}/config.yaml" ]; then
|
||||||
|
set +e
|
||||||
|
result=$(wild-config "${key}" 2>/dev/null)
|
||||||
|
set -e
|
||||||
|
echo "${result}"
|
||||||
|
else
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to get current secret value safely
|
||||||
|
get_current_secret() {
|
||||||
|
local key="$1"
|
||||||
|
if [ -f "${WC_HOME}/secrets.yaml" ]; then
|
||||||
|
set +e
|
||||||
|
result=$(wild-secret "${key}" 2>/dev/null)
|
||||||
|
set -e
|
||||||
|
echo "${result}"
|
||||||
|
else
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
UPDATE=false
|
||||||
|
|
||||||
|
# Parse arguments
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case $1 in
|
||||||
|
--update)
|
||||||
|
UPDATE=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
echo "Usage: $0 [--update]"
|
||||||
|
echo ""
|
||||||
|
echo "Initialize Wild-Cloud scaffold and basic configuration."
|
||||||
|
echo ""
|
||||||
|
echo "Options:"
|
||||||
|
echo " --update Update existing cloud files (overwrite)"
|
||||||
|
echo " -h, --help Show this help message"
|
||||||
|
echo ""
|
||||||
|
echo "This script will:"
|
||||||
|
echo " - Initialize the .wildcloud directory structure"
|
||||||
|
echo " - Copy template files to the current directory"
|
||||||
|
echo " - Configure basic settings (email, domains, cluster name)"
|
||||||
|
echo ""
|
||||||
|
echo "After running this script, use:"
|
||||||
|
echo " - wild-setup-cluster # Set up Kubernetes cluster (Phases 1-3)"
|
||||||
|
echo " - wild-setup-services # Install cluster services (Phase 4)"
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
-*)
|
||||||
|
echo "Unknown option $1"
|
||||||
|
echo "Usage: $0 [--update]"
|
||||||
|
echo "Use --help for full usage information"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Unexpected argument: $1"
|
||||||
|
echo "Usage: $0 [--update]"
|
||||||
|
echo "Use --help for full usage information"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
# Set up cloud directory (WC_HOME is where user's cloud will be)
|
||||||
|
WC_HOME="$(pwd)"
|
||||||
|
export WC_HOME
|
||||||
|
|
||||||
|
# Template directory (in WC_ROOT, never written to)
|
||||||
|
TEMPLATE_DIR="${WC_ROOT}/setup/home-scaffold"
|
||||||
|
|
||||||
|
if [ ! -d "${TEMPLATE_DIR}" ]; then
|
||||||
|
echo "Error: Template directory not found at ${TEMPLATE_DIR}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if cloud already exists
|
||||||
|
if [ -d ".wildcloud" ]; then
|
||||||
|
echo "Wild-Cloud already exists in this directory."
|
||||||
|
echo ""
|
||||||
|
read -p "Do you want to update cloud files? (y/N): " -n 1 -r
|
||||||
|
echo
|
||||||
|
if [[ $REPLY =~ ^[Yy]$ ]]; then
|
||||||
|
UPDATE=true
|
||||||
|
echo "Updating cloud files..."
|
||||||
|
else
|
||||||
|
echo "Skipping cloud update."
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
# Check if current directory is empty for new cloud
|
||||||
|
if [ "${UPDATE}" = false ]; then
|
||||||
|
# Check if directory has any files (including hidden files, excluding . and .. and .git)
|
||||||
|
if [ -n "$(find . -maxdepth 1 -name ".*" -o -name "*" | grep -v "^\.$" | grep -v "^\.\.$" | grep -v "^\./\.git$" | head -1)" ]; then
|
||||||
|
echo "Error: Current directory is not empty"
|
||||||
|
echo "Use --update flag to overwrite existing cloud files while preserving other files"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Initializing Wild-Cloud in $(pwd)"
|
||||||
|
UPDATE=false
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Initialize cloud files if needed
|
||||||
|
if [ ! -d ".wildcloud" ] || [ "${UPDATE}" = true ]; then
|
||||||
|
if [ "${UPDATE}" = true ]; then
|
||||||
|
echo "Updating cloud files (preserving existing custom files)"
|
||||||
|
else
|
||||||
|
echo "Creating cloud files"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Function to copy files and directories
|
||||||
|
copy_cloud_files() {
|
||||||
|
local src_dir="$1"
|
||||||
|
local dest_dir="$2"
|
||||||
|
|
||||||
|
# Create destination directory if it doesn't exist
|
||||||
|
mkdir -p "${dest_dir}"
|
||||||
|
|
||||||
|
# Copy directory structure
|
||||||
|
find "${src_dir}" -type d | while read -r src_subdir; do
|
||||||
|
rel_path="${src_subdir#${src_dir}}"
|
||||||
|
rel_path="${rel_path#/}" # Remove leading slash if present
|
||||||
|
if [ -n "${rel_path}" ]; then
|
||||||
|
mkdir -p "${dest_dir}/${rel_path}"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Copy files
|
||||||
|
find "${src_dir}" -type f | while read -r src_file; do
|
||||||
|
rel_path="${src_file#${src_dir}}"
|
||||||
|
rel_path="${rel_path#/}" # Remove leading slash if present
|
||||||
|
dest_file="${dest_dir}/${rel_path}"
|
||||||
|
|
||||||
|
# Ensure destination directory exists
|
||||||
|
dest_file_dir=$(dirname "${dest_file}")
|
||||||
|
mkdir -p "${dest_file_dir}"
|
||||||
|
|
||||||
|
if [ "${UPDATE}" = true ] && [ -f "${dest_file}" ]; then
|
||||||
|
echo "Updating: ${rel_path}"
|
||||||
|
else
|
||||||
|
echo "Creating: ${rel_path}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
cp "${src_file}" "${dest_file}"
|
||||||
|
done
|
||||||
|
}
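# copy_cloud_files mirrors the template tree into the target directory: it recreates
# every subdirectory first, then copies files one at a time so each copy can be logged
# as "Creating" or "Updating". A sketch of the call made below:
#
#   copy_cloud_files "${WC_ROOT}/setup/home-scaffold" "."
#
# Existing template files are overwritten on update, while files that exist only in the
# destination are left alone, which is why --update preserves custom files.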
|
||||||
|
|
||||||
|
# Copy cloud files to current directory
|
||||||
|
copy_cloud_files "${TEMPLATE_DIR}" "."
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "Wild-Cloud initialized successfully!"
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# BASIC CONFIGURATION
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
# Configure basic settings
|
||||||
|
if [ ! -f "${WC_HOME}/config.yaml" ] || [ -z "$(get_current_config "operator.email")" ]; then
|
||||||
|
print_header "Basic Configuration"
|
||||||
|
|
||||||
|
# Detect current network for suggestions
|
||||||
|
CURRENT_IP=$(ip route get 8.8.8.8 | awk '{print $7; exit}' 2>/dev/null || echo "192.168.1.100")
|
||||||
|
GATEWAY_IP=$(ip route | grep default | awk '{print $3; exit}' 2>/dev/null || echo "192.168.1.1")
|
||||||
|
SUBNET_PREFIX=$(echo "${CURRENT_IP}" | cut -d. -f1-3)
|
||||||
|
print_info "Detected network: ${SUBNET_PREFIX}.x (gateway: ${GATEWAY_IP})"
|
||||||
|
|
||||||
|
echo "This will configure basic settings for your wild-cloud deployment."
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Basic Information
|
||||||
|
current_email=$(get_current_config "operator.email")
|
||||||
|
email=$(prompt_with_default "Your email address (for Let's Encrypt certificates)" "" "${current_email}")
|
||||||
|
wild-config-set "operator.email" "${email}"
|
||||||
|
|
||||||
|
# Domain Configuration
|
||||||
|
current_base_domain=$(get_current_config "cloud.baseDomain")
|
||||||
|
base_domain=$(prompt_with_default "Your base domain name (e.g., example.com)" "" "${current_base_domain}")
|
||||||
|
wild-config-set "cloud.baseDomain" "${base_domain}"
|
||||||
|
|
||||||
|
current_domain=$(get_current_config "cloud.domain")
|
||||||
|
domain=$(prompt_with_default "Your public cloud domain" "cloud.${base_domain}" "${current_domain}")
|
||||||
|
wild-config-set "cloud.domain" "${domain}"
|
||||||
|
|
||||||
|
current_internal_domain=$(get_current_config "cloud.internalDomain")
|
||||||
|
internal_domain=$(prompt_with_default "Your internal cloud domain" "internal.${domain}" "${current_internal_domain}")
|
||||||
|
wild-config-set "cloud.internalDomain" "${internal_domain}"
|
||||||
|
|
||||||
|
# Derive cluster name from domain
|
||||||
|
cluster_name=$(echo "${domain}" | tr '.' '-' | tr '[:upper:]' '[:lower:]')
|
||||||
|
wild-config-set "cluster.name" "${cluster_name}"
|
||||||
|
print_info "Set cluster name to: ${cluster_name}"
|
||||||
|
|
||||||
|
print_success "Basic configuration completed"
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# COMPLETION
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
print_header "Wild-Cloud Scaffold Setup Complete!"
|
||||||
|
|
||||||
|
print_success "Cloud scaffold initialized successfully!"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
print_info "Configuration files:"
|
||||||
|
echo " - ${WC_HOME}/config.yaml"
|
||||||
|
echo " - ${WC_HOME}/secrets.yaml"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
print_info "Next steps:"
|
||||||
|
echo " 1. Set up your Kubernetes cluster:"
|
||||||
|
echo " wild-setup-cluster"
|
||||||
|
echo ""
|
||||||
|
echo " 2. Install cluster services:"
|
||||||
|
echo " wild-setup-services"
|
||||||
|
echo ""
|
||||||
|
echo "Or run the complete setup:"
|
||||||
|
echo " wild-setup"
|
||||||
|
|
||||||
|
print_success "Ready for cluster setup!"
|
337
bin/wild-setup-services
Executable file
337
bin/wild-setup-services
Executable file
@@ -0,0 +1,337 @@
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
|
# Get WC_ROOT (where this script and templates live)
|
||||||
|
WC_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
|
export WC_ROOT
|
||||||
|
|
||||||
|
# Set up cloud directory (WC_HOME is where user's cloud will be)
|
||||||
|
WC_HOME="$(pwd)"
|
||||||
|
export WC_HOME
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# HELPER FUNCTIONS
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Helper functions
|
||||||
|
print_header() {
|
||||||
|
echo -e "\n${BLUE}=== $1 ===${NC}\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_info() {
|
||||||
|
echo -e "${BLUE}INFO:${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_warning() {
|
||||||
|
echo -e "${YELLOW}WARNING:${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_success() {
|
||||||
|
echo -e "${GREEN}SUCCESS:${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
print_error() {
|
||||||
|
echo -e "${RED}ERROR:${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to prompt for input with default value
|
||||||
|
prompt_with_default() {
|
||||||
|
local prompt="$1"
|
||||||
|
local default="$2"
|
||||||
|
local current_value="$3"
|
||||||
|
local result
|
||||||
|
|
||||||
|
if [ -n "${current_value}" ] && [ "${current_value}" != "null" ]; then
|
||||||
|
printf "%s [current: %s]: " "${prompt}" "${current_value}" >&2
|
||||||
|
read -r result
|
||||||
|
if [ -z "${result}" ]; then
|
||||||
|
result="${current_value}"
|
||||||
|
fi
|
||||||
|
elif [ -n "${default}" ]; then
|
||||||
|
printf "%s [default: %s]: " "${prompt}" "${default}" >&2
|
||||||
|
read -r result
|
||||||
|
if [ -z "${result}" ]; then
|
||||||
|
result="${default}"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
printf "%s: " "${prompt}" >&2
|
||||||
|
read -r result
|
||||||
|
while [ -z "${result}" ]; do
|
||||||
|
printf "This value is required. Please enter a value: " >&2
|
||||||
|
read -r result
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "${result}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to get current config value safely
|
||||||
|
get_current_config() {
|
||||||
|
local key="$1"
|
||||||
|
if [ -f "${WC_HOME}/config.yaml" ]; then
|
||||||
|
set +e
|
||||||
|
result=$(wild-config "${key}" 2>/dev/null)
|
||||||
|
set -e
|
||||||
|
echo "${result}"
|
||||||
|
else
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to get current secret value safely
|
||||||
|
get_current_secret() {
|
||||||
|
local key="$1"
|
||||||
|
if [ -f "${WC_HOME}/secrets.yaml" ]; then
|
||||||
|
set +e
|
||||||
|
result=$(wild-secret "${key}" 2>/dev/null)
|
||||||
|
set -e
|
||||||
|
echo "${result}"
|
||||||
|
else
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Parse arguments
|
||||||
|
SKIP_INSTALL=false
|
||||||
|
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case $1 in
|
||||||
|
--skip-install)
|
||||||
|
SKIP_INSTALL=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
echo "Usage: $0 [options]"
|
||||||
|
echo ""
|
||||||
|
echo "Install Kubernetes cluster services (Phase 4)."
|
||||||
|
echo ""
|
||||||
|
echo "Options:"
|
||||||
|
echo " --skip-install Generate service configs but skip installation"
|
||||||
|
echo " -h, --help Show this help message"
|
||||||
|
echo ""
|
||||||
|
echo "This script will:"
|
||||||
|
echo " - Configure DNS and SSL certificate settings"
|
||||||
|
echo " - Configure storage settings (NFS, Docker registry)"
|
||||||
|
echo " - Generate cluster service configurations"
|
||||||
|
echo " - Install core services (MetalLB, Traefik, cert-manager, etc.)"
|
||||||
|
echo ""
|
||||||
|
echo "Prerequisites:"
|
||||||
|
echo " - Run 'wild-setup-scaffold' to initialize the cloud"
|
||||||
|
echo " - Run 'wild-setup-cluster' to set up cluster infrastructure"
|
||||||
|
echo " - Kubernetes cluster must be running and kubectl configured"
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
-*)
|
||||||
|
echo "Unknown option $1"
|
||||||
|
echo "Usage: $0 [options]"
|
||||||
|
echo "Use --help for full usage information"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Unexpected argument: $1"
|
||||||
|
echo "Usage: $0 [options]"
|
||||||
|
echo "Use --help for full usage information"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
# Check if we're in a wild-cloud directory
|
||||||
|
if [ ! -d ".wildcloud" ]; then
|
||||||
|
print_error "You must run this script from a wild-cloud directory"
|
||||||
|
print_info "Run 'wild-setup-scaffold' first to initialize a wild-cloud project"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check basic configuration
|
||||||
|
if [ -z "$(get_current_config "operator.email")" ]; then
|
||||||
|
print_error "Basic configuration is missing"
|
||||||
|
print_info "Run 'wild-setup-scaffold' first to configure basic settings"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check cluster configuration
|
||||||
|
if [ -z "$(get_current_config "cluster.name")" ]; then
|
||||||
|
print_error "Cluster configuration is missing"
|
||||||
|
print_info "Run 'wild-setup-cluster' first to configure cluster settings"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
print_header "Wild-Cloud Services Setup"
|
||||||
|
print_info "Installing Kubernetes cluster services (Phase 4)"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# PHASE 4: Cluster Services Installation
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
print_header "Phase 4: Cluster Services Installation"
|
||||||
|
|
||||||
|
# Configure DNS and certificates
|
||||||
|
if [ -z "$(get_current_config "cluster.certManager.cloudflare.domain")" ]; then
|
||||||
|
print_header "DNS and Certificate Configuration"
|
||||||
|
echo "For automatic SSL certificates and DNS management, we use Cloudflare."
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
base_domain=$(get_current_config "cloud.baseDomain")
|
||||||
|
domain=$(get_current_config "cloud.domain")
|
||||||
|
|
||||||
|
echo "Is your domain '${base_domain}' registered and managed through Cloudflare? (y/n)"
|
||||||
|
read -r use_cloudflare
|
||||||
|
|
||||||
|
if [[ "${use_cloudflare}" =~ ^[Yy]$ ]]; then
|
||||||
|
wild-config-set "cluster.certManager.cloudflare.domain" "${domain}"
|
||||||
|
|
||||||
|
current_cf_token=$(get_current_secret "cloudflare.token")
|
||||||
|
if [ -z "${current_cf_token}" ]; then
|
||||||
|
echo ""
|
||||||
|
print_info "You'll need a Cloudflare API token with the following permissions:"
|
||||||
|
echo " - Zone:Zone:Read"
|
||||||
|
echo " - Zone:DNS:Edit"
|
||||||
|
echo " - Include:All zones"
|
||||||
|
echo ""
|
||||||
|
echo "Create one at: https://dash.cloudflare.com/profile/api-tokens"
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
|
||||||
|
cf_token=$(prompt_with_default "Cloudflare API token" "" "${current_cf_token}")
|
||||||
|
wild-secret-set "cloudflare.token" "${cf_token}"
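# The token goes through wild-secret-set rather than wild-config-set, so it lands in
# secrets.yaml instead of config.yaml — presumably (the file layout is not shown in this
# commit) as a simple nested key:
#
#   cloudflare:
#     token: <api-token>
#
# Keeping it out of config.yaml keeps the non-secret configuration safe to share.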
|
||||||
|
else
|
||||||
|
print_warning "You'll need to configure DNS and SSL certificates manually."
|
||||||
|
print_info "Consider transferring your domain to Cloudflare for easier management."
|
||||||
|
fi
|
||||||
|
|
||||||
|
print_success "DNS and certificate configuration completed"
|
||||||
|
echo ""
|
||||||
|
fi

# Configure storage settings
print_header "Storage Configuration"

dns_ip=$(get_current_config "cloud.dns.ip")
internal_domain=$(get_current_config "cloud.internalDomain")

# NFS settings
current_nfs_host=$(get_current_config "cloud.nfs.host")
if [ -z "$current_nfs_host" ] || [ "$current_nfs_host" = "null" ]; then
    nfs_host=$(prompt_with_default "NFS server host" "${dns_ip}" "${current_nfs_host}")
    wild-config-set "cloud.nfs.host" "${nfs_host}"
fi

current_media_path=$(get_current_config "cloud.nfs.mediaPath")
if [ -z "$current_media_path" ] || [ "$current_media_path" = "null" ]; then
    media_path=$(prompt_with_default "NFS media path" "/mnt/storage/media" "${current_media_path}")
    wild-config-set "cloud.nfs.mediaPath" "${media_path}"
fi

current_storage_capacity=$(get_current_config "cloud.nfs.storageCapacity")
if [ -z "$current_storage_capacity" ] || [ "$current_storage_capacity" = "null" ]; then
    storage_capacity=$(prompt_with_default "Storage capacity for NFS PV" "1Ti" "${current_storage_capacity}")
    wild-config-set "cloud.nfs.storageCapacity" "${storage_capacity}"
fi

# Docker Registry settings
current_registry_host=$(get_current_config "cloud.dockerRegistryHost")
if [ -z "$current_registry_host" ] || [ "$current_registry_host" = "null" ]; then
    registry_host=$(prompt_with_default "Docker registry hostname" "registry.${internal_domain}" "${current_registry_host}")
    wild-config-set "cloud.dockerRegistryHost" "${registry_host}"
fi

current_registry_storage=$(get_current_config "cluster.dockerRegistry.storage")
if [ -z "$current_registry_storage" ] || [ "$current_registry_storage" = "null" ]; then
    registry_storage=$(prompt_with_default "Docker registry storage size" "10Gi" "${current_registry_storage}")
    wild-config-set "cluster.dockerRegistry.storage" "${registry_storage}"
fi

print_success "Storage configuration completed"
echo ""
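
# For reference (illustrative values, not written by this comment): after this
# section the storage-related keys in ${WC_HOME}/config.yaml look roughly like:
#
#   cloud:
#     nfs:
#       host: 192.168.1.2              # example
#       mediaPath: /mnt/storage/media
#       storageCapacity: 1Ti
#     dockerRegistryHost: registry.internal.example.com
#   cluster:
#     dockerRegistry:
#       storage: 10Gi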
|
||||||
|
|
||||||
|
print_info "This phase prepares and installs core cluster services (MetalLB, Traefik, cert-manager, etc.)"
|
||||||
|
print_warning "Make sure your cluster is running and kubectl is configured!"
|
||||||
|
|
||||||
|
# Generate cluster services setup files
|
||||||
|
print_info "Generating cluster services setup files..."
|
||||||
|
wild-cluster-services-generate --force
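# (The generated manifests and install scripts are expected to land under
# ${WC_HOME}/setup/cluster/, which the completion summary below checks for.)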

if [ "${SKIP_INSTALL}" = false ]; then
    read -p "Do you want to install cluster services now? (y/N): " -r install_services

    if [[ $install_services =~ ^[Yy]$ ]]; then
        print_info "Installing cluster services..."
        wild-cluster-services-up
        SERVICES_INSTALLED=true
    else
        print_info "Skipping cluster services installation"
        print_info "You can install them later with: wild-cluster-services-up"
        SKIP_INSTALL=true
    fi
else
    print_info "Skipping cluster services installation (--skip-install specified)"
    print_info "You can install them later with: wild-cluster-services-up"
fi

if [ "${SKIP_INSTALL}" = false ] && [ "${SERVICES_INSTALLED:-false}" = true ]; then
    print_success "Phase 4 completed: Cluster services installation"
else
    print_success "Phase 4 completed: Cluster services configuration generated"
fi
echo ""

# =============================================================================
# COMPLETION
# =============================================================================

print_header "Wild-Cloud Services Setup Complete!"

print_success "Cluster services setup completed!"
echo ""

print_info "Configuration files:"
echo "  - ${WC_HOME}/config.yaml"
echo "  - ${WC_HOME}/secrets.yaml"

if [ -d "${WC_HOME}/setup/cluster" ]; then
    echo ""
    print_info "Generated service configurations:"
    echo "  - ${WC_HOME}/setup/cluster/"
fi

echo ""
if [ "${SKIP_INSTALL}" = false ] && [ "${SERVICES_INSTALLED:-false}" = true ]; then
    print_info "Cluster services have been installed!"
    echo ""

    if command -v kubectl >/dev/null 2>&1; then
        INTERNAL_DOMAIN=$(wild-config cloud.internalDomain 2>/dev/null || echo "your-internal-domain")
        echo "Next steps:"
        echo "  1. Access the dashboard at: https://dashboard.${INTERNAL_DOMAIN}"
        echo "  2. Get the dashboard token with: ./bin/dashboard-token"
        echo ""
        echo "To verify components, run:"
        echo "  - kubectl get pods -n cert-manager"
        echo "  - kubectl get pods -n externaldns"
        echo "  - kubectl get pods -n kubernetes-dashboard"
        echo "  - kubectl get clusterissuers"
    else
        echo "Next steps:"
        echo "  1. Verify your cluster services are running"
        echo "  2. Configure kubectl if not already done"
        echo "  3. Access your services via the configured ingress"
    fi
else
    echo "Next steps:"
    echo "  1. Ensure your cluster is running and kubectl is configured"
    echo "  2. Install services with: wild-cluster-services-up"
    echo "  3. Verify components are running correctly"
fi

print_success "Wild-Cloud setup completed!"
@@ -1,22 +0,0 @@
machine:
  install:
    disk: {{ .cluster.nodes.control.node1.disk }}
    image: factory.talos.dev/metal-installer/{{ .cluster.nodes.talos.schematicId}}:{{ .cluster.nodes.talos.version}}
  network:
    interfaces:
      - interface: {{ .cluster.nodes.control.node1.interface }}
        dhcp: false
        addresses:
          - {{ .cluster.nodes.control.node1.ip }}/24
        routes:
          - network: 0.0.0.0/0
            gateway: {{ .cloud.router.ip }}
        vip:
          ip: {{ .cluster.nodes.control.vip }}
cluster:
  discovery:
    enabled: true
    registries:
      service:
        disabled: true
  allowSchedulingOnControlPlanes: true
@@ -1,22 +0,0 @@
machine:
  install:
    disk: {{ .cluster.nodes.control.node2.disk }}
    image: factory.talos.dev/metal-installer/{{ .cluster.nodes.talos.schematicId}}:{{ .cluster.nodes.talos.version}}
  network:
    interfaces:
      - interface: {{ .cluster.nodes.control.node2.interface }}
        dhcp: false
        addresses:
          - {{ .cluster.nodes.control.node2.ip }}/24
        routes:
          - network: 0.0.0.0/0
            gateway: {{ .cloud.router.ip }}
        vip:
          ip: {{ .cluster.nodes.control.vip }}
cluster:
  discovery:
    enabled: true
    registries:
      service:
        disabled: true
  allowSchedulingOnControlPlanes: true
@@ -1,13 +1,13 @@
 machine:
   install:
-    disk: {{ .cluster.nodes.control.node3.disk }}
+    disk: {{ index .cluster.nodes.active "{{NODE_IP}}" "disk" }}
     image: factory.talos.dev/metal-installer/{{ .cluster.nodes.talos.schematicId}}:{{ .cluster.nodes.talos.version}}
   network:
     interfaces:
-      - interface: {{ .cluster.nodes.control.node3.interface }}
+      - interface: {{ index .cluster.nodes.active "{{NODE_IP}}" "interface" }}
         dhcp: false
         addresses:
-          - {{ .cluster.nodes.control.node3.ip }}/24
+          - "{{NODE_IP}}/24"
         routes:
           - network: 0.0.0.0/0
             gateway: {{ .cloud.router.ip }}
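
The per-node control-plane patches (node1/node2 deleted above, node3 rewritten here) appear to be collapsed into one template keyed by node IP. A sketch of how the lookup resolves, assuming `{{NODE_IP}}` is a placeholder substituted with the target node's IP before the file is rendered and that `config.yaml` carries a `cluster.nodes.active` map (illustrative values):

    cluster:
      nodes:
        active:
          "192.168.1.31":
            disk: /dev/nvme0n1
            interface: enp1s0

With `{{NODE_IP}}` replaced by `192.168.1.31`, `{{ index .cluster.nodes.active "192.168.1.31" "disk" }}` renders `/dev/nvme0n1`, so the same template can serve any registered node.
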
@@ -1,10 +1,7 @@
 machine:
   install:
-    disk: /dev/sdc
+    disk: {{ index .cluster.nodes.active "{{NODE_IP}}" "disk" }}
+    image: factory.talos.dev/metal-installer/{{ .cluster.nodes.talos.schematicId}}:{{ .cluster.nodes.talos.version}}
-  network:
-    interfaces:
-      - interface: enp4s0
-        dhcp: true
   kubelet:
     extraMounts:
       - destination: /var/lib/longhorn
@@ -14,9 +11,3 @@ machine:
           - bind
           - rshared
           - rw
-# NOTE: System extensions need to be added via Talos Image Factory
-# customization:
-#   systemExtensions:
-#     officialExtensions:
-#       - siderolabs/iscsi-tools
-#       - siderolabs/util-linux-tools
@@ -34,7 +34,7 @@ sleep 30

 # Setup Cloudflare API token for DNS01 challenges
 echo "Creating Cloudflare API token secret..."
-CLOUDFLARE_API_TOKEN=$(wild-secret cluster.certManager.cloudflare.apiToken) || exit 1
+CLOUDFLARE_API_TOKEN=$(wild-secret cloudflare.token) || exit 1
 kubectl create secret generic cloudflare-api-token \
   --namespace cert-manager \
   --from-literal=api-token="${CLOUDFLARE_API_TOKEN}" \
@@ -14,7 +14,7 @@ data:
         reload
         template IN A {
             match (.*)\.{{ .cloud.internalDomain | strings.ReplaceAll "." "\\." }}\.
-            answer "{{`{{ .Name }}`}} 60 IN A {{ .cluster.loadBalancerIp }}"
+            answer "{{`{{"{{ .Name }}"}}`}} 60 IN A {{ .cluster.loadBalancerIp }}"
         }
         template IN AAAA {
             match (.*)\.{{ .cloud.internalDomain | strings.ReplaceAll "." "\\." }}\.
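
A hedged note on the escaping change above: this Corefile is itself a Go/gomplate template, while CoreDNS's `template` plugin needs to see the literal `{{ .Name }}` at runtime. The old form produced that literal after a single rendering pass; the new form appears to survive two passes (for example, if the file is templated once when setup files are generated and once more later):

    pass 1: {{`{{"{{ .Name }}"}}`}}  ->  {{"{{ .Name }}"}}
    pass 2: {{"{{ .Name }}"}}        ->  {{ .Name }}

The number of rendering passes is an assumption; the commit itself only shows the extra layer of quoting.
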
@@ -21,7 +21,7 @@ kubectl apply -k ${EXTERNALDNS_DIR}/kustomize

 # Setup Cloudflare API token secret
 echo "Creating Cloudflare API token secret..."
-CLOUDFLARE_API_TOKEN=$(wild-secret cluster.certManager.cloudflare.apiToken) || exit 1
+CLOUDFLARE_API_TOKEN=$(wild-secret cloudflare.token) || exit 1
 kubectl create secret generic cloudflare-api-token \
   --namespace externaldns \
   --from-literal=api-token="${CLOUDFLARE_API_TOKEN}" \