Revise wild-setup-cluster to use a single wild-node-setup command in place of node-patch-generate and node-up.

2025-10-01 03:52:16 -07:00
parent c7b29e5954
commit ecdb2f2916
9 changed files with 630 additions and 756 deletions


@@ -5,15 +5,10 @@ set -o pipefail
# Parse arguments
SKIP_INSTALLER=false
SKIP_HARDWARE=false
while [[ $# -gt 0 ]]; do
case $1 in
--skip-installer)
SKIP_INSTALLER=true
shift
;;
--skip-hardware)
SKIP_HARDWARE=true
shift
@@ -26,7 +21,6 @@ while [[ $# -gt 0 ]]; do
echo "Control Options:"
echo " --skip-installer Skip Installer image generation"
echo " --skip-hardware Skip Node hardware detection"
echo " --skip-configs Skip Machine config generation"
echo " -h, --help Show this help message"
echo ""
echo "Prerequisites:"
@@ -54,7 +48,7 @@ done
# Initialize Wild Cloud environment
if [ -z "${WC_ROOT}" ]; then
print "WC_ROOT is not set."
echo "ERROR: WC_ROOT is not set."
exit 1
else
source "${WC_ROOT}/scripts/common.sh"
@@ -136,310 +130,280 @@ fi
# =============================================================================
if [ "${SKIP_HARDWARE}" = false ]; then
print_header "Control node registration"
print_header "Control Plane Node Setup"
# Automatically configure the first three IPs after VIP for control plane nodes
vip_last_octet=$(echo "$vip" | cut -d. -f4)
vip_prefix=$(echo "$vip" | cut -d. -f1-3)
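# Worked example (assuming vip=192.168.8.20): vip_prefix=192.168.8 and
# vip_last_octet=20, so control-1..3 get 192.168.8.21, .22, and .23 below.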
# Process each control plane node
# Set up control plane nodes
for i in 1 2 3; do
NODE_NAME="${HOSTNAME_PREFIX}control-${i}"
TARGET_IP="${vip_prefix}.$(( vip_last_octet + i ))"
print_info "Checking for control plane node: $NODE_NAME (IP: $TARGET_IP)"
if wild-config --check "cluster.nodes.active.${NODE_NAME}.interface"; then
print_success "Node $NODE_NAME already registered."
continue
fi
print_info "Setting up control plane node: $NODE_NAME (IP: $TARGET_IP)"
if ! wild-config --check "cluster.nodes.active.${NODE_NAME}.role"; then
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".role" "controlplane"
fi
if ! wild-config --check "cluster.nodes.active.${NODE_NAME}.targetIp"; then
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".targetIp" "$TARGET_IP"
fi
print_info "${NODE_NAME} not found. Please ensure the node is powered on and running Talos in maintenance mode."
read -p "Is $NODE_NAME in maintenance mode now? (Y/n): " -r register_node
if [[ $register_node =~ ^[Nn]$ ]]; then
print_info "Skipping bringing up node $NODE_NAME registration"
continue
fi
# Detect node hardware
print_info "Attempting detection at target IP $TARGET_IP..."
DETECTION_IP="$TARGET_IP"
NODE_INFO=""
if wild-node-detect "$TARGET_IP" >/dev/null 2>&1; then
NODE_INFO=$(wild-node-detect "$TARGET_IP")
print_success "Node detected at target IP $TARGET_IP"
else
# Fall back to current IP
print_warning "Node not accessible at target IP $TARGET_IP"
read -p "Enter current IP for this node: " -r CURRENT_IP
if [ -z "$CURRENT_IP" ]; then
print_warning "Skipping node $NODE_NAME registration"
continue
fi
print_info "Attempting detection at current IP $CURRENT_IP..."
if wild-node-detect "$CURRENT_IP" >/dev/null 2>&1; then
NODE_INFO=$(wild-node-detect "$CURRENT_IP")
DETECTION_IP="$CURRENT_IP"
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".currentIp" "$CURRENT_IP"
print_success "Node detected at current IP $CURRENT_IP"
else
print_error "Failed to detect node at $CURRENT_IP"
continue
fi
fi
if [ -z "$NODE_INFO" ]; then
print_error "No hardware information received from node"
continue
fi
INTERFACE=$(echo "$NODE_INFO" | jq -r '.interface')
SELECTED_DISK=$(echo "$NODE_INFO" | jq -r '.selected_disk')
AVAILABLE_DISKS=$(echo "$NODE_INFO" | jq -r '.disks[] | "\(.path) (\((.size / 1000000000) | floor)GB)"' | paste -sd, -)
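# The jq expression converts each disk's byte size to whole decimal GB, e.g.
# {"path":"/dev/sda","size":500107862016} renders as "/dev/sda (500GB)", and
# paste -sd, - joins all disks onto one comma-separated line.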
print_success "Hardware detected:"
print_info " - Interface: $INTERFACE"
print_info " - Available disks: $AVAILABLE_DISKS"
print_info " - Selected disk: $SELECTED_DISK"
# User system disk selection
echo ""
read -p "Use selected disk '$SELECTED_DISK'? (Y/n): " -r use_disk
if [[ $use_disk =~ ^[Nn]$ ]]; then
echo "Available disks:"
echo "$NODE_INFO" | jq -r '.disks[] | "\(.path) (\((.size / 1000000000) | floor)GB)"' | nl -w2 -s') '
read -p "Enter disk number: " -r disk_num
SELECTED_DISK=$(echo "$NODE_INFO" | jq -r ".disks[$((disk_num-1))].path")
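# nl numbers the menu from 1 while jq indexes .disks from 0, hence the
# $((disk_num-1)) adjustment; out-of-range input yields "null", which the
# check below rejects.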
if [ "$SELECTED_DISK" = "null" ] || [ -z "$SELECTED_DISK" ]; then
print_error "Invalid disk selection"
continue
fi
print_info "Selected disk: $SELECTED_DISK"
fi
# Update config.yaml with hardware info.
print_info "Updating configuration for $NODE_NAME..."
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".interface" "$INTERFACE"
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".disk" "$SELECTED_DISK"
# Copy current Talos version and schematic ID to this node
# Pre-configure node role and target IP
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".role" "controlplane"
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".targetIp" "$TARGET_IP"
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".version" "$talos_version"
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".schematicId" "$schematic_id"
# The node is now configured. Bring it up.
echo ""
read -p "Bring node $NODE_NAME ($TARGET_IP) up now? (Y/n): " -r apply_config
if [[ ! $apply_config =~ ^[Nn]$ ]]; then
if [ "$DETECTION_IP" != "$TARGET_IP" ]; then
# Node is in maintenance mode, use insecure flag
print_info "Applying configuration in insecure mode (maintenance mode)..."
wild-cluster-node-up "$NODE_NAME" --insecure
else
# Node is already up, no insecure flag needed
print_info "Applying configuration..."
wild-cluster-node-up "$NODE_NAME" --force
fi
# Bootstrap the cluster after the first node is up.
if [ "$i" -eq 1 ]; then
read -p "The cluster should be bootstrapped after the first control node is ready. Is it ready? (Y/n): " -r is_ready
if [[ ! $is_ready =~ ^[Nn]$ ]]; then
print_info "Bootstrapping control plane node $TARGET_IP..."
talosctl config endpoint "$TARGET_IP"
# Attempt to bootstrap the cluster
if talosctl bootstrap --nodes "$TARGET_IP" 2>&1 | tee /tmp/bootstrap_output.log; then
print_success "Control plane node $TARGET_IP bootstrapped successfully!"
else
# Check if the error is because it's already bootstrapped
if grep -q "etcd data directory is not empty\|AlreadyExists" /tmp/bootstrap_output.log; then
print_info "Cluster is already bootstrapped on $TARGET_IP"
else
print_error "Failed to bootstrap control plane node $TARGET_IP"
print_info "Bootstrap output:"
cat /tmp/bootstrap_output.log
rm -f /tmp/bootstrap_output.log
continue
fi
fi
rm -f /tmp/bootstrap_output.log
# Wait for VIP to become available before using it
print_info "Waiting for VIP $vip to become available..."
max_attempts=30
attempt=1
vip_ready=false
while [ $attempt -le $max_attempts ]; do
if ping -c 1 -W 2 "$vip" >/dev/null 2>&1; then
# VIP responds to ping, now test Talos API
if talosctl -e "$vip" -n "$vip" version >/dev/null 2>&1; then
print_success "VIP $vip is ready (attempt $attempt/$max_attempts)"
vip_ready=true
break
fi
fi
print_info "VIP not ready, waiting... (attempt $attempt/$max_attempts)"
sleep 2
attempt=$((attempt + 1))
done
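# Worst case this loop sleeps 30 x 2s = 60s, plus up to 2s per failed ping
# (-W 2), before falling back to direct node access below.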
if [ "$vip_ready" = true ]; then
talosctl config endpoint "$vip"
print_info "Talos endpoint set to control plane VIP: $vip"
if talosctl kubeconfig "$vip"; then
print_success "Talos kubeconfig updated for control plane VIP: $vip"
else
print_error "Failed to get kubeconfig from VIP: $vip"
print_info "You can try again later with: talosctl kubeconfig $vip"
fi
else
print_error "VIP $vip did not become available after $max_attempts attempts"
print_warning "Falling back to direct node access"
print_info "Talos endpoint remains set to: $TARGET_IP"
print_info "You can try switching to VIP later with: talosctl config endpoint $vip"
fi
# Check if node is already configured
if wild-config --check "cluster.nodes.active.${NODE_NAME}.interface"; then
print_success "Node $NODE_NAME already configured"
echo ""
read -p "Re-deploy node $NODE_NAME? (y/N): " -r redeploy_node
if [[ $redeploy_node =~ ^[Yy]$ ]]; then
if ! wild-node-setup "$NODE_NAME"; then
print_error "Failed to set up node $NODE_NAME"
continue
fi
else
continue
fi
else
# Node needs initial setup
print_info "Node $NODE_NAME requires hardware detection and setup"
echo ""
read -p "Set up node $NODE_NAME now? (Y/n): " -r setup_node
if [[ $setup_node =~ ^[Nn]$ ]]; then
print_info "Skipping node $NODE_NAME setup"
continue
fi
else
print_info "Configuration not applied. You can apply it later with:"
print_info " wild-cluster-node-up $NODE_NAME --insecure"
# Run complete node setup
if ! wild-node-setup "$NODE_NAME"; then
print_error "Failed to set up node $NODE_NAME"
print_info "You can retry later with: wild-node-setup $NODE_NAME"
continue
fi
fi
# Bootstrap the cluster after the first node is up
if [ "$i" -eq 1 ]; then
echo ""
read -p "Bootstrap the cluster on $NODE_NAME? (Y/n): " -r bootstrap_cluster
if [[ ! $bootstrap_cluster =~ ^[Nn]$ ]]; then
print_header "Bootstrapping Cluster: $NODE_NAME"
talosctl config endpoint "$TARGET_IP"
if talosctl bootstrap --nodes "$TARGET_IP" 2>&1 | tee /tmp/bootstrap_output.log; then
print_success "Cluster bootstrap initiated successfully."
else
if grep -q "etcd data directory is not empty\|AlreadyExists" /tmp/bootstrap_output.log; then
print_info "Cluster is already bootstrapped."
else
print_error "Failed to bootstrap cluster."
print_info "Bootstrap output:"
cat /tmp/bootstrap_output.log
rm -f /tmp/bootstrap_output.log
continue
fi
fi
mv -f /tmp/bootstrap_output.log /tmp/bootstrap_output_success.log
# Step 1: Verify etcd cluster health
print_info -n "Step 1/6: Verifying etcd cluster health."
max_attempts=30
for attempt in $(seq 1 $max_attempts); do
if talosctl -n "$TARGET_IP" etcd status >/dev/null 2>&1; then
echo ""
print_success "etcd cluster is healthy."
break
fi
if [ $attempt -eq $max_attempts ]; then
echo ""
print_error "etcd cluster not healthy after $max_attempts attempts."
print_info "Troubleshooting steps:"
print_info " 1. Check etcd service: talosctl -n $TARGET_IP service etcd"
print_info " 2. Check etcd logs: talosctl -n $TARGET_IP logs etcd"
print_info " 3. Check etcd status details: talosctl -n $TARGET_IP etcd status"
print_info " 4. Verify bootstrap completed: talosctl -n $TARGET_IP get members"
exit 1
fi
printf "."
sleep 10
done
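# Retry budget: 30 attempts at 10s intervals, i.e. about five minutes for
# etcd to report healthy before the script gives up.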
# Step 2: Wait for VIP to be assigned to interface
print_info -n "Step 2/6: Waiting for VIP $vip to be assigned to interface."
max_attempts=90
for attempt in $(seq 1 $max_attempts); do
if talosctl -n "$TARGET_IP" get addresses | grep -q "$vip/32"; then
echo ""
print_success "VIP $vip assigned to interface."
break
fi
if [ $attempt -eq $max_attempts ]; then
echo ""
print_error "VIP $vip was not assigned to interface after $max_attempts attempts"
print_info "Troubleshooting steps:"
print_info " 1. Check VIP controller logs: talosctl -n $TARGET_IP logs controller-runtime | grep vip"
print_info " 2. Check network configuration: talosctl -n $TARGET_IP get addresses"
print_info " 3. Verify VIP is within node's network range"
exit 1
fi
printf "."
sleep 10
done
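# The budget here is larger (90 x 10s, roughly 15 minutes): Talos announces
# the shared VIP via an etcd-backed election, so it can lag the previous step.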
# Step 3: Wait for control plane components to start
print_info -n "Step 3/6: Waiting for control plane components to start."
max_attempts=60
for attempt in $(seq 1 $max_attempts); do
# Check all three control plane components against a single container snapshot
containers_snapshot=$(talosctl -n "$TARGET_IP" containers -k 2>/dev/null || true)
apiserver_running=$(echo "$containers_snapshot" | grep -c "kube-apiserver.*CONTAINER_RUNNING" || true)
controller_running=$(echo "$containers_snapshot" | grep -c "kube-controller-manager.*CONTAINER_RUNNING" || true)
scheduler_running=$(echo "$containers_snapshot" | grep -c "kube-scheduler.*CONTAINER_RUNNING" || true)
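# grep -c exits non-zero when it counts zero matches; assuming the script
# runs under set -e (the hunk context shows set -o pipefail), the || true
# keeps an all-zero snapshot from aborting the loop.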
if [ "$apiserver_running" -gt 0 ] && [ "$controller_running" -gt 0 ] && [ "$scheduler_running" -gt 0 ]; then
echo ""
print_success "All control plane components are running (attempt $attempt)."
break
fi
if [ $attempt -eq $max_attempts ]; then
echo ""
print_error "Control plane components not all running after $max_attempts attempts."
print_info "Troubleshooting steps:"
print_info " 1. Check kubelet logs: talosctl -n $TARGET_IP logs kubelet"
print_info " 2. Check static pod status: talosctl -n $TARGET_IP containers -k | grep kube-"
print_info " 3. Restart kubelet if needed: talosctl -n $TARGET_IP service kubelet restart"
print_info "Current status:"
print_info " API Server running: $apiserver_running"
print_info " Controller Manager running: $controller_running"
print_info " Scheduler running: $scheduler_running"
exit 1
fi
# Restart kubelet every 40 attempts to refresh static pod creation
if [ $((attempt % 40)) -eq 0 ]; then
echo ""
print_info "Restarting kubelet to refresh static pod creation (attempt $attempt)..."
talosctl -n "$TARGET_IP" service kubelet restart > /dev/null 2>&1
print_info -n "Waiting for control plane components after kubelet restart."
sleep 30 # Give kubelet time to restart and create pods
fi
printf "."
sleep 10
done
# Step 4: Wait for API server to respond on VIP
print_info -n "Step 4/6: Waiting for API server to respond on VIP $vip."
max_attempts=60
for attempt in $(seq 1 $max_attempts); do
if curl -k -s --max-time 5 "https://$vip:6443/healthz" >/dev/null 2>&1; then
echo ""
print_success "API server responding on VIP."
break
fi
if [ $attempt -eq $max_attempts ]; then
echo ""
print_error "API server not responding on VIP $vip after $max_attempts attempts."
print_info "Troubleshooting steps:"
print_info " 1. Check API server logs: talosctl -n $TARGET_IP logs kubelet | grep apiserver"
print_info " 2. Check if API server is running: talosctl -n $TARGET_IP containers -k | grep apiserver"
print_info " 3. Test API server on node IP: curl -k https://$TARGET_IP:6443/healthz"
exit 1
fi
# Attempt kubelet restart every 15 attempts to refresh certificates
if [ $((attempt % 15)) -eq 0 ]; then
echo ""
print_info "Restarting kubelet to refresh API container setup (attempt $attempt)..."
talosctl -n "$TARGET_IP" service kubelet restart > /dev/null 2>&1
print_info -n "Waiting for API server to respond after kubelet restart."
sleep 30 # Give kubelet time to restart
fi
printf "."
sleep 10
done
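# curl -k is required because the apiserver presents a cluster-CA-signed
# certificate the host does not trust; --max-time 5 bounds each probe so an
# unresponsive VIP fails fast.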
# Step 5: Configure talosctl endpoint and get kubeconfig
print_info "Step 5/6: Configuring cluster access..."
talosctl config endpoint "$vip"
if ! talosctl kubeconfig --nodes "$vip"; then
print_error "Failed to get kubeconfig via VIP."
print_info "Troubleshooting steps:"
print_info " 1. Check API server logs: talosctl -n $TARGET_IP logs kube-apiserver"
print_info " 2. Test API server on node IP: curl -k https://$TARGET_IP:6443/healthz"
print_info " 3. Verify network connectivity to VIP"
exit 1
else
print_success "Kubeconfig retrieved via VIP."
fi
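# talosctl kubeconfig merges the cluster credentials into ~/.kube/config by
# default, which is what lets the kubectl checks in Step 6 reach the VIP.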
# Step 6: Verify node registration
print_info -n "Step 6/6: Verifying node registration."
for reg_attempt in $(seq 1 10); do
if kubectl get nodes 2>/dev/null | grep -q "Ready\|NotReady"; then
echo ""
print_success "Node registered with API server."
break
fi
echo -n "."
sleep 10
done
if ! kubectl get nodes 2>/dev/null | grep -q "Ready\|NotReady"; then
echo ""
print_error "Node did not register with API server after multiple attempts."
print_info "Troubleshooting steps:"
print_info " 1. Check kubelet logs: talosctl -n $TARGET_IP logs kubelet"
print_info " 2. Check API server logs: talosctl -n $TARGET_IP logs kube-apiserver"
print_info " 3. Verify network connectivity between node and VIP"
exit 1
fi
print_success "Cluster bootstrap completed!"
fi
fi
done
# Register worker nodes
# Worker node setup
echo ""
print_info "Configure worker nodes (optional):"
print_header "Worker Node Setup (Optional)"
WORKER_COUNT=1
while true; do
echo ""
read -p "Do you want to register a worker node? (y/N): " -r register_worker
if [[ $register_worker =~ ^[Yy]$ ]]; then
# Find first available worker number
while [ -n "$(wild-config "cluster.nodes.active.\"${HOSTNAME_PREFIX}worker-${WORKER_COUNT}\".role" 2>/dev/null)" ] && [ "$(wild-config "cluster.nodes.active.\"${HOSTNAME_PREFIX}worker-${WORKER_COUNT}\".role" 2>/dev/null)" != "null" ]; do
read -p "Set up a worker node? (y/N): " -r setup_worker
if [[ $setup_worker =~ ^[Yy]$ ]]; then
# Find next available worker number
while wild-config --check "cluster.nodes.active.${HOSTNAME_PREFIX}worker-${WORKER_COUNT}.role" 2>/dev/null; do
WORKER_COUNT=$((WORKER_COUNT + 1))
done
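# e.g. with worker-1 and worker-2 already registered, the loop exits at
# WORKER_COUNT=3 and the new node becomes "${HOSTNAME_PREFIX}worker-3".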
NODE_NAME="${HOSTNAME_PREFIX}worker-${WORKER_COUNT}"
read -p "Enter current IP for worker node $NODE_NAME: " -r WORKER_IP
read -p "Enter IP address for worker node $NODE_NAME: " -r WORKER_IP
if [ -z "$WORKER_IP" ]; then
print_warning "No IP provided, skipping worker node"
continue
fi
print_info "Running wild-node-detect for worker node $NODE_NAME ($WORKER_IP)..."
# Run detection and capture both output and stderr for debugging
DETECTION_OUTPUT=$(mktemp)
DETECTION_ERROR=$(mktemp)
if wild-node-detect "$WORKER_IP" >"$DETECTION_OUTPUT" 2>"$DETECTION_ERROR"; then
WORKER_INFO=$(cat "$DETECTION_OUTPUT")
print_success "Worker node $NODE_NAME detected at IP $WORKER_IP"
rm -f "$DETECTION_OUTPUT" "$DETECTION_ERROR"
# Pre-configure worker node
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".role" "worker"
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".targetIp" "$WORKER_IP"
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".version" "$talos_version"
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".schematicId" "$schematic_id"
# Run complete node setup
if wild-node-setup "$NODE_NAME"; then
print_success "Worker node $NODE_NAME setup completed"
WORKER_COUNT=$((WORKER_COUNT + 1))
else
print_error "Failed to detect hardware for worker node $NODE_NAME ($WORKER_IP)"
print_info "Detection error output:"
cat "$DETECTION_ERROR" >&2
print_info "Make sure the node is running in maintenance mode and accessible"
rm -f "$DETECTION_OUTPUT" "$DETECTION_ERROR"
continue
print_error "Failed to set up worker node $NODE_NAME"
print_info "You can retry later with: wild-node-setup $NODE_NAME"
fi
if [ -n "$WORKER_INFO" ]; then
# Parse JSON response
INTERFACE=$(echo "$WORKER_INFO" | jq -r '.interface')
SELECTED_DISK=$(echo "$WORKER_INFO" | jq -r '.selected_disk')
AVAILABLE_DISKS=$(echo "$WORKER_INFO" | jq -r '.disks[] | "\(.path) (\((.size / 1000000000) | floor)GB)"' | paste -sd, -)
print_success "Hardware detected for worker node $NODE_NAME:"
print_info " - Interface: $INTERFACE"
print_info " - Available disks: $AVAILABLE_DISKS"
print_info " - Selected disk: $SELECTED_DISK"
# Allow user to override disk selection
echo ""
read -p "Use selected disk '$SELECTED_DISK'? (Y/n): " -r use_disk
if [[ $use_disk =~ ^[Nn]$ ]]; then
echo "Available disks:"
echo "$WORKER_INFO" | jq -r '.disks[] | "\(.path) (\((.size / 1000000000) | floor)GB)"' | nl -w2 -s') '
read -p "Enter disk number: " -r disk_num
SELECTED_DISK=$(echo "$WORKER_INFO" | jq -r ".disks[$((disk_num-1))].path")
if [ "$SELECTED_DISK" = "null" ] || [ -z "$SELECTED_DISK" ]; then
print_error "Invalid disk selection"
continue
fi
print_info "Selected disk: $SELECTED_DISK"
fi
# Update config.yaml with worker hardware info
print_info "Updating config.yaml for worker node $NODE_NAME..."
# Store under unified cluster.nodes.active.<node-name>
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".role" "worker"
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".currentIp" "$WORKER_IP"
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".targetIp" "$WORKER_IP"
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".interface" "$INTERFACE"
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".disk" "$SELECTED_DISK"
# Copy current Talos version and schematic ID to this node
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".version" "$talos_version"
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".schematicId" "$schematic_id"
print_success "Worker node $NODE_NAME registered successfully:"
print_info " - Name: $NODE_NAME"
print_info " - IP: $WORKER_IP"
print_info " - Interface: $INTERFACE"
print_info " - Disk: $SELECTED_DISK"
# Generate machine config immediately
print_info "Generating machine configuration for $NODE_NAME..."
if wild-cluster-node-patch-generate "$NODE_NAME"; then
print_success "Machine configuration generated for $NODE_NAME"
# Ask if user wants to apply the configuration now
echo ""
read -p "Apply configuration to worker node $NODE_NAME now? (Y/n): " -r apply_config
if [[ ! $apply_config =~ ^[Nn]$ ]]; then
# Worker nodes are typically in maintenance mode during setup
print_info "Applying configuration in insecure mode (maintenance mode)..."
wild-cluster-node-up "$NODE_NAME" --insecure
else
print_info "Configuration not applied. You can apply it later with:"
print_info " wild-cluster-node-up $NODE_NAME --insecure"
fi
else
print_warning "Failed to generate machine configuration for $NODE_NAME"
fi
else
print_error "Failed to detect hardware for worker node $NODE_NAME"
continue
fi
WORKER_COUNT=$((WORKER_COUNT + 1))
else
break
fi
done
print_success "Completed Node hardware detection"
echo ""
print_success "Node setup phase completed"
else
print_info "Skipping Node Hardware Detection"
print_info "Skipping node setup (--skip-hardware specified)"
fi
# =============================================================================
@@ -450,3 +414,15 @@ print_header "Wild Cloud Cluster Setup Complete!"
print_success "Cluster infrastructure setup completed!"
echo ""
print_info "Next steps:"
echo " 1. Run 'wild-setup-services' to install cluster services"
echo " 2. Verify nodes are ready: kubectl get nodes"
echo " 3. Check cluster health: wild-health"
echo ""
print_info "Individual node management:"
echo " - Setup additional nodes: wild-node-setup <node-name>"
echo " - Re-detect hardware: wild-node-setup <node-name> --detect"
echo " - Configuration only: wild-node-setup <node-name> --no-deploy"
echo ""
print_success "Wild Cloud cluster setup completed!"