Full cloud setup test run #1 fixes.
@@ -58,10 +58,8 @@ fi

print_header "Talos Cluster Configuration Generation"

# Ensure required directories exist
NODE_SETUP_DIR="${WC_HOME}/setup/cluster-nodes"

# Check if generated directory already exists and has content
NODE_SETUP_DIR="${WC_HOME}/setup/cluster-nodes"
if [ -d "${NODE_SETUP_DIR}/generated" ] && [ "$(ls -A "${NODE_SETUP_DIR}/generated" 2>/dev/null)" ] && [ "$FORCE" = false ]; then
    print_success "Cluster configuration already exists in ${NODE_SETUP_DIR}/generated/"
    print_info "Skipping cluster configuration generation"
@@ -77,8 +75,6 @@ if [ -d "${NODE_SETUP_DIR}/generated" ]; then
    rm -rf "${NODE_SETUP_DIR}/generated"
fi
mkdir -p "${NODE_SETUP_DIR}/generated"
talosctl gen secrets
print_info "New secrets will be generated in ${NODE_SETUP_DIR}/generated/"

# Ensure we have the configuration we need.

@@ -94,9 +90,8 @@ print_info "Cluster name: $CLUSTER_NAME"
print_info "Control plane endpoint: https://$VIP:6443"

cd "${NODE_SETUP_DIR}/generated"
talosctl gen secrets
talosctl gen config --with-secrets secrets.yaml "$CLUSTER_NAME" "https://$VIP:6443"
cd - >/dev/null

# Verify generated files

print_success "Cluster configuration generation completed!"
@@ -51,76 +51,32 @@ else
    init_wild_env
fi

# Check for required configuration
if [ -z "$(wild-config "cluster.nodes.talos.version")" ] || [ -z "$(wild-config "cluster.nodes.talos.schematicId")" ]; then
    print_header "Talos Configuration Required"
    print_error "Missing required Talos configuration"
    print_info "Please run 'wild-setup' first to configure your cluster"
    print_info "Or set the required configuration manually:"
    print_info "  wild-config-set cluster.nodes.talos.version v1.10.4"
    print_info "  wild-config-set cluster.nodes.talos.schematicId YOUR_SCHEMATIC_ID"
    exit 1
fi

# =============================================================================
# INSTALLER IMAGE GENERATION AND ASSET DOWNLOADING
# =============================================================================

print_header "Talos Installer Image Generation and Asset Download"
print_header "Talos asset download"

# Get Talos version and schematic ID from config
TALOS_VERSION=$(wild-config cluster.nodes.talos.version)
SCHEMATIC_ID=$(wild-config cluster.nodes.talos.schematicId)
# Talos version
prompt_if_unset_config "cluster.nodes.talos.version" "Talos version" "v1.11.0"
TALOS_VERSION=$(wild-config "cluster.nodes.talos.version")

# Talos schematic ID
prompt_if_unset_config "cluster.nodes.talos.schematicId" "Talos schematic ID" "56774e0894c8a3a3a9834a2aea65f24163cacf9506abbcbdc3ba135eaca4953f"
SCHEMATIC_ID=$(wild-config "cluster.nodes.talos.schematicId")

print_info "Creating custom Talos installer image..."
print_info "Talos version: $TALOS_VERSION"

# Validate schematic ID
if [ -z "$SCHEMATIC_ID" ] || [ "$SCHEMATIC_ID" = "null" ]; then
    print_error "No schematic ID found in config.yaml"
    print_info "Please run 'wild-setup' first to configure your cluster"
    exit 1
fi

print_info "Schematic ID: $SCHEMATIC_ID"

if [ -f "${WC_HOME}/config.yaml" ] && yq eval '.cluster.nodes.talos.schematic.customization.systemExtensions.officialExtensions' "${WC_HOME}/config.yaml" >/dev/null 2>&1; then
    echo ""
    print_info "Schematic includes:"
    yq eval '.cluster.nodes.talos.schematic.customization.systemExtensions.officialExtensions[]' "${WC_HOME}/config.yaml" | sed 's/^/  - /' || true
    echo ""
fi

# Generate installer image URL
INSTALLER_URL="factory.talos.dev/metal-installer/$SCHEMATIC_ID:$TALOS_VERSION"

print_success "Custom installer image URL generated!"
echo ""
print_info "Installer URL: $INSTALLER_URL"

# =============================================================================
# ASSET DOWNLOADING AND CACHING
# =============================================================================

print_header "Downloading and Caching PXE Boot Assets"

# Create cache directories organized by schematic ID
CACHE_DIR="${WC_HOME}/.wildcloud"
SCHEMATIC_CACHE_DIR="${CACHE_DIR}/node-boot-assets/${SCHEMATIC_ID}"
PXE_CACHE_DIR="${SCHEMATIC_CACHE_DIR}/pxe"
IPXE_CACHE_DIR="${SCHEMATIC_CACHE_DIR}/ipxe"
ISO_CACHE_DIR="${SCHEMATIC_CACHE_DIR}/iso"
mkdir -p "$PXE_CACHE_DIR/amd64"
mkdir -p "$IPXE_CACHE_DIR"
mkdir -p "$ISO_CACHE_DIR"

# Download Talos kernel and initramfs for PXE boot
print_info "Downloading Talos PXE assets..."
KERNEL_URL="https://pxe.factory.talos.dev/image/${SCHEMATIC_ID}/${TALOS_VERSION}/kernel-amd64"
INITRAMFS_URL="https://pxe.factory.talos.dev/image/${SCHEMATIC_ID}/${TALOS_VERSION}/initramfs-amd64.xz"

KERNEL_PATH="${PXE_CACHE_DIR}/amd64/vmlinuz"
INITRAMFS_PATH="${PXE_CACHE_DIR}/amd64/initramfs.xz"
print_header "Downloading and caching boot assets"

# Function to download with progress
download_asset() {
@@ -129,17 +85,19 @@ download_asset() {
    local description="$3"

    if [ -f "$path" ]; then
        print_info "$description already cached at $path"
        print_success "$description already cached at $path"
        return 0
    fi

    print_info "Downloading $description..."
    print_info "URL: $url"

    if command -v wget >/dev/null 2>&1; then
        wget --progress=bar:force -O "$path" "$url"
    elif command -v curl >/dev/null 2>&1; then
        curl -L --progress-bar -o "$path" "$url"
    if command -v curl >/dev/null 2>&1; then
        curl -L -o "$path" "$url" \
            --progress-bar \
            --write-out "✓ Downloaded %{size_download} bytes at %{speed_download} B/s\n"
    elif command -v wget >/dev/null 2>&1; then
        wget --progress=bar:force:noscroll -O "$path" "$url"
    else
        print_error "Neither wget nor curl is available for downloading"
        return 1
@@ -153,42 +111,51 @@ download_asset() {
    fi

    print_success "$description downloaded successfully"
    echo
}

# Download Talos PXE assets
CACHE_DIR="${WC_HOME}/.wildcloud"
SCHEMATIC_CACHE_DIR="${CACHE_DIR}/node-boot-assets/${SCHEMATIC_ID}"
PXE_CACHE_DIR="${SCHEMATIC_CACHE_DIR}/pxe"
IPXE_CACHE_DIR="${SCHEMATIC_CACHE_DIR}/ipxe"
ISO_CACHE_DIR="${SCHEMATIC_CACHE_DIR}/iso"
mkdir -p "$PXE_CACHE_DIR/amd64"
mkdir -p "$IPXE_CACHE_DIR"
mkdir -p "$ISO_CACHE_DIR"

# Download Talos kernel and initramfs for PXE boot
KERNEL_URL="https://pxe.factory.talos.dev/image/${SCHEMATIC_ID}/${TALOS_VERSION}/kernel-amd64"
KERNEL_PATH="${PXE_CACHE_DIR}/amd64/vmlinuz"
download_asset "$KERNEL_URL" "$KERNEL_PATH" "Talos kernel"

INITRAMFS_URL="https://pxe.factory.talos.dev/image/${SCHEMATIC_ID}/${TALOS_VERSION}/initramfs-amd64.xz"
INITRAMFS_PATH="${PXE_CACHE_DIR}/amd64/initramfs.xz"
download_asset "$INITRAMFS_URL" "$INITRAMFS_PATH" "Talos initramfs"

# Download iPXE bootloader files
print_info "Downloading iPXE bootloader assets..."
download_asset "http://boot.ipxe.org/ipxe.efi" "${IPXE_CACHE_DIR}/ipxe.efi" "iPXE EFI bootloader"
download_asset "http://boot.ipxe.org/undionly.kpxe" "${IPXE_CACHE_DIR}/undionly.kpxe" "iPXE BIOS bootloader"
download_asset "http://boot.ipxe.org/arm64-efi/ipxe.efi" "${IPXE_CACHE_DIR}/ipxe-arm64.efi" "iPXE ARM64 EFI bootloader"

# Download Talos ISO
print_info "Downloading Talos ISO..."
ISO_URL="https://factory.talos.dev/image/${SCHEMATIC_ID}/${TALOS_VERSION}/metal-amd64.iso"
ISO_FILENAME="talos-${TALOS_VERSION}-metal-amd64.iso"
ISO_PATH="${ISO_CACHE_DIR}/${ISO_FILENAME}"
ISO_PATH="${ISO_CACHE_DIR}/talos-${TALOS_VERSION}-metal-amd64.iso"
download_asset "$ISO_URL" "$ISO_PATH" "Talos ISO"

echo ""
print_success "All assets downloaded and cached!"
echo ""
print_info "Cached assets for schematic $SCHEMATIC_ID:"
echo "  Talos kernel: $KERNEL_PATH"
echo "  Talos initramfs: $INITRAMFS_PATH"
echo "  Talos ISO: $ISO_PATH"
echo "  iPXE EFI: ${IPXE_CACHE_DIR}/ipxe.efi"
echo "  iPXE BIOS: ${IPXE_CACHE_DIR}/undionly.kpxe"
echo "  iPXE ARM64: ${IPXE_CACHE_DIR}/ipxe-arm64.efi"
print_header "Summary"
print_success "Cached assets for schematic $SCHEMATIC_ID:"
echo "- Talos kernel: $KERNEL_PATH"
echo "- Talos initramfs: $INITRAMFS_PATH"
echo "- Talos ISO: $ISO_PATH"
echo "- iPXE EFI: ${IPXE_CACHE_DIR}/ipxe.efi"
echo "- iPXE BIOS: ${IPXE_CACHE_DIR}/undionly.kpxe"
echo "- iPXE ARM64: ${IPXE_CACHE_DIR}/ipxe-arm64.efi"
echo ""
print_info "Cache location: $SCHEMATIC_CACHE_DIR"
echo ""
print_info "Use these assets for:"
echo "  - PXE boot: Use kernel and initramfs from cache"
echo "  - USB creation: Use ISO file for dd or imaging tools"
echo "    Example: sudo dd if=$ISO_PATH of=/dev/sdX bs=4M status=progress"
echo "  - Custom installer: https://$INSTALLER_URL"
echo "- PXE boot: Use kernel and initramfs from cache"
echo "- USB creation: Use ISO file for dd or imaging tools"
echo "  Example: sudo dd if=$ISO_PATH of=/dev/sdX bs=4M status=progress"
echo "- Custom installer: https://$INSTALLER_URL"
echo ""
print_success "Installer image generation and asset caching completed!"
@@ -96,7 +96,7 @@ else
    init_wild_env
fi

print_header "Talos Node Configuration Application"
print_header "Talos node configuration"

# Check if the specified node is registered
NODE_INTERFACE=$(yq eval ".cluster.nodes.active.\"${NODE_NAME}\".interface" "${WC_HOME}/config.yaml" 2>/dev/null)
@@ -156,10 +156,7 @@ PATCH_FILE="${NODE_SETUP_DIR}/patch/${NODE_NAME}.yaml"

# Check if patch file exists
if [ ! -f "$PATCH_FILE" ]; then
    print_error "Patch file not found: $PATCH_FILE"
    print_info "Generate the patch file first:"
    print_info "  wild-cluster-node-patch-generate $NODE_NAME"
    exit 1
    wild-cluster-node-patch-generate "$NODE_NAME"
fi

# Determine base config file
@@ -112,48 +112,6 @@ for item in "$SOURCE_DIR"/*; do
    fi
done

# Then, process each service directory in the source
print_info "Processing service directories..."
for service_dir in "$SOURCE_DIR"/*; do
    if [ ! -d "$service_dir" ]; then
        continue
    fi

    service_name=$(basename "$service_dir")
    dest_service_dir="$DEST_DIR/$service_name"

    print_info "Processing service: $service_name"

    # Create destination service directory
    mkdir -p "$dest_service_dir"

    # Copy all files except kustomize.template directory
    for item in "$service_dir"/*; do
        item_name=$(basename "$item")

        if [ "$item_name" = "kustomize.template" ]; then
            # Compile kustomize.template to kustomize directory
            if [ -d "$item" ]; then
                print_info "  Compiling kustomize templates for $service_name"
                wild-compile-template-dir --clean "$item" "$dest_service_dir/kustomize"
            fi
        else
            # Copy other files as-is (install.sh, README.md, etc.)
            if [ -f "$item" ]; then
                # Compile individual template files
                if grep -q "{{" "$item" 2>/dev/null; then
                    print_info "  Compiling: ${item_name}"
                    wild-compile-template < "$item" > "$dest_service_dir/$item_name"
                else
                    cp "$item" "$dest_service_dir/$item_name"
                fi
            elif [ -d "$item" ]; then
                cp -r "$item" "$dest_service_dir/"
            fi
        fi
    done
done

print_success "Cluster setup files copied and compiled"

# Verify required configuration
@@ -20,7 +20,6 @@ usage() {
    echo "Examples:"
    echo "  wild-cluster-services-up                    # Install all services"
    echo "  wild-cluster-services-up metallb traefik    # Install specific services"
    echo "  wild-cluster-services-up --list             # List available services"
    echo ""
    echo "Available services (when setup files exist):"
    echo "  metallb, longhorn, traefik, coredns, cert-manager,"
@@ -43,10 +42,6 @@ while [[ $# -gt 0 ]]; do
            usage
            exit 0
            ;;
        --list)
            LIST_SERVICES=true
            shift
            ;;
        --dry-run)
            DRY_RUN=true
            shift
@@ -81,38 +76,6 @@ if [ ! -d "$CLUSTER_SETUP_DIR" ]; then
    exit 1
fi

# Function to get available services
get_available_services() {
    local services=()
    for service_dir in "$CLUSTER_SETUP_DIR"/*; do
        if [ -d "$service_dir" ] && [ -f "$service_dir/install.sh" ]; then
            services+=($(basename "$service_dir"))
        fi
    done
    echo "${services[@]}"
}

# List services if requested
if [ "$LIST_SERVICES" = true ]; then
    print_header "Available Cluster Services"
    AVAILABLE_SERVICES=($(get_available_services))

    if [ ${#AVAILABLE_SERVICES[@]} -eq 0 ]; then
        print_warning "No services found in $CLUSTER_SETUP_DIR"
        print_info "Run 'wild-cluster-services-generate' first"
    else
        print_info "Services available for installation:"
        for service in "${AVAILABLE_SERVICES[@]}"; do
            if [ -f "$CLUSTER_SETUP_DIR/$service/install.sh" ]; then
                print_success "  ✓ $service"
            else
                print_warning "  ✗ $service (install.sh missing)"
            fi
        done
    fi
    exit 0
fi

# =============================================================================
# CLUSTER SERVICES INSTALLATION
# =============================================================================
@@ -151,28 +114,11 @@ else
    print_info "Installing all available services"
fi

# Filter to only include services that actually exist
EXISTING_SERVICES=()
for service in "${SERVICES_TO_INSTALL[@]}"; do
    if [ -d "$CLUSTER_SETUP_DIR/$service" ] && [ -f "$CLUSTER_SETUP_DIR/$service/install.sh" ]; then
        EXISTING_SERVICES+=("$service")
    elif [ ${#SPECIFIC_SERVICES[@]} -gt 0 ]; then
        # Only warn if user specifically requested this service
        print_warning "Service '$service' not found or missing install.sh"
    fi
done

if [ ${#EXISTING_SERVICES[@]} -eq 0 ]; then
    print_error "No installable services found"
    print_info "Run 'wild-cluster-services-generate' first to generate setup files"
    exit 1
fi

print_info "Services to install: ${EXISTING_SERVICES[*]}"
print_info "Services to install: ${SERVICES_TO_INSTALL[*]}"

if [ "$DRY_RUN" = true ]; then
    print_info "DRY RUN - would install the following services:"
    for service in "${EXISTING_SERVICES[@]}"; do
    for service in "${SERVICES_TO_INSTALL[@]}"; do
        print_info "  - $service: $CLUSTER_SETUP_DIR/$service/install.sh"
    done
    exit 0
@@ -183,10 +129,43 @@ cd "$CLUSTER_SETUP_DIR"
INSTALLED_COUNT=0
FAILED_COUNT=0

for service in "${EXISTING_SERVICES[@]}"; do
SOURCE_DIR="${WC_ROOT}/setup/cluster-services"

for service in "${SERVICES_TO_INSTALL[@]}"; do
    echo ""
    print_header "Installing $service"

    # Copy templates
    source_service_dir="$SOURCE_DIR/$service"
    dest_service_dir="$CLUSTER_SETUP_DIR/$service"

    print_info "Processing service: $service"

    mkdir -p "$dest_service_dir"

    # FIXME: Template compilation needs to be AFTER the configuration steps in the install.sh scripts!
    for item in "$source_service_dir"/*; do
        item_name=$(basename "$item")
        if [ "$item_name" = "kustomize.template" ]; then
            if [ -d "$item" ]; then
                print_info "  Compiling kustomize templates for $service"
                wild-compile-template-dir --clean "$item" "$dest_service_dir/kustomize"
            fi
        else
            if [ -f "$item" ]; then
                if grep -q "{{" "$item" 2>/dev/null; then
                    print_info "  Compiling: ${item_name}"
                    wild-compile-template < "$item" > "$dest_service_dir/$item_name"
                else
                    cp "$item" "$dest_service_dir/$item_name"
                fi
            elif [ -d "$item" ]; then
                cp -r "$item" "$dest_service_dir/"
            fi
        fi
    done

    # Run installation script.
    if [ -f "./$service/install.sh" ]; then
        print_info "Running $service installation..."
        if ./"$service"/install.sh; then
@@ -219,13 +198,13 @@ if [ $INSTALLED_COUNT -gt 0 ]; then
    echo "  2. Check service status with: kubectl get services --all-namespaces"

    # Service-specific next steps
    if [[ " ${EXISTING_SERVICES[*]} " =~ " kubernetes-dashboard " ]]; then
    if [[ " ${SERVICES_TO_INSTALL[*]} " =~ " kubernetes-dashboard " ]]; then
        INTERNAL_DOMAIN=$(wild-config cloud.internalDomain 2>/dev/null || echo "your-internal-domain")
        echo "  3. Access dashboard at: https://dashboard.${INTERNAL_DOMAIN}"
        echo "  4. Get dashboard token with: ${WC_ROOT}/bin/dashboard-token"
    fi

    if [[ " ${EXISTING_SERVICES[*]} " =~ " cert-manager " ]]; then
    if [[ " ${SERVICES_TO_INSTALL[*]} " =~ " cert-manager " ]]; then
        echo "  3. Check cert-manager: kubectl get clusterissuers"
    fi
fi
@@ -73,11 +73,9 @@ CONFIG_FILE="${WC_HOME}/config.yaml"

# Create config file if it doesn't exist
if [ ! -f "${CONFIG_FILE}" ]; then
    echo "Creating new config file at ${CONFIG_FILE}"
    print_info "Creating new config file at ${CONFIG_FILE}"
    echo "{}" > "${CONFIG_FILE}"
fi

# Use yq to set the value in the YAML file
yq eval ".${KEY_PATH} = \"${VALUE}\"" -i "${CONFIG_FILE}"

echo "Set ${KEY_PATH} = ${VALUE}"
@@ -48,6 +48,33 @@ while [[ $# -gt 0 ]]; do
    esac
done

# Check if directory has any files (including hidden files, excluding . and .. and .git)
if [ "${UPDATE}" = false ]; then
    if [ -n "$(find . -maxdepth 1 -name ".*" -o -name "*" | grep -v "^\.$" | grep -v "^\.\.$" | grep -v "^\./\.git$" | head -1)" ]; then
        NC='\033[0m' # No Color
        YELLOW='\033[1;33m' # Yellow
        echo -e "${YELLOW}WARNING:${NC} Directory is not empty."
        read -p "Do you want to overwrite existing files? (y/N): " -n 1 -r
        echo
        if [[ $REPLY =~ ^[Yy]$ ]]; then
            confirm="yes"
        else
            confirm="no"
        fi
        if [ "$confirm" != "yes" ]; then
            echo "Aborting setup. Please run this script in an empty directory."
            exit 1
        fi
    fi
fi

# Initialize .wildcloud directory if it doesn't exist.
if [ ! -d ".wildcloud" ]; then
    mkdir -p ".wildcloud"
    UPDATE=true
    echo "Created '.wildcloud' directory."
fi

# Initialize Wild Cloud environment
if [ -z "${WC_ROOT}" ]; then
    echo "WC_ROOT is not set."
@@ -56,12 +83,10 @@ else
    source "${WC_ROOT}/scripts/common.sh"
fi


# Initialize .wildcloud directory if it doesn't exist.
if [ ! -d ".wildcloud" ]; then
    mkdir -p ".wildcloud"
    UPDATE=true
    echo "Created '.wildcloud' directory."
# Initialize config.yaml if it doesn't exist.
if [ ! -f "config.yaml" ]; then
    touch "config.yaml"
    echo "Created 'config.yaml' file."
fi

# =============================================================================
@@ -84,46 +109,21 @@ if [ -z "$current_cluster_name" ] || [ "$current_cluster_name" = "null" ]; then
    print_info "Set cluster name to: ${cluster_name}"
fi

# Check if current directory is empty for new cloud
if [ "${UPDATE}" = false ]; then
    # Check if directory has any files (including hidden files, excluding . and .. and .git)
    if [ -n "$(find . -maxdepth 1 -name ".*" -o -name "*" | grep -v "^\.$" | grep -v "^\.\.$" | grep -v "^\./\.git$" | grep -v "^\./\.wildcloud$"| head -1)" ]; then
        echo "Warning: Current directory is not empty."
        read -p "Do you want to overwrite existing files? (y/N): " -n 1 -r
        echo
        if [[ $REPLY =~ ^[Yy]$ ]]; then
            confirm="yes"
        else
            confirm="no"
        fi
        if [ "$confirm" != "yes" ]; then
            echo "Aborting setup. Please run this script in an empty directory."
            exit 1
        fi
    fi
fi

# =============================================================================
# COPY SCAFFOLD
# =============================================================================

# Copy cloud files to current directory only if they do not exist.
# Ignore files that already exist.
SRC_DIR="${WC_ROOT}/setup/home-scaffold"
rsync -av --ignore-existing --exclude=".git" "${SRC_DIR}/" ./ > /dev/null

print_success "Ready for cluster setup!"

# =============================================================================
# COMPLETION
# COPY DOCS
# =============================================================================

print_header "Wild Cloud Scaffold Setup Complete! Welcome to Wild Cloud!"

echo ""
echo "Next steps:"
echo "  1. Set up your Kubernetes cluster:"
echo "     wild-setup-cluster"
echo ""
echo "  2. Install cluster services:"
echo "     wild-setup-services"
echo ""
echo "Or run the complete setup:"
echo "  wild-setup"
wild-update-docs --force

print_success "Wild Cloud initialized! Welcome to Wild Cloud!"
@@ -11,14 +11,6 @@ SKIP_SERVICES=false

while [[ $# -gt 0 ]]; do
    case $1 in
        --skip-scaffold)
            SKIP_SCAFFOLD=true
            shift
            ;;
        --skip-docs)
            SKIP_DOCS=true
            shift
            ;;
        --skip-cluster)
            SKIP_CLUSTER=true
            shift
@@ -80,55 +72,12 @@ else
fi

print_header "Wild Cloud Setup"
print_info "Running complete Wild Cloud setup."
echo ""

# =============================================================================
# WC_HOME SCAFFOLDING
# =============================================================================

if [ "${SKIP_SCAFFOLD}" = false ]; then
    print_header "Cloud Home Setup"
    print_info "Scaffolding your cloud home..."

    if wild-setup-scaffold; then
        print_success "Cloud home setup completed"
    else
        print_error "Cloud home setup failed"
        exit 1
    fi
    echo ""
else
    print_info "Skipping Home Setup"
fi

# =============================================================================
# DOCS
# =============================================================================

if [ "${SKIP_DOCS}" = false ]; then
    print_header "Cloud Docs"
    print_info "Preparing your docs..."

    if wild-setup-docs; then
        print_success "Cloud docs setup completed"
    else
        print_error "Cloud docs setup failed"
        exit 1
    fi
    echo ""
else
    print_info "Skipping Docs Setup"
fi

# =============================================================================
# CLUSTER SETUP
# =============================================================================

if [ "${SKIP_CLUSTER}" = false ]; then
    print_header "Cluster Setup"
    print_info "Running wild-setup-cluster..."

    if wild-setup-cluster; then
        print_success "Cluster setup completed"
    else
@@ -145,9 +94,6 @@ fi
# =============================================================================

if [ "${SKIP_SERVICES}" = false ]; then
    print_header "Services Setup"
    print_info "Running wild-setup-services..."

    if wild-setup-services; then
        print_success "Services setup completed"
    else
@@ -62,40 +62,6 @@ else
fi

print_header "Wild Cloud Cluster Setup"
print_info "Setting up cluster infrastructure"
echo ""

# Generate initial cluster configuration

if ! wild-cluster-config-generate; then
    print_error "Failed to generate cluster configuration"
    exit 1
fi

# Configure Talos cli with our new cluster context

CLUSTER_NAME=$(wild-config "cluster.name")
HAS_CONTEXT=$(talosctl config contexts | grep -c "$CLUSTER_NAME" || true)
if [ "$HAS_CONTEXT" -eq 0 ]; then
    print_info "No Talos context found for cluster $CLUSTER_NAME, creating..."
    talosctl config merge ${WC_HOME}/setup/cluster-nodes/generated/talosconfig
    talosctl config use "$CLUSTER_NAME"
    print_success "Talos context for $CLUSTER_NAME created and set as current"
fi

# Talos asset download

if [ "${SKIP_INSTALLER}" = false ]; then
    print_header "Installer Image Generation"

    print_info "Running wild-cluster-node-boot-assets-download..."
    wild-cluster-node-boot-assets-download

    print_success "Installer image generated"
    echo ""
else
    print_info "Skipping: Installer Image Generation"
fi

# =============================================================================
# Configuration
@@ -103,6 +69,9 @@ fi

prompt_if_unset_config "operator.email" "Operator email address"

prompt_if_unset_config "cluster.name" "Cluster name" "wild-cluster"
CLUSTER_NAME=$(wild-config "cluster.name")

# Configure hostname prefix for unique node names on LAN
prompt_if_unset_config "cluster.hostnamePrefix" "Hostname prefix (optional, e.g. 'test-' for unique names on LAN)" ""
HOSTNAME_PREFIX=$(wild-config "cluster.hostnamePrefix")
@@ -123,41 +92,41 @@ prompt_if_unset_config "cluster.ipAddressPool" "MetalLB IP address pool" "${SUBN
ip_pool=$(wild-config "cluster.ipAddressPool")

# Load balancer IP (automatically set to first address in the pool if not set)
current_lb_ip=$(wild-config "cluster.loadBalancerIp")
if [ -z "$current_lb_ip" ] || [ "$current_lb_ip" = "null" ]; then
    lb_ip=$(echo "${ip_pool}" | cut -d'-' -f1)
    wild-config-set "cluster.loadBalancerIp" "${lb_ip}"
    print_info "Set load balancer IP to: ${lb_ip} (first IP in MetalLB pool)"
fi
default_lb_ip=$(echo "${ip_pool}" | cut -d'-' -f1)
prompt_if_unset_config "cluster.loadBalancerIp" "Load balancer IP" "${default_lb_ip}"

# Talos version
prompt_if_unset_config "cluster.nodes.talos.version" "Talos version" "v1.10.4"
prompt_if_unset_config "cluster.nodes.talos.version" "Talos version" "v1.11.0"
talos_version=$(wild-config "cluster.nodes.talos.version")

# Talos schematic ID
current_schematic_id=$(wild-config "cluster.nodes.talos.schematicId")
if [ -z "$current_schematic_id" ] || [ "$current_schematic_id" = "null" ]; then
    echo ""
    print_info "Get your Talos schematic ID from: https://factory.talos.dev/"
    print_info "This customizes Talos with the drivers needed for your hardware."

    # Use current schematic ID from config as default
    default_schematic_id=$(wild-config "cluster.nodes.talos.schematicId")
    if [ -n "$default_schematic_id" ] && [ "$default_schematic_id" != "null" ]; then
        print_info "Using schematic ID from config for Talos $talos_version"
    else
        default_schematic_id=""
    fi

    schematic_id=$(prompt_with_default "Talos schematic ID" "${default_schematic_id}" "${current_schematic_id}")
    wild-config-set "cluster.nodes.talos.schematicId" "${schematic_id}"
fi
prompt_if_unset_config "cluster.nodes.talos.schematicId" "Talos schematic ID" "56774e0894c8a3a3a9834a2aea65f24163cacf9506abbcbdc3ba135eaca4953f"
schematic_id=$(wild-config "cluster.nodes.talos.schematicId")

# External DNS
cluster_name=$(wild-config "cluster.name")
prompt_if_unset_config "cluster.externalDns.ownerId" "External DNS owner ID" "external-dns-${cluster_name}"
prompt_if_unset_config "cluster.externalDns.ownerId" "External DNS owner ID" "external-dns-${CLUSTER_NAME}"

# =============================================================================
# TALOS CLUSTER CONFIGURATION
# =============================================================================

prompt_if_unset_config "cluster.nodes.control.vip" "Control plane virtual IP" "${SUBNET_PREFIX}.90"
vip=$(wild-config "cluster.nodes.control.vip")

# Generate initial cluster configuration
if ! wild-cluster-config-generate; then
    print_error "Failed to generate cluster configuration"
    exit 1
fi

# Configure Talos cli with our new cluster context
HAS_CONTEXT=$(talosctl config contexts | grep -c "$CLUSTER_NAME" || true)
if [ "$HAS_CONTEXT" -eq 0 ]; then
    print_info "No Talos context found for cluster $CLUSTER_NAME, creating..."
    talosctl config merge ${WC_HOME}/setup/cluster-nodes/generated/talosconfig
    talosctl config context "$CLUSTER_NAME"
    print_success "Talos context for $CLUSTER_NAME created and set as current"
fi

# =============================================================================
# Node setup
@@ -167,12 +136,6 @@ if [ "${SKIP_HARDWARE}" = false ]; then

    print_header "Control Plane Configuration"

    print_info "Configure control plane nodes (you need at least 3 for HA):"
    echo ""

    prompt_if_unset_config "cluster.nodes.control.vip" "Control plane virtual IP" "${SUBNET_PREFIX}.90"
    vip=$(wild-config "cluster.nodes.control.vip")

    # Automatically configure the first three IPs after VIP for control plane nodes
    vip_last_octet=$(echo "$vip" | cut -d. -f4)
    vip_prefix=$(echo "$vip" | cut -d. -f1-3)
@@ -184,7 +147,6 @@ if [ "${SKIP_HARDWARE}" = false ]; then
    for i in 1 2 3; do
        NODE_NAME="${HOSTNAME_PREFIX}control-${i}"
        TARGET_IP="${vip_prefix}.$(( vip_last_octet + i ))"
        echo ""
        print_info "Registering control plane node: $NODE_NAME (IP: $TARGET_IP)"

        # Initialize the node in cluster.nodes.active if not already present
@@ -288,14 +250,8 @@ if [ "${SKIP_HARDWARE}" = false ]; then
        wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".disk" "$SELECTED_DISK"

        # Copy current Talos version and schematic ID to this node
        current_talos_version=$(wild-config "cluster.nodes.talos.version")
        current_schematic_id=$(wild-config "cluster.nodes.talos.schematicId")
        if [ -n "$current_talos_version" ] && [ "$current_talos_version" != "null" ]; then
            wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".version" "$current_talos_version"
        fi
        if [ -n "$current_schematic_id" ] && [ "$current_schematic_id" != "null" ]; then
            wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".schematicId" "$current_schematic_id"
        fi
        wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".version" "$talos_version"
        wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".schematicId" "$schematic_id"

        echo ""
        read -p "Bring node $NODE_NAME ($TARGET_IP) up now? (y/N): " -r apply_config
@@ -315,7 +271,7 @@ if [ "${SKIP_HARDWARE}" = false ]; then
        read -p "The cluster should be bootstrapped after the first control node is ready. Is it ready?: " -r is_ready
        if [[ $is_ready =~ ^[Yy]$ ]]; then
            print_info "Bootstrapping control plane node $TARGET_IP..."
            talos config endpoint "$TARGET_IP"
            talosctl config endpoint "$TARGET_IP"

            # Attempt to bootstrap the cluster
            if talosctl bootstrap --nodes "$TARGET_IP" 2>&1 | tee /tmp/bootstrap_output.log; then
@@ -425,14 +381,8 @@ if [ "${SKIP_HARDWARE}" = false ]; then
        wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".disk" "$SELECTED_DISK"

        # Copy current Talos version and schematic ID to this node
        current_talos_version=$(wild-config "cluster.nodes.talos.version")
        current_schematic_id=$(wild-config "cluster.nodes.talos.schematicId")
        if [ -n "$current_talos_version" ] && [ "$current_talos_version" != "null" ]; then
            wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".version" "$current_talos_version"
        fi
        if [ -n "$current_schematic_id" ] && [ "$current_schematic_id" != "null" ]; then
            wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".schematicId" "$current_schematic_id"
        fi
        wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".version" "$talos_version"
        wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".schematicId" "$schematic_id"

        print_success "Worker node $NODE_NAME registered successfully:"
        print_info "  - Name: $NODE_NAME"
@@ -65,9 +65,7 @@ if [ -z "$(wild-config "cluster.name")" ]; then
    exit 1
fi

print_header "Wild Cloud Services Setup"
print_info "Installing Kubernetes cluster services"
echo ""
print_header "Wild Cloud services setup"

if ! command -v kubectl >/dev/null 2>&1; then
    print_error "kubectl is not installed or not in PATH"
@@ -4,28 +4,28 @@ set -e
set -o pipefail

# Parse arguments
UPDATE=false
FORCE=false

while [[ $# -gt 0 ]]; do
    case $1 in
        --update)
            UPDATE=true
        --force)
            FORCE=true
            shift
            ;;
        -h|--help)
            echo "Usage: $0 [--update]"
            echo "Usage: $0 [--force]"
            echo ""
            echo "Copy Wild Cloud documentation to the current cloud directory."
            echo ""
            echo "Options:"
            echo "  --update    Update existing docs (overwrite)"
            echo "  --force     Force overwrite of existing docs"
            echo "  -h, --help  Show this help message"
            echo ""
            exit 0
            ;;
        -*)
            echo "Unknown option $1"
            echo "Usage: $0 [--update]"
            echo "Usage: $0 [--force]"
            exit 1
            ;;
        *)
@@ -48,21 +48,21 @@ fi
DOCS_DEST="${WC_HOME}/docs"

# Check if docs already exist
if [ -d "${DOCS_DEST}" ] && [ "${UPDATE}" = false ]; then
    echo "Documentation already exists at ${DOCS_DEST}"
if [ -d "${DOCS_DEST}" ] && [ "${FORCE}" = false ]; then
    print_warning "Documentation already exists at ${DOCS_DEST}"
    read -p "Do you want to update documentation files? (y/N): " -n 1 -r
    echo
    if [[ $REPLY =~ ^[Yy]$ ]]; then
        UPDATE=true
        FORCE=true
    else
        echo "Skipping documentation update."
        print_info "Skipping documentation update."
        exit 0
    fi
fi

# Copy docs directory from root to WC_HOME
if [ -d "${WC_ROOT}/docs" ]; then
    if [ "${UPDATE}" = true ] && [ -d "${DOCS_DEST}" ]; then
    if [ "${FORCE}" = true ] && [ -d "${DOCS_DEST}" ]; then
        rm -rf "${DOCS_DEST}"
    fi
    cp -r "${WC_ROOT}/docs" "${DOCS_DEST}"
env.sh
@@ -7,27 +7,4 @@ export WC_ROOT="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" && pwd)"
# Add bin to path first so wild-config is available
export PATH="$WC_ROOT/bin:$PATH"

# Install kubectl
if ! command -v kubectl &> /dev/null; then
    echo "Error: kubectl is not installed. Installing."
    curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
    curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
    echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
    sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
fi

# Install talosctl
if ! command -v talosctl &> /dev/null; then
    echo "Error: talosctl is not installed. Installing."
    curl -sL https://talos.dev/install | sh
fi


# Check if gomplate is installed
if ! command -v gomplate &> /dev/null; then
    echo "Error: gomplate is not installed. Please install gomplate first."
    echo "Visit: https://docs.gomplate.ca/installing/"
    exit 1
fi

echo "Wild Cloud root ready."
@@ -84,7 +84,7 @@ prompt_with_default() {
    if [ -n "${default}" ]; then
        printf "%s [default: %s]: " "${prompt}" "${default}" >&2
    else
        printf "%s [default: empty]: " "${prompt}" >&2
        printf "%s: " "${prompt}" >&2
    fi
    read -r result
    if [ -z "${result}" ]; then
@@ -109,16 +109,18 @@ prompt_if_unset_config() {
    local prompt="$2"
    local default="$3"

    local current_value
    current_value=$(wild-config "${config_path}")

    if [ -z "${current_value}" ] || [ "${current_value}" = "null" ]; then
    # Check if key exists first to avoid error messages
    if wild-config --check "${config_path}"; then
        # Key exists, get its value
        local current_value
        current_value=$(wild-config "${config_path}")
        print_info "Using existing ${config_path} = ${current_value}"
    else
        # Key doesn't exist, prompt for it
        local new_value
        new_value=$(prompt_with_default "${prompt}" "${default}" "")
        wild-config-set "${config_path}" "${new_value}"
        print_info "Set ${config_path} = ${new_value}"
    else
        print_info "Using existing ${config_path} = ${current_value}"
    fi
}
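
# Illustrative usage (mirrors the callers elsewhere in this commit): if the key is
# already present in config.yaml the existing value is reported, otherwise the user
# is prompted with the given default and the result is stored via wild-config-set.
#
#   prompt_if_unset_config "cluster.name" "Cluster name" "wild-cluster"
#   CLUSTER_NAME=$(wild-config "cluster.name")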

@@ -128,16 +130,16 @@ prompt_if_unset_secret() {
    local prompt="$2"
    local default="$3"

    local current_value
    current_value=$(wild-secret "${secret_path}")

    if [ -z "${current_value}" ] || [ "${current_value}" = "null" ]; then
    # Check if key exists first to avoid error messages
    if wild-secret --check "${secret_path}"; then
        # Key exists, we don't show the value for security
        print_info "Using existing secret ${secret_path}"
    else
        # Key doesn't exist, prompt for it
        local new_value
        new_value=$(prompt_with_default "${prompt}" "${default}" "")
        wild-secret-set "${secret_path}" "${new_value}"
        print_info "Set secret ${secret_path}"
    else
        print_info "Using existing secret ${secret_path}"
    fi
}

@@ -168,7 +170,7 @@ init_wild_env() {
    if [ -z "${WC_ROOT}" ]; then
        echo "ERROR: WC_ROOT is not set."
        exit 1
    else
    fi

    # Check if WC_ROOT is a valid directory
    if [ ! -d "${WC_ROOT}" ]; then
@@ -187,6 +189,33 @@ init_wild_env() {
        echo "ERROR: This command must be run from within a wildcloud home directory."
        exit 1
    fi

    # Check kubectl
    if ! command -v kubectl &> /dev/null; then
        echo "Error: kubectl is not installed. Please run $WC_ROOT/scripts/install-wild-cloud-dependencies.sh."
    fi

    # Check talosctl
    if ! command -v talosctl &> /dev/null; then
        echo "Error: talosctl is not installed. Please run $WC_ROOT/scripts/install-wild-cloud-dependencies.sh."
    fi

    # Check gomplate
    if ! command -v gomplate &> /dev/null; then
        echo "Error: gomplate is not installed. Please run $WC_ROOT/scripts/install-wild-cloud-dependencies.sh."
        exit 1
    fi

    # Check yq
    if ! command -v yq &> /dev/null; then
        echo "Error: yq is not installed. Please run $WC_ROOT/scripts/install-wild-cloud-dependencies.sh."
        exit 1
    fi

    # Check restic
    if ! command -v restic &> /dev/null; then
        echo "Error: restic is not installed. Please run $WC_ROOT/scripts/install-wild-cloud-dependencies.sh."
        exit 1
    fi
}
@@ -1,5 +1,14 @@
#!/bin/bash

# Install kubectl
if ! command -v kubectl &> /dev/null; then
    echo "Error: kubectl is not installed. Installing."
    curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
    curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
    echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
    sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
fi

# Install gomplate
if command -v gomplate &> /dev/null; then
    echo "gomplate is already installed."
@@ -45,3 +45,18 @@
      - siderolabs/nvidia-open-gpu-kernel-modules-lts
      - siderolabs/nvidia-open-gpu-kernel-modules-production
      - siderolabs/util-linux-tools"
"56774e0894c8a3a3a9834a2aea65f24163cacf9506abbcbdc3ba135eaca4953f":
  version: "v1.11.0"
  architecture: "amd64"
  secureBoot: false
  schematic:
    customization:
      systemExtensions:
        officialExtensions:
          - siderolabs/gvisor
          - siderolabs/intel-ucode
          - siderolabs/iscsi-tools
          - siderolabs/nvidia-container-toolkit-production
          - siderolabs/nvidia-fabricmanager-production
          - siderolabs/nvidia-open-gpu-kernel-modules-production
          - siderolabs/util-linux-tools