Compare commits
2 Commits: 61460b63a3...main

Commits:
- 8a569a1720
- af60d0c744
@@ -58,10 +58,8 @@ fi
 
 print_header "Talos Cluster Configuration Generation"
 
-# Ensure required directories exist
-NODE_SETUP_DIR="${WC_HOME}/setup/cluster-nodes"
-
 # Check if generated directory already exists and has content
+NODE_SETUP_DIR="${WC_HOME}/setup/cluster-nodes"
 if [ -d "${NODE_SETUP_DIR}/generated" ] && [ "$(ls -A "${NODE_SETUP_DIR}/generated" 2>/dev/null)" ] && [ "$FORCE" = false ]; then
 print_success "Cluster configuration already exists in ${NODE_SETUP_DIR}/generated/"
 print_info "Skipping cluster configuration generation"

@@ -77,8 +75,6 @@ if [ -d "${NODE_SETUP_DIR}/generated" ]; then
 rm -rf "${NODE_SETUP_DIR}/generated"
 fi
 mkdir -p "${NODE_SETUP_DIR}/generated"
-talosctl gen secrets
-print_info "New secrets will be generated in ${NODE_SETUP_DIR}/generated/"
 
 # Ensure we have the configuration we need.
 

@@ -94,9 +90,8 @@ print_info "Cluster name: $CLUSTER_NAME"
 print_info "Control plane endpoint: https://$VIP:6443"
 
 cd "${NODE_SETUP_DIR}/generated"
+talosctl gen secrets
 talosctl gen config --with-secrets secrets.yaml "$CLUSTER_NAME" "https://$VIP:6443"
 cd - >/dev/null
 
-# Verify generated files
-
 print_success "Cluster configuration generation completed!"
@@ -51,76 +51,32 @@ else
 init_wild_env
 fi
 
-# Check for required configuration
-if [ -z "$(wild-config "cluster.nodes.talos.version")" ] || [ -z "$(wild-config "cluster.nodes.talos.schematicId")" ]; then
-print_header "Talos Configuration Required"
-print_error "Missing required Talos configuration"
-print_info "Please run 'wild-setup' first to configure your cluster"
-print_info "Or set the required configuration manually:"
-print_info " wild-config-set cluster.nodes.talos.version v1.10.4"
-print_info " wild-config-set cluster.nodes.talos.schematicId YOUR_SCHEMATIC_ID"
-exit 1
-fi
-
 # =============================================================================
 # INSTALLER IMAGE GENERATION AND ASSET DOWNLOADING
 # =============================================================================
 
-print_header "Talos Installer Image Generation and Asset Download"
+print_header "Talos asset download"
 
-# Get Talos version and schematic ID from config
-TALOS_VERSION=$(wild-config cluster.nodes.talos.version)
-SCHEMATIC_ID=$(wild-config cluster.nodes.talos.schematicId)
+# Talos version
+prompt_if_unset_config "cluster.nodes.talos.version" "Talos version" "v1.11.0"
+TALOS_VERSION=$(wild-config "cluster.nodes.talos.version")
 
+# Talos schematic ID
+prompt_if_unset_config "cluster.nodes.talos.schematicId" "Talos schematic ID" "56774e0894c8a3a3a9834a2aea65f24163cacf9506abbcbdc3ba135eaca4953f"
+SCHEMATIC_ID=$(wild-config "cluster.nodes.talos.schematicId")
 
 print_info "Creating custom Talos installer image..."
 print_info "Talos version: $TALOS_VERSION"
 
-# Validate schematic ID
-if [ -z "$SCHEMATIC_ID" ] || [ "$SCHEMATIC_ID" = "null" ]; then
-print_error "No schematic ID found in config.yaml"
-print_info "Please run 'wild-setup' first to configure your cluster"
-exit 1
-fi
-
 print_info "Schematic ID: $SCHEMATIC_ID"
 
-if [ -f "${WC_HOME}/config.yaml" ] && yq eval '.cluster.nodes.talos.schematic.customization.systemExtensions.officialExtensions' "${WC_HOME}/config.yaml" >/dev/null 2>&1; then
-echo ""
-print_info "Schematic includes:"
-yq eval '.cluster.nodes.talos.schematic.customization.systemExtensions.officialExtensions[]' "${WC_HOME}/config.yaml" | sed 's/^/ - /' || true
-echo ""
-fi
-
-# Generate installer image URL
 INSTALLER_URL="factory.talos.dev/metal-installer/$SCHEMATIC_ID:$TALOS_VERSION"
 
-print_success "Custom installer image URL generated!"
-echo ""
 print_info "Installer URL: $INSTALLER_URL"
 
 
 # =============================================================================
 # ASSET DOWNLOADING AND CACHING
 # =============================================================================
 
-print_header "Downloading and Caching PXE Boot Assets"
+print_header "Downloading and caching boot assets"
 
-# Create cache directories organized by schematic ID
-CACHE_DIR="${WC_HOME}/.wildcloud"
-SCHEMATIC_CACHE_DIR="${CACHE_DIR}/node-boot-assets/${SCHEMATIC_ID}"
-PXE_CACHE_DIR="${SCHEMATIC_CACHE_DIR}/pxe"
-IPXE_CACHE_DIR="${SCHEMATIC_CACHE_DIR}/ipxe"
-ISO_CACHE_DIR="${SCHEMATIC_CACHE_DIR}/iso"
-mkdir -p "$PXE_CACHE_DIR/amd64"
-mkdir -p "$IPXE_CACHE_DIR"
-mkdir -p "$ISO_CACHE_DIR"
-
-# Download Talos kernel and initramfs for PXE boot
-print_info "Downloading Talos PXE assets..."
-KERNEL_URL="https://pxe.factory.talos.dev/image/${SCHEMATIC_ID}/${TALOS_VERSION}/kernel-amd64"
-INITRAMFS_URL="https://pxe.factory.talos.dev/image/${SCHEMATIC_ID}/${TALOS_VERSION}/initramfs-amd64.xz"
-
-KERNEL_PATH="${PXE_CACHE_DIR}/amd64/vmlinuz"
-INITRAMFS_PATH="${PXE_CACHE_DIR}/amd64/initramfs.xz"
-
 # Function to download with progress
 download_asset() {
@@ -129,17 +85,19 @@ download_asset() {
 local description="$3"
 
 if [ -f "$path" ]; then
-print_info "$description already cached at $path"
+print_success "$description already cached at $path"
 return 0
 fi
 
 print_info "Downloading $description..."
 print_info "URL: $url"
 
-if command -v wget >/dev/null 2>&1; then
-wget --progress=bar:force -O "$path" "$url"
-elif command -v curl >/dev/null 2>&1; then
-curl -L --progress-bar -o "$path" "$url"
+if command -v curl >/dev/null 2>&1; then
+curl -L -o "$path" "$url" \
+--progress-bar \
+--write-out "✓ Downloaded %{size_download} bytes at %{speed_download} B/s\n"
+elif command -v wget >/dev/null 2>&1; then
+wget --progress=bar:force:noscroll -O "$path" "$url"
 else
 print_error "Neither wget nor curl is available for downloading"
 return 1
@@ -153,42 +111,51 @@ download_asset() {
 fi
 
 print_success "$description downloaded successfully"
+echo
 }
 
-# Download Talos PXE assets
+CACHE_DIR="${WC_HOME}/.wildcloud"
+SCHEMATIC_CACHE_DIR="${CACHE_DIR}/node-boot-assets/${SCHEMATIC_ID}"
+PXE_CACHE_DIR="${SCHEMATIC_CACHE_DIR}/pxe"
+IPXE_CACHE_DIR="${SCHEMATIC_CACHE_DIR}/ipxe"
+ISO_CACHE_DIR="${SCHEMATIC_CACHE_DIR}/iso"
+mkdir -p "$PXE_CACHE_DIR/amd64"
+mkdir -p "$IPXE_CACHE_DIR"
+mkdir -p "$ISO_CACHE_DIR"
+
+# Download Talos kernel and initramfs for PXE boot
+KERNEL_URL="https://pxe.factory.talos.dev/image/${SCHEMATIC_ID}/${TALOS_VERSION}/kernel-amd64"
+KERNEL_PATH="${PXE_CACHE_DIR}/amd64/vmlinuz"
 download_asset "$KERNEL_URL" "$KERNEL_PATH" "Talos kernel"
 
+INITRAMFS_URL="https://pxe.factory.talos.dev/image/${SCHEMATIC_ID}/${TALOS_VERSION}/initramfs-amd64.xz"
+INITRAMFS_PATH="${PXE_CACHE_DIR}/amd64/initramfs.xz"
 download_asset "$INITRAMFS_URL" "$INITRAMFS_PATH" "Talos initramfs"
 
 # Download iPXE bootloader files
-print_info "Downloading iPXE bootloader assets..."
 download_asset "http://boot.ipxe.org/ipxe.efi" "${IPXE_CACHE_DIR}/ipxe.efi" "iPXE EFI bootloader"
 download_asset "http://boot.ipxe.org/undionly.kpxe" "${IPXE_CACHE_DIR}/undionly.kpxe" "iPXE BIOS bootloader"
 download_asset "http://boot.ipxe.org/arm64-efi/ipxe.efi" "${IPXE_CACHE_DIR}/ipxe-arm64.efi" "iPXE ARM64 EFI bootloader"
 
 # Download Talos ISO
-print_info "Downloading Talos ISO..."
 ISO_URL="https://factory.talos.dev/image/${SCHEMATIC_ID}/${TALOS_VERSION}/metal-amd64.iso"
-ISO_FILENAME="talos-${TALOS_VERSION}-metal-amd64.iso"
-ISO_PATH="${ISO_CACHE_DIR}/${ISO_FILENAME}"
+ISO_PATH="${ISO_CACHE_DIR}/talos-${TALOS_VERSION}-metal-amd64.iso"
 download_asset "$ISO_URL" "$ISO_PATH" "Talos ISO"
 
-echo ""
-print_success "All assets downloaded and cached!"
-echo ""
-print_info "Cached assets for schematic $SCHEMATIC_ID:"
-echo " Talos kernel: $KERNEL_PATH"
-echo " Talos initramfs: $INITRAMFS_PATH"
-echo " Talos ISO: $ISO_PATH"
-echo " iPXE EFI: ${IPXE_CACHE_DIR}/ipxe.efi"
-echo " iPXE BIOS: ${IPXE_CACHE_DIR}/undionly.kpxe"
-echo " iPXE ARM64: ${IPXE_CACHE_DIR}/ipxe-arm64.efi"
+print_header "Summary"
+print_success "Cached assets for schematic $SCHEMATIC_ID:"
+echo "- Talos kernel: $KERNEL_PATH"
+echo "- Talos initramfs: $INITRAMFS_PATH"
+echo "- Talos ISO: $ISO_PATH"
+echo "- iPXE EFI: ${IPXE_CACHE_DIR}/ipxe.efi"
+echo "- iPXE BIOS: ${IPXE_CACHE_DIR}/undionly.kpxe"
+echo "- iPXE ARM64: ${IPXE_CACHE_DIR}/ipxe-arm64.efi"
 echo ""
 print_info "Cache location: $SCHEMATIC_CACHE_DIR"
 echo ""
 print_info "Use these assets for:"
-echo " - PXE boot: Use kernel and initramfs from cache"
-echo " - USB creation: Use ISO file for dd or imaging tools"
+echo "- PXE boot: Use kernel and initramfs from cache"
+echo "- USB creation: Use ISO file for dd or imaging tools"
 echo " Example: sudo dd if=$ISO_PATH of=/dev/sdX bs=4M status=progress"
-echo " - Custom installer: https://$INSTALLER_URL"
+echo "- Custom installer: https://$INSTALLER_URL"
 echo ""
-print_success "Installer image generation and asset caching completed!"
@@ -96,7 +96,7 @@ else
 init_wild_env
 fi
 
-print_header "Talos Node Configuration Application"
+print_header "Talos node configuration"
 
 # Check if the specified node is registered
 NODE_INTERFACE=$(yq eval ".cluster.nodes.active.\"${NODE_NAME}\".interface" "${WC_HOME}/config.yaml" 2>/dev/null)
@@ -156,10 +156,7 @@ PATCH_FILE="${NODE_SETUP_DIR}/patch/${NODE_NAME}.yaml"
 
 # Check if patch file exists
 if [ ! -f "$PATCH_FILE" ]; then
-print_error "Patch file not found: $PATCH_FILE"
-print_info "Generate the patch file first:"
-print_info " wild-cluster-node-patch-generate $NODE_NAME"
-exit 1
+wild-cluster-node-patch-generate "$NODE_NAME"
 fi
 
 # Determine base config file
bin/wild-cluster-services-configure (new executable file, 124 lines)
@@ -0,0 +1,124 @@ (all lines added)
#!/bin/bash

set -e
set -o pipefail

# Usage function
usage() {
  echo "Usage: wild-cluster-services-configure [options] [service...]"
  echo ""
  echo "Compile service templates with configuration"
  echo ""
  echo "Arguments:"
  echo " service Specific service(s) to compile (optional)"
  echo ""
  echo "Options:"
  echo " -h, --help Show this help message"
  echo ""
  echo "Examples:"
  echo " wild-cluster-services-configure # Compile all services"
  echo " wild-cluster-services-configure metallb traefik # Compile specific services"
  echo ""
  echo "Available services:"
  echo " metallb, longhorn, traefik, coredns, cert-manager,"
  echo " externaldns, kubernetes-dashboard, nfs, docker-registry"
}

# Parse arguments
DRY_RUN=false
LIST_SERVICES=false
SPECIFIC_SERVICES=()

while [[ $# -gt 0 ]]; do
  case $1 in
    -h|--help)
      usage
      exit 0
      ;;
    --dry-run)
      DRY_RUN=true
      shift
      ;;
    -*)
      echo "Unknown option $1"
      usage
      exit 1
      ;;
    *)
      SPECIFIC_SERVICES+=("$1")
      shift
      ;;
  esac
done

# Initialize Wild Cloud environment
if [ -z "${WC_ROOT}" ]; then
  print "WC_ROOT is not set."
  exit 1
else
  source "${WC_ROOT}/scripts/common.sh"
  init_wild_env
fi

CLUSTER_SETUP_DIR="${WC_HOME}/setup/cluster-services"

# Check if cluster setup directory exists
if [ ! -d "$CLUSTER_SETUP_DIR" ]; then
  print_error "Cluster services setup directory not found: $CLUSTER_SETUP_DIR"
  print_info "Run 'wild-cluster-services-generate' first to generate setup files"
  exit 1
fi

# =============================================================================
# CLUSTER SERVICES TEMPLATE COMPILATION
# =============================================================================

print_header "Cluster services template compilation"

# Get list of services to compile
if [ ${#SPECIFIC_SERVICES[@]} -gt 0 ]; then
  SERVICES_TO_INSTALL=("${SPECIFIC_SERVICES[@]}")
  print_info "Compiling specific services: ${SERVICES_TO_INSTALL[*]}"
else
  # Compile all available services in a specific order for dependencies
  SERVICES_TO_INSTALL=(
    "metallb"
    "longhorn"
    "traefik"
    "coredns"
    "cert-manager"
    "externaldns"
    "kubernetes-dashboard"
    "nfs"
    "docker-registry"
  )
  print_info "Installing all available services"
fi

print_info "Services to compile: ${SERVICES_TO_INSTALL[*]}"

# Compile services
cd "$CLUSTER_SETUP_DIR"
INSTALLED_COUNT=0
FAILED_COUNT=0

for service in "${SERVICES_TO_INSTALL[@]}"; do
  print_info "Compiling $service"

  service_dir="$CLUSTER_SETUP_DIR/$service"
  source_service_dir="$service_dir/kustomize.template"
  dest_service_dir="$service_dir/kustomize"

  # Run configuration to make sure we have the template values we need.
  config_script="$service_dir/configure.sh"
  if [ -f "$config_script" ]; then
    source "$config_script"
  fi

  wild-compile-template-dir --clean "$source_service_dir" "$dest_service_dir"
  echo ""
done

cd - >/dev/null

print_success "Successfully compiled: $INSTALLED_COUNT services"
bin/wild-cluster-services-fetch (new executable file, 148 lines)
@@ -0,0 +1,148 @@ (all lines added)
#!/bin/bash

set -e
set -o pipefail

# Usage function
usage() {
  echo "Usage: wild-cluster-services-fetch [options]"
  echo ""
  echo "Fetch cluster services setup files from the repository."
  echo ""
  echo "Arguments:"
  echo " service Specific service(s) to install (optional)"
  echo ""
  echo "Options:"
  echo " -h, --help Show this help message"
  echo " --force Force fetching even if files exist"
  echo ""
  echo "Examples:"
  echo " wild-cluster-services-fetch # Fetch all services"
  echo " wild-cluster-services-fetch metallb traefik # Fetch specific services"
  echo ""
  echo "Available services:"
  echo " metallb, longhorn, traefik, coredns, cert-manager,"
  echo " externaldns, kubernetes-dashboard, nfs, docker-registry"
}

# Parse arguments
FORCE=false
while [[ $# -gt 0 ]]; do
  case $1 in
    -h|--help)
      usage
      exit 0
      ;;
    --force)
      FORCE=true
      shift
      ;;
    -*)
      echo "Unknown option $1"
      usage
      exit 1
      ;;
    *)
      echo "Unexpected argument: $1"
      usage
      exit 1
      ;;
  esac
done

# Initialize Wild Cloud environment
if [ -z "${WC_ROOT}" ]; then
  print "WC_ROOT is not set."
  exit 1
else
  source "${WC_ROOT}/scripts/common.sh"
  init_wild_env
fi

print_header "Fetching cluster services templates"

SOURCE_DIR="${WC_ROOT}/setup/cluster-services"
DEST_DIR="${WC_HOME}/setup/cluster-services"

# Check if source directory exists
if [ ! -d "$SOURCE_DIR" ]; then
  print_error "Cluster setup source directory not found: $SOURCE_DIR"
  print_info "Make sure the wild-cloud repository is properly set up"
  exit 1
fi

# Check if destination already exists
if [ -d "$DEST_DIR" ] && [ "$FORCE" = false ]; then
  print_warning "Cluster setup directory already exists: $DEST_DIR"
  read -p "Overwrite existing files? (y/N): " -n 1 -r
  echo
  if [[ $REPLY =~ ^[Yy]$ ]]; then
    FORCE=true
  fi
else
  mkdir -p "$DEST_DIR"
fi

# Copy README
if [ ! -f "${WC_HOME}/setup/README.md" ]; then
  cp "${WC_ROOT}/setup/README.md" "${WC_HOME}/setup/README.md"
fi

# Get list of services to install
if [ ${#SPECIFIC_SERVICES[@]} -gt 0 ]; then
  SERVICES_TO_INSTALL=("${SPECIFIC_SERVICES[@]}")
  print_info "Fetching specific services: ${SERVICES_TO_INSTALL[*]}"
else
  # Install all available services in a specific order for dependencies
  SERVICES_TO_INSTALL=(
    "metallb"
    "longhorn"
    "traefik"
    "coredns"
    "cert-manager"
    "externaldns"
    "kubernetes-dashboard"
    "nfs"
    "docker-registry"
  )
  print_info "Fetching all available services."
fi

for service in "${SERVICES_TO_INSTALL[@]}"; do

  SERVICE_SOURCE_DIR="$SOURCE_DIR/$service"
  SERVICE_DEST_DIR="$DEST_DIR/$service"
  TEMPLATE_SOURCE_DIR="$SERVICE_SOURCE_DIR/kustomize.template"
  TEMPLATE_DEST_DIR="$SERVICE_DEST_DIR/kustomize.template"

  if [ ! -d "$TEMPLATE_SOURCE_DIR" ]; then
    print_error "Source directory not found: $TEMPLATE_SOURCE_DIR"
    continue
  fi

  if $FORCE && [ -d "$TEMPLATE_DEST_DIR" ]; then
    print_info "Removing existing $service templates in: $TEMPLATE_DEST_DIR"
    rm -rf "$TEMPLATE_DEST_DIR"
  elif [ -d "$TEMPLATE_DEST_DIR" ]; then
    print_info "Files already exist for $service, skipping (use --force to overwrite)."
    continue
  fi

  mkdir -p "$SERVICE_DEST_DIR"
  mkdir -p "$TEMPLATE_DEST_DIR"
  cp -f "$SERVICE_SOURCE_DIR/README.md" "$SERVICE_DEST_DIR/"

  if [ -f "$SERVICE_SOURCE_DIR/configure.sh" ]; then
    cp -f "$SERVICE_SOURCE_DIR/configure.sh" "$SERVICE_DEST_DIR/"
  fi

  if [ -f "$SERVICE_SOURCE_DIR/install.sh" ]; then
    cp -f "$SERVICE_SOURCE_DIR/install.sh" "$SERVICE_DEST_DIR/"
  fi

  if [ -d "$TEMPLATE_SOURCE_DIR" ]; then
    cp -r "$TEMPLATE_SOURCE_DIR/"* "$TEMPLATE_DEST_DIR/"
  fi

  print_success "Fetched $service templates."
done
@@ -1,208 +0,0 @@ (deleted file; all lines removed)
#!/bin/bash

set -e
set -o pipefail

# Usage function
usage() {
  echo "Usage: wild-cluster-services-generate [options]"
  echo ""
  echo "Generate cluster services setup files by compiling templates."
  echo ""
  echo "Options:"
  echo " -h, --help Show this help message"
  echo " --force Force regeneration even if files exist"
  echo ""
  echo "This script will:"
  echo " - Copy cluster service templates from WC_ROOT to WC_HOME"
  echo " - Compile all templates with current configuration"
  echo " - Prepare services for installation"
  echo ""
  echo "Requirements:"
  echo " - Must be run from a wild-cloud directory"
  echo " - Basic cluster configuration must be completed"
  echo " - Service configuration (DNS, storage, etc.) must be completed"
}

# Parse arguments
FORCE=false
while [[ $# -gt 0 ]]; do
  case $1 in
    -h|--help)
      usage
      exit 0
      ;;
    --force)
      FORCE=true
      shift
      ;;
    -*)
      echo "Unknown option $1"
      usage
      exit 1
      ;;
    *)
      echo "Unexpected argument: $1"
      usage
      exit 1
      ;;
  esac
done

# Initialize Wild Cloud environment
if [ -z "${WC_ROOT}" ]; then
  print "WC_ROOT is not set."
  exit 1
else
  source "${WC_ROOT}/scripts/common.sh"
  init_wild_env
fi

# =============================================================================
# CLUSTER SERVICES SETUP GENERATION
# =============================================================================

print_header "Cluster Services Setup Generation"

SOURCE_DIR="${WC_ROOT}/setup/cluster-services"
DEST_DIR="${WC_HOME}/setup/cluster-services"

# Check if source directory exists
if [ ! -d "$SOURCE_DIR" ]; then
  print_error "Cluster setup source directory not found: $SOURCE_DIR"
  print_info "Make sure the wild-cloud repository is properly set up"
  exit 1
fi

# Check if destination already exists
if [ -d "$DEST_DIR" ] && [ "$FORCE" = false ]; then
  print_warning "Cluster setup directory already exists: $DEST_DIR"
  read -p "Overwrite existing files? (y/N): " -n 1 -r
  echo
  if [[ ! $REPLY =~ ^[Yy]$ ]]; then
    print_info "Skipping cluster services generation"
    exit 0
  fi
  print_info "Regenerating cluster setup files..."
  rm -rf "$DEST_DIR"
elif [ "$FORCE" = true ] && [ -d "$DEST_DIR" ]; then
  print_info "Force regeneration enabled, removing existing files..."
  rm -rf "$DEST_DIR"
fi

# Copy and compile cluster setup files
print_info "Copying and compiling cluster setup files from repository..."
mkdir -p "${WC_HOME}/setup"

# Copy README if it doesn't exist
if [ ! -f "${WC_HOME}/setup/README.md" ]; then
  cp "${WC_ROOT}/setup/README.md" "${WC_HOME}/setup/README.md"
fi

# Create destination directory
mkdir -p "$DEST_DIR"

# First, copy root-level files from setup/cluster/ (install-all.sh, get_helm.sh, etc.)
print_info "Copying root-level cluster setup files..."
for item in "$SOURCE_DIR"/*; do
  if [ -f "$item" ]; then
    item_name=$(basename "$item")
    print_info " Copying: ${item_name}"
    cp "$item" "$DEST_DIR/$item_name"
  fi
done

# Then, process each service directory in the source
print_info "Processing service directories..."
for service_dir in "$SOURCE_DIR"/*; do
  if [ ! -d "$service_dir" ]; then
    continue
  fi

  service_name=$(basename "$service_dir")
  dest_service_dir="$DEST_DIR/$service_name"

  print_info "Processing service: $service_name"

  # Create destination service directory
  mkdir -p "$dest_service_dir"

  # Copy all files except kustomize.template directory
  for item in "$service_dir"/*; do
    item_name=$(basename "$item")

    if [ "$item_name" = "kustomize.template" ]; then
      # Compile kustomize.template to kustomize directory
      if [ -d "$item" ]; then
        print_info " Compiling kustomize templates for $service_name"
        wild-compile-template-dir --clean "$item" "$dest_service_dir/kustomize"
      fi
    else
      # Copy other files as-is (install.sh, README.md, etc.)
      if [ -f "$item" ]; then
        # Compile individual template files
        if grep -q "{{" "$item" 2>/dev/null; then
          print_info " Compiling: ${item_name}"
          wild-compile-template < "$item" > "$dest_service_dir/$item_name"
        else
          cp "$item" "$dest_service_dir/$item_name"
        fi
      elif [ -d "$item" ]; then
        cp -r "$item" "$dest_service_dir/"
      fi
    fi
  done
done

print_success "Cluster setup files copied and compiled"

# Verify required configuration
print_info "Verifying service configuration..."

MISSING_CONFIG=()

# Check essential configuration values
if [ -z "$(wild-config cluster.name 2>/dev/null)" ]; then
  MISSING_CONFIG+=("cluster.name")
fi

if [ -z "$(wild-config cloud.domain 2>/dev/null)" ]; then
  MISSING_CONFIG+=("cloud.domain")
fi

if [ -z "$(wild-config cluster.ipAddressPool 2>/dev/null)" ]; then
  MISSING_CONFIG+=("cluster.ipAddressPool")
fi

if [ -z "$(wild-config operator.email 2>/dev/null)" ]; then
  MISSING_CONFIG+=("operator.email")
fi

if [ ${#MISSING_CONFIG[@]} -gt 0 ]; then
  print_warning "Some required configuration values are missing:"
  for config in "${MISSING_CONFIG[@]}"; do
    print_warning " - $config"
  done
  print_info "Run 'wild-setup' to complete the configuration"
fi

print_success "Cluster services setup generation completed!"
echo ""
print_info "Generated setup directory: $DEST_DIR"
echo ""
print_info "Available services:"
for service_dir in "$DEST_DIR"/*; do
  if [ -d "$service_dir" ] && [ -f "$service_dir/install.sh" ]; then
    service_name=$(basename "$service_dir")
    print_info " - $service_name"
  fi
done

echo ""
print_info "Next steps:"
echo " 1. Review the generated configuration files in $DEST_DIR"
echo " 2. Make sure your cluster is running and kubectl is configured"
echo " 3. Install services with: wild-cluster-services-up"
echo " 4. Or install individual services by running their install.sh scripts"

print_success "Ready for cluster services installation!"
@@ -14,22 +14,15 @@ usage() {
 echo ""
 echo "Options:"
 echo " -h, --help Show this help message"
-echo " --list List available services"
 echo " --dry-run Show what would be installed without running"
 echo ""
 echo "Examples:"
 echo " wild-cluster-services-up # Install all services"
 echo " wild-cluster-services-up metallb traefik # Install specific services"
-echo " wild-cluster-services-up --list # List available services"
 echo ""
-echo "Available services (when setup files exist):"
+echo "Available services:"
 echo " metallb, longhorn, traefik, coredns, cert-manager,"
 echo " externaldns, kubernetes-dashboard, nfs, docker-registry"
-echo ""
-echo "Requirements:"
-echo " - Must be run from a wild-cloud directory"
-echo " - Cluster services must be generated first (wild-cluster-services-generate)"
-echo " - Kubernetes cluster must be running and kubectl configured"
 }
 
 # Parse arguments

@@ -43,10 +36,6 @@ while [[ $# -gt 0 ]]; do
 usage
 exit 0
 ;;
---list)
-LIST_SERVICES=true
-shift
-;;
 --dry-run)
 DRY_RUN=true
 shift

@@ -81,43 +70,11 @@ if [ ! -d "$CLUSTER_SETUP_DIR" ]; then
 exit 1
 fi
 
-# Function to get available services
-get_available_services() {
-local services=()
-for service_dir in "$CLUSTER_SETUP_DIR"/*; do
-if [ -d "$service_dir" ] && [ -f "$service_dir/install.sh" ]; then
-services+=($(basename "$service_dir"))
-fi
-done
-echo "${services[@]}"
-}
-
-# List services if requested
-if [ "$LIST_SERVICES" = true ]; then
-print_header "Available Cluster Services"
-AVAILABLE_SERVICES=($(get_available_services))
-
-if [ ${#AVAILABLE_SERVICES[@]} -eq 0 ]; then
-print_warning "No services found in $CLUSTER_SETUP_DIR"
-print_info "Run 'wild-cluster-services-generate' first"
-else
-print_info "Services available for installation:"
-for service in "${AVAILABLE_SERVICES[@]}"; do
-if [ -f "$CLUSTER_SETUP_DIR/$service/install.sh" ]; then
-print_success " ✓ $service"
-else
-print_warning " ✗ $service (install.sh missing)"
-fi
-done
-fi
-exit 0
-fi
-
 # =============================================================================
 # CLUSTER SERVICES INSTALLATION
 # =============================================================================
 
-print_header "Cluster Services Installation"
+print_header "Cluster services installation"
 
 # Check kubectl connectivity
 if [ "$DRY_RUN" = false ]; then

@@ -151,28 +108,11 @@ else
 print_info "Installing all available services"
 fi
 
-# Filter to only include services that actually exist
-EXISTING_SERVICES=()
-for service in "${SERVICES_TO_INSTALL[@]}"; do
-if [ -d "$CLUSTER_SETUP_DIR/$service" ] && [ -f "$CLUSTER_SETUP_DIR/$service/install.sh" ]; then
-EXISTING_SERVICES+=("$service")
-elif [ ${#SPECIFIC_SERVICES[@]} -gt 0 ]; then
-# Only warn if user specifically requested this service
-print_warning "Service '$service' not found or missing install.sh"
-fi
-done
-
-if [ ${#EXISTING_SERVICES[@]} -eq 0 ]; then
-print_error "No installable services found"
-print_info "Run 'wild-cluster-services-generate' first to generate setup files"
-exit 1
-fi
-
-print_info "Services to install: ${EXISTING_SERVICES[*]}"
+print_info "Services to install: ${SERVICES_TO_INSTALL[*]}"
 
 if [ "$DRY_RUN" = true ]; then
 print_info "DRY RUN - would install the following services:"
-for service in "${EXISTING_SERVICES[@]}"; do
+for service in "${SERVICES_TO_INSTALL[@]}"; do
 print_info " - $service: $CLUSTER_SETUP_DIR/$service/install.sh"
 done
 exit 0

@@ -183,7 +123,9 @@ cd "$CLUSTER_SETUP_DIR"
 INSTALLED_COUNT=0
 FAILED_COUNT=0
 
-for service in "${EXISTING_SERVICES[@]}"; do
+SOURCE_DIR="${WC_ROOT}/setup/cluster-services"
+
+for service in "${SERVICES_TO_INSTALL[@]}"; do
 echo ""
 print_header "Installing $service"
 

@@ -206,7 +148,7 @@ cd - >/dev/null
 
 # Summary
 echo ""
-print_header "Installation Summary"
+print_header "Installation summary"
 print_success "Successfully installed: $INSTALLED_COUNT services"
 if [ $FAILED_COUNT -gt 0 ]; then
 print_warning "Failed to install: $FAILED_COUNT services"

@@ -219,13 +161,13 @@ if [ $INSTALLED_COUNT -gt 0 ]; then
 echo " 2. Check service status with: kubectl get services --all-namespaces"
 
 # Service-specific next steps
-if [[ " ${EXISTING_SERVICES[*]} " =~ " kubernetes-dashboard " ]]; then
+if [[ " ${SERVICES_TO_INSTALL[*]} " =~ " kubernetes-dashboard " ]]; then
 INTERNAL_DOMAIN=$(wild-config cloud.internalDomain 2>/dev/null || echo "your-internal-domain")
 echo " 3. Access dashboard at: https://dashboard.${INTERNAL_DOMAIN}"
 echo " 4. Get dashboard token with: ${WC_ROOT}/bin/dashboard-token"
 fi
 
-if [[ " ${EXISTING_SERVICES[*]} " =~ " cert-manager " ]]; then
+if [[ " ${SERVICES_TO_INSTALL[*]} " =~ " cert-manager " ]]; then
 echo " 3. Check cert-manager: kubectl get clusterissuers"
 fi
 fi
@@ -73,11 +73,9 @@ CONFIG_FILE="${WC_HOME}/config.yaml"
 
 # Create config file if it doesn't exist
 if [ ! -f "${CONFIG_FILE}" ]; then
-echo "Creating new config file at ${CONFIG_FILE}"
+print_info "Creating new config file at ${CONFIG_FILE}"
 echo "{}" > "${CONFIG_FILE}"
 fi
 
 # Use yq to set the value in the YAML file
 yq eval ".${KEY_PATH} = \"${VALUE}\"" -i "${CONFIG_FILE}"
-
-echo "Set ${KEY_PATH} = ${VALUE}"
@@ -48,6 +48,33 @@ while [[ $# -gt 0 ]]; do
 esac
 done
 
+# Check if directory has any files (including hidden files, excluding . and .. and .git)
+if [ "${UPDATE}" = false ]; then
+if [ -n "$(find . -maxdepth 1 -name ".*" -o -name "*" | grep -v "^\.$" | grep -v "^\.\.$" | grep -v "^\./\.git$" | head -1)" ]; then
+NC='\033[0m' # No Color
+YELLOW='\033[1;33m' # Yellow
+echo -e "${YELLOW}WARNING:${NC} Directory is not empty."
+read -p "Do you want to overwrite existing files? (y/N): " -n 1 -r
+echo
+if [[ $REPLY =~ ^[Yy]$ ]]; then
+confirm="yes"
+else
+confirm="no"
+fi
+if [ "$confirm" != "yes" ]; then
+echo "Aborting setup. Please run this script in an empty directory."
+exit 1
+fi
+fi
+fi
+
+# Initialize .wildcloud directory if it doesn't exist.
+if [ ! -d ".wildcloud" ]; then
+mkdir -p ".wildcloud"
+UPDATE=true
+echo "Created '.wildcloud' directory."
+fi
+
 # Initialize Wild Cloud environment
 if [ -z "${WC_ROOT}" ]; then
 echo "WC_ROOT is not set."

@@ -56,12 +83,10 @@ else
 source "${WC_ROOT}/scripts/common.sh"
 fi
 
-# Initialize .wildcloud directory if it doesn't exist.
-if [ ! -d ".wildcloud" ]; then
-mkdir -p ".wildcloud"
-UPDATE=true
-echo "Created '.wildcloud' directory."
+# Initialize config.yaml if it doesn't exist.
+if [ ! -f "config.yaml" ]; then
+touch "config.yaml"
+echo "Created 'config.yaml' file."
 fi
 
 # =============================================================================

@@ -84,46 +109,21 @@ if [ -z "$current_cluster_name" ] || [ "$current_cluster_name" = "null" ]; then
 print_info "Set cluster name to: ${cluster_name}"
 fi
 
-# Check if current directory is empty for new cloud
-if [ "${UPDATE}" = false ]; then
-# Check if directory has any files (including hidden files, excluding . and .. and .git)
-if [ -n "$(find . -maxdepth 1 -name ".*" -o -name "*" | grep -v "^\.$" | grep -v "^\.\.$" | grep -v "^\./\.git$" | grep -v "^\./\.wildcloud$"| head -1)" ]; then
-echo "Warning: Current directory is not empty."
-read -p "Do you want to overwrite existing files? (y/N): " -n 1 -r
-echo
-if [[ $REPLY =~ ^[Yy]$ ]]; then
-confirm="yes"
-else
-confirm="no"
-fi
-if [ "$confirm" != "yes" ]; then
-echo "Aborting setup. Please run this script in an empty directory."
-exit 1
-fi
-fi
-fi
-
+# =============================================================================
+# COPY SCAFFOLD
+# =============================================================================
 # Copy cloud files to current directory only if they do not exist.
 # Ignore files that already exist.
 SRC_DIR="${WC_ROOT}/setup/home-scaffold"
 rsync -av --ignore-existing --exclude=".git" "${SRC_DIR}/" ./ > /dev/null
 
-print_success "Ready for cluster setup!"
-
 # =============================================================================
-# COMPLETION
+# COPY DOCS
 # =============================================================================
 
-print_header "Wild Cloud Scaffold Setup Complete! Welcome to Wild Cloud!"
-echo ""
-echo "Next steps:"
-echo " 1. Set up your Kubernetes cluster:"
-echo " wild-setup-cluster"
-echo ""
-echo " 2. Install cluster services:"
-echo " wild-setup-services"
-echo ""
-echo "Or run the complete setup:"
-echo " wild-setup"
+wild-update-docs --force
 
+print_success "Wild Cloud initialized! Welcome to Wild Cloud!"
@@ -11,14 +11,6 @@ SKIP_SERVICES=false
 
 while [[ $# -gt 0 ]]; do
 case $1 in
---skip-scaffold)
-SKIP_SCAFFOLD=true
-shift
-;;
---skip-docs)
-SKIP_DOCS=true
-shift
-;;
 --skip-cluster)
 SKIP_CLUSTER=true
 shift

@@ -80,55 +72,12 @@ else
 fi
 
 print_header "Wild Cloud Setup"
-print_info "Running complete Wild Cloud setup."
-echo ""
-
-# =============================================================================
-# WC_HOME SCAFFOLDING
-# =============================================================================
-
-if [ "${SKIP_SCAFFOLD}" = false ]; then
-print_header "Cloud Home Setup"
-print_info "Scaffolding your cloud home..."
-
-if wild-setup-scaffold; then
-print_success "Cloud home setup completed"
-else
-print_error "Cloud home setup failed"
-exit 1
-fi
-echo ""
-else
-print_info "Skipping Home Setup"
-fi
-
-# =============================================================================
-# DOCS
-# =============================================================================
-
-if [ "${SKIP_DOCS}" = false ]; then
-print_header "Cloud Docs"
-print_info "Preparing your docs..."
-
-if wild-setup-docs; then
-print_success "Cloud docs setup completed"
-else
-print_error "Cloud docs setup failed"
-exit 1
-fi
-echo ""
-else
-print_info "Skipping Docs Setup"
-fi
 
 # =============================================================================
 # CLUSTER SETUP
 # =============================================================================
 
 if [ "${SKIP_CLUSTER}" = false ]; then
-print_header "Cluster Setup"
-print_info "Running wild-setup-cluster..."
-
 if wild-setup-cluster; then
 print_success "Cluster setup completed"
 else

@@ -145,9 +94,6 @@ fi
 # =============================================================================
 
 if [ "${SKIP_SERVICES}" = false ]; then
-print_header "Services Setup"
-print_info "Running wild-setup-services..."
-
 if wild-setup-services; then
 print_success "Services setup completed"
 else
@@ -62,40 +62,6 @@ else
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
print_header "Wild Cloud Cluster Setup"
|
print_header "Wild Cloud Cluster Setup"
|
||||||
print_info "Setting up cluster infrastructure"
|
|
||||||
echo ""
|
|
||||||
|
|
||||||
# Generate initial cluster configuration
|
|
||||||
|
|
||||||
if ! wild-cluster-config-generate; then
|
|
||||||
print_error "Failed to generate cluster configuration"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Configure Talos cli with our new cluster context
|
|
||||||
|
|
||||||
CLUSTER_NAME=$(wild-config "cluster.name")
|
|
||||||
HAS_CONTEXT=$(talosctl config contexts | grep -c "$CLUSTER_NAME" || true)
|
|
||||||
if [ "$HAS_CONTEXT" -eq 0 ]; then
|
|
||||||
print_info "No Talos context found for cluster $CLUSTER_NAME, creating..."
|
|
||||||
talosctl config merge ${WC_HOME}/setup/cluster-nodes/generated/talosconfig
|
|
||||||
talosctl config use "$CLUSTER_NAME"
|
|
||||||
print_success "Talos context for $CLUSTER_NAME created and set as current"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Talos asset download
|
|
||||||
|
|
||||||
if [ "${SKIP_INSTALLER}" = false ]; then
|
|
||||||
print_header "Installer Image Generation"
|
|
||||||
|
|
||||||
print_info "Running wild-cluster-node-boot-assets-download..."
|
|
||||||
wild-cluster-node-boot-assets-download
|
|
||||||
|
|
||||||
print_success "Installer image generated"
|
|
||||||
echo ""
|
|
||||||
else
|
|
||||||
print_info "Skipping: Installer Image Generation"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
# Configuration
|
# Configuration
|
||||||
@@ -103,6 +69,9 @@ fi
|
|||||||
|
|
||||||
prompt_if_unset_config "operator.email" "Operator email address"
|
prompt_if_unset_config "operator.email" "Operator email address"
|
||||||
|
|
||||||
|
prompt_if_unset_config "cluster.name" "Cluster name" "wild-cluster"
|
||||||
|
CLUSTER_NAME=$(wild-config "cluster.name")
|
||||||
|
|
||||||
# Configure hostname prefix for unique node names on LAN
|
# Configure hostname prefix for unique node names on LAN
|
||||||
prompt_if_unset_config "cluster.hostnamePrefix" "Hostname prefix (optional, e.g. 'test-' for unique names on LAN)" ""
|
prompt_if_unset_config "cluster.hostnamePrefix" "Hostname prefix (optional, e.g. 'test-' for unique names on LAN)" ""
|
||||||
HOSTNAME_PREFIX=$(wild-config "cluster.hostnamePrefix")
|
HOSTNAME_PREFIX=$(wild-config "cluster.hostnamePrefix")
|
||||||
@@ -123,41 +92,41 @@ prompt_if_unset_config "cluster.ipAddressPool" "MetalLB IP address pool" "${SUBN
|
|||||||
ip_pool=$(wild-config "cluster.ipAddressPool")
|
ip_pool=$(wild-config "cluster.ipAddressPool")
|
||||||
|
|
||||||
# Load balancer IP (automatically set to first address in the pool if not set)
|
# Load balancer IP (automatically set to first address in the pool if not set)
|
||||||
current_lb_ip=$(wild-config "cluster.loadBalancerIp")
|
default_lb_ip=$(echo "${ip_pool}" | cut -d'-' -f1)
|
||||||
if [ -z "$current_lb_ip" ] || [ "$current_lb_ip" = "null" ]; then
|
prompt_if_unset_config "cluster.loadBalancerIp" "Load balancer IP" "${default_lb_ip}"
|
||||||
lb_ip=$(echo "${ip_pool}" | cut -d'-' -f1)
|
|
||||||
wild-config-set "cluster.loadBalancerIp" "${lb_ip}"
|
|
||||||
print_info "Set load balancer IP to: ${lb_ip} (first IP in MetalLB pool)"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Talos version
|
# Talos version
|
||||||
prompt_if_unset_config "cluster.nodes.talos.version" "Talos version" "v1.10.4"
|
prompt_if_unset_config "cluster.nodes.talos.version" "Talos version" "v1.11.0"
|
||||||
talos_version=$(wild-config "cluster.nodes.talos.version")
|
talos_version=$(wild-config "cluster.nodes.talos.version")
|
||||||
|
|
||||||
# Talos schematic ID
|
# Talos schematic ID
|
||||||
current_schematic_id=$(wild-config "cluster.nodes.talos.schematicId")
|
prompt_if_unset_config "cluster.nodes.talos.schematicId" "Talos schematic ID" "56774e0894c8a3a3a9834a2aea65f24163cacf9506abbcbdc3ba135eaca4953f"
|
||||||
if [ -z "$current_schematic_id" ] || [ "$current_schematic_id" = "null" ]; then
|
schematic_id=$(wild-config "cluster.nodes.talos.schematicId")
|
||||||
echo ""
|
|
||||||
print_info "Get your Talos schematic ID from: https://factory.talos.dev/"
|
|
||||||
print_info "This customizes Talos with the drivers needed for your hardware."
|
|
||||||
|
|
||||||
# Use current schematic ID from config as default
|
|
||||||
default_schematic_id=$(wild-config "cluster.nodes.talos.schematicId")
|
|
||||||
if [ -n "$default_schematic_id" ] && [ "$default_schematic_id" != "null" ]; then
|
|
||||||
print_info "Using schematic ID from config for Talos $talos_version"
|
|
||||||
else
|
|
||||||
default_schematic_id=""
|
|
||||||
fi
|
|
||||||
|
|
||||||
schematic_id=$(prompt_with_default "Talos schematic ID" "${default_schematic_id}" "${current_schematic_id}")
|
|
||||||
wild-config-set "cluster.nodes.talos.schematicId" "${schematic_id}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# External DNS
|
# External DNS
|
||||||
cluster_name=$(wild-config "cluster.name")
|
prompt_if_unset_config "cluster.externalDns.ownerId" "External DNS owner ID" "external-dns-${CLUSTER_NAME}"
|
||||||
prompt_if_unset_config "cluster.externalDns.ownerId" "External DNS owner ID" "external-dns-${cluster_name}"
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# TALOS CLUSTER CONFIGURATION
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
prompt_if_unset_config "cluster.nodes.control.vip" "Control plane virtual IP" "${SUBNET_PREFIX}.90"
|
||||||
|
vip=$(wild-config "cluster.nodes.control.vip")
|
||||||
|
|
||||||
|
# Generate initial cluster configuration
|
||||||
|
if ! wild-cluster-config-generate; then
|
||||||
|
print_error "Failed to generate cluster configuration"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Configure Talos cli with our new cluster context
|
||||||
|
HAS_CONTEXT=$(talosctl config contexts | grep -c "$CLUSTER_NAME" || true)
|
||||||
|
if [ "$HAS_CONTEXT" -eq 0 ]; then
|
||||||
|
print_info "No Talos context found for cluster $CLUSTER_NAME, creating..."
|
||||||
|
talosctl config merge ${WC_HOME}/setup/cluster-nodes/generated/talosconfig
|
||||||
|
talosctl config context "$CLUSTER_NAME"
|
||||||
|
print_success "Talos context for $CLUSTER_NAME created and set as current"
|
||||||
|
fi
|
||||||
|
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
# Node setup
|
# Node setup
|
||||||
@@ -167,12 +136,6 @@ if [ "${SKIP_HARDWARE}" = false ]; then
|
|||||||
|
|
||||||
print_header "Control Plane Configuration"
|
print_header "Control Plane Configuration"
|
||||||
|
|
||||||
print_info "Configure control plane nodes (you need at least 3 for HA):"
|
|
||||||
echo ""
|
|
||||||
|
|
||||||
prompt_if_unset_config "cluster.nodes.control.vip" "Control plane virtual IP" "${SUBNET_PREFIX}.90"
|
|
||||||
vip=$(wild-config "cluster.nodes.control.vip")
|
|
||||||
|
|
||||||
# Automatically configure the first three IPs after VIP for control plane nodes
|
# Automatically configure the first three IPs after VIP for control plane nodes
|
||||||
vip_last_octet=$(echo "$vip" | cut -d. -f4)
|
vip_last_octet=$(echo "$vip" | cut -d. -f4)
|
||||||
vip_prefix=$(echo "$vip" | cut -d. -f1-3)
|
vip_prefix=$(echo "$vip" | cut -d. -f1-3)
|
||||||
@@ -184,7 +147,6 @@ if [ "${SKIP_HARDWARE}" = false ]; then
|
|||||||
for i in 1 2 3; do
|
for i in 1 2 3; do
|
||||||
NODE_NAME="${HOSTNAME_PREFIX}control-${i}"
|
NODE_NAME="${HOSTNAME_PREFIX}control-${i}"
|
||||||
TARGET_IP="${vip_prefix}.$(( vip_last_octet + i ))"
|
TARGET_IP="${vip_prefix}.$(( vip_last_octet + i ))"
|
||||||
echo ""
|
|
||||||
print_info "Registering control plane node: $NODE_NAME (IP: $TARGET_IP)"
|
print_info "Registering control plane node: $NODE_NAME (IP: $TARGET_IP)"
|
||||||
|
|
||||||
# Initialize the node in cluster.nodes.active if not already present
|
# Initialize the node in cluster.nodes.active if not already present
|
||||||
@@ -288,14 +250,8 @@ if [ "${SKIP_HARDWARE}" = false ]; then
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".disk" "$SELECTED_DISK"

# Copy current Talos version and schematic ID to this node
current_talos_version=$(wild-config "cluster.nodes.talos.version")
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".version" "$talos_version"
current_schematic_id=$(wild-config "cluster.nodes.talos.schematicId")
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".schematicId" "$schematic_id"
if [ -n "$current_talos_version" ] && [ "$current_talos_version" != "null" ]; then
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".version" "$current_talos_version"
fi
if [ -n "$current_schematic_id" ] && [ "$current_schematic_id" != "null" ]; then
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".schematicId" "$current_schematic_id"
fi

echo ""
read -p "Bring node $NODE_NAME ($TARGET_IP) up now? (y/N): " -r apply_config
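For orientation, the wild-config-set calls above would leave a per-node entry that can be read back like this (a hedged sketch; the node name and example values are placeholders, not taken from the diff):

```bash
# Hypothetical node name; commented values are illustrative only.
wild-config 'cluster.nodes.active."example-control-1".disk'        # e.g. /dev/nvme0n1
wild-config 'cluster.nodes.active."example-control-1".version'     # e.g. v1.11.0
wild-config 'cluster.nodes.active."example-control-1".schematicId' # e.g. 56774e08...
```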
@@ -315,7 +271,7 @@ if [ "${SKIP_HARDWARE}" = false ]; then
read -p "The cluster should be bootstrapped after the first control node is ready. Is it ready?: " -r is_ready
if [[ $is_ready =~ ^[Yy]$ ]]; then
print_info "Bootstrapping control plane node $TARGET_IP..."
talos config endpoint "$TARGET_IP"
talosctl config endpoint "$TARGET_IP"

# Attempt to bootstrap the cluster
if talosctl bootstrap --nodes "$TARGET_IP" 2>&1 | tee /tmp/bootstrap_output.log; then
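After a successful bootstrap, readiness is typically verified along these lines (a hedged sketch using standard talosctl and kubectl commands; not part of the diff):

```bash
# Wait for the first control plane node to report healthy.
talosctl health --nodes "$TARGET_IP" --endpoints "$TARGET_IP"

# Fetch a kubeconfig and confirm the node registers with Kubernetes.
talosctl kubeconfig --nodes "$TARGET_IP" --endpoints "$TARGET_IP"
kubectl get nodes
```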
@@ -425,14 +381,8 @@ if [ "${SKIP_HARDWARE}" = false ]; then
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".disk" "$SELECTED_DISK"

# Copy current Talos version and schematic ID to this node
current_talos_version=$(wild-config "cluster.nodes.talos.version")
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".version" "$talos_version"
current_schematic_id=$(wild-config "cluster.nodes.talos.schematicId")
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".schematicId" "$schematic_id"
if [ -n "$current_talos_version" ] && [ "$current_talos_version" != "null" ]; then
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".version" "$current_talos_version"
fi
if [ -n "$current_schematic_id" ] && [ "$current_schematic_id" != "null" ]; then
wild-config-set "cluster.nodes.active.\"${NODE_NAME}\".schematicId" "$current_schematic_id"
fi

print_success "Worker node $NODE_NAME registered successfully:"
print_info " - Name: $NODE_NAME"
@@ -65,9 +65,7 @@ if [ -z "$(wild-config "cluster.name")" ]; then
exit 1
fi

print_header "Wild Cloud Services Setup"
print_header "Wild Cloud services setup"
print_info "Installing Kubernetes cluster services"
echo ""

if ! command -v kubectl >/dev/null 2>&1; then
print_error "kubectl is not installed or not in PATH"
@@ -82,8 +80,8 @@ if ! kubectl cluster-info >/dev/null 2>&1; then
fi

# Generate cluster services setup files
wild-cluster-services-fetch
wild-cluster-services-generate --force
wild-cluster-services-generate

# Apply cluster services to cluster
@@ -4,28 +4,28 @@ set -e
set -o pipefail

# Parse arguments
UPDATE=false
FORCE=false

while [[ $# -gt 0 ]]; do
case $1 in
--update)
--force)
UPDATE=true
FORCE=true
shift
;;
-h|--help)
echo "Usage: $0 [--update]"
echo "Usage: $0 [--force]"
echo ""
echo "Copy Wild Cloud documentation to the current cloud directory."
echo ""
echo "Options:"
echo " --update Update existing docs (overwrite)"
echo " --force Force overwrite of existing docs"
echo " -h, --help Show this help message"
echo ""
exit 0
;;
-*)
echo "Unknown option $1"
echo "Usage: $0 [--update]"
echo "Usage: $0 [--force]"
exit 1
;;
*)

@@ -48,21 +48,21 @@ fi
DOCS_DEST="${WC_HOME}/docs"

# Check if docs already exist
if [ -d "${DOCS_DEST}" ] && [ "${UPDATE}" = false ]; then
if [ -d "${DOCS_DEST}" ] && [ "${FORCE}" = false ]; then
echo "Documentation already exists at ${DOCS_DEST}"
print_warning "Documentation already exists at ${DOCS_DEST}"
read -p "Do you want to update documentation files? (y/N): " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
UPDATE=true
FORCE=true
else
echo "Skipping documentation update."
print_info "Skipping documentation update."
exit 0
fi
fi

# Copy docs directory from root to WC_HOME
if [ -d "${WC_ROOT}/docs" ]; then
if [ "${UPDATE}" = true ] && [ -d "${DOCS_DEST}" ]; then
if [ "${FORCE}" = true ] && [ -d "${DOCS_DEST}" ]; then
rm -rf "${DOCS_DEST}"
fi
cp -r "${WC_ROOT}/docs" "${DOCS_DEST}"
env.sh
@@ -7,27 +7,4 @@ export WC_ROOT="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" && pwd)"
# Add bin to path first so wild-config is available
export PATH="$WC_ROOT/bin:$PATH"

# Install kubectl
if ! command -v kubectl &> /dev/null; then
echo "Error: kubectl is not installed. Installing."
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
fi

# Install talosctl
if ! command -v talosctl &> /dev/null; then
echo "Error: talosctl is not installed. Installing."
curl -sL https://talos.dev/install | sh
fi

# Check if gomplate is installed
if ! command -v gomplate &> /dev/null; then
echo "Error: gomplate is not installed. Please install gomplate first."
echo "Visit: https://docs.gomplate.ca/installing/"
exit 1
fi

echo "Wild Cloud root ready."
@@ -84,7 +84,7 @@ prompt_with_default() {
if [ -n "${default}" ]; then
printf "%s [default: %s]: " "${prompt}" "${default}" >&2
else
printf "%s [default: empty]: " "${prompt}" >&2
printf "%s: " "${prompt}" >&2
fi
read -r result
if [ -z "${result}" ]; then
@@ -109,16 +109,18 @@ prompt_if_unset_config() {
local prompt="$2"
local default="$3"

local current_value
# Check if key exists first to avoid error messages
current_value=$(wild-config "${config_path}")
if wild-config --check "${config_path}"; then
# Key exists, get its value
if [ -z "${current_value}" ] || [ "${current_value}" = "null" ]; then
local current_value
current_value=$(wild-config "${config_path}")
print_info "Using existing ${config_path} = ${current_value}"
else
# Key doesn't exist, prompt for it
local new_value
new_value=$(prompt_with_default "${prompt}" "${default}" "")
wild-config-set "${config_path}" "${new_value}"
print_info "Set ${config_path} = ${new_value}"
else
print_info "Using existing ${config_path} = ${current_value}"
fi
}
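A usage sketch of the updated helper (values are illustrative; it assumes `wild-config --check` succeeds only when the key already exists):

```bash
# Prompts only when the key is missing; otherwise the stored value is reused.
prompt_if_unset_config "cloud.domain" "Enter main domain name" "example.com"

# Equivalent manual check:
if wild-config --check "cloud.domain"; then
    echo "cloud.domain is already set to $(wild-config cloud.domain)"
fi
```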
@@ -128,16 +130,16 @@ prompt_if_unset_secret() {
local prompt="$2"
local default="$3"

local current_value
# Check if key exists first to avoid error messages
current_value=$(wild-secret "${secret_path}")
if wild-secret --check "${secret_path}"; then
# Key exists, we don't show the value for security
if [ -z "${current_value}" ] || [ "${current_value}" = "null" ]; then
print_info "Using existing secret ${secret_path}"
else
# Key doesn't exist, prompt for it
local new_value
new_value=$(prompt_with_default "${prompt}" "${default}" "")
wild-secret-set "${secret_path}" "${new_value}"
print_info "Set secret ${secret_path}"
else
print_info "Using existing secret ${secret_path}"
fi
}
@@ -168,7 +170,7 @@ init_wild_env() {
if [ -z "${WC_ROOT}" ]; then
echo "ERROR: WC_ROOT is not set."
exit 1
else
fi

# Check if WC_ROOT is a valid directory
if [ ! -d "${WC_ROOT}" ]; then
@@ -187,6 +189,33 @@ init_wild_env() {
echo "ERROR: This command must be run from within a wildcloud home directory."
exit 1
fi

# Check kubectl
if ! command -v kubectl &> /dev/null; then
echo "Error: kubectl is not installed. Please run $WC_ROOT/scripts/install-wild-cloud-dependencies.sh."
fi

# Check talosctl
if ! command -v talosctl &> /dev/null; then
echo "Error: talosctl is not installed. Please run $WC_ROOT/scripts/install-wild-cloud-dependencies.sh."
fi

# Check gomplate
if ! command -v gomplate &> /dev/null; then
echo "Error: gomplate is not installed. Please run $WC_ROOT/scripts/install-wild-cloud-dependencies.sh."
exit 1
fi

# Check yq
if ! command -v yq &> /dev/null; then
echo "Error: yq is not installed. Please run $WC_ROOT/scripts/install-wild-cloud-dependencies.sh."
exit 1
fi

# Check restic
if ! command -v restic &> /dev/null; then
echo "Error: restic is not installed. Please run $WC_ROOT/scripts/install-wild-cloud-dependencies.sh."
exit 1
fi
}
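The error messages above all point at the same remedy; presumably the dependency installer is run once per workstation, along these lines:

```bash
# Path taken from the error messages above; assumed to install the missing tools.
bash "$WC_ROOT/scripts/install-wild-cloud-dependencies.sh"
```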
@@ -1,5 +1,14 @@
#!/bin/bash

# Install kubectl
if ! command -v kubectl &> /dev/null; then
echo "Error: kubectl is not installed. Installing."
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
fi

# Install gomplate
if command -v gomplate &> /dev/null; then
echo "gomplate is already installed."
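After the install script runs, the tools can be verified with their standard version commands (a hedged sketch; not part of the script):

```bash
kubectl version --client
talosctl version --client
gomplate --version
```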
@@ -9,11 +9,13 @@ Follow the instructions to [set up cluster nodes](./cluster-nodes/README.md).
Set up cluster services:

```bash
./setup/cluster/setup-all.sh
wild-cluster-services-fetch
wild-cluster-services-configure
wild-cluster-services-up
```

Now make sure everything works:

```bash
./setup/cluster/validate-setup.sh
wild-health
```
@@ -45,3 +45,18 @@
- siderolabs/nvidia-open-gpu-kernel-modules-lts
- siderolabs/nvidia-open-gpu-kernel-modules-production
- siderolabs/util-linux-tools"
"56774e0894c8a3a3a9834a2aea65f24163cacf9506abbcbdc3ba135eaca4953f":
version: "v1.11.0"
architecture: "amd64"
secureBoot: false
schematic:
customization:
systemExtensions:
officialExtensions:
- siderolabs/gvisor
- siderolabs/intel-ucode
- siderolabs/iscsi-tools
- siderolabs/nvidia-container-toolkit-production
- siderolabs/nvidia-fabricmanager-production
- siderolabs/nvidia-open-gpu-kernel-modules-production
- siderolabs/util-linux-tools
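For orientation, a schematic ID like the one above is normally consumed as a Talos Image Factory installer reference; a hedged sketch with illustrative cluster name and endpoint (the exact flags used by wild-cluster-config-generate are not shown in this diff):

```bash
SCHEMATIC_ID="56774e0894c8a3a3a9834a2aea65f24163cacf9506abbcbdc3ba135eaca4953f"
TALOS_VERSION="v1.11.0"

# Installer image reference built from the schematic ID and Talos version.
INSTALL_IMAGE="factory.talos.dev/installer/${SCHEMATIC_ID}:${TALOS_VERSION}"

# Example: bake the image into freshly generated machine configs.
talosctl gen config --install-image "${INSTALL_IMAGE}" my-cluster https://192.168.1.90:6443
```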
setup/cluster-services/cert-manager/configure.sh (new file)
@@ -0,0 +1,10 @@
#!/bin/bash

print_info "Collecting cert-manager configuration..."

prompt_if_unset_config "cloud.domain" "Enter main domain name" "example.com"
domain=$(wild-config "cloud.domain")
prompt_if_unset_config "cloud.internalDomain" "Enter internal domain name" "local.${domain}"
prompt_if_unset_config "operator.email" "Enter operator email address (for Let's Encrypt)" ""
prompt_if_unset_config "cluster.certManager.cloudflare.domain" "Enter Cloudflare domain (for DNS challenges)" "${domain}"
prompt_if_unset_secret "cloudflare.token" "Enter Cloudflare API token (for DNS challenges)" ""
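As a non-interactive sketch, the same keys could be pre-seeded before the configure step runs, so the prompts are skipped (placeholder values):

```bash
wild-config-set "cloud.domain" "example.com"
wild-config-set "cloud.internalDomain" "local.example.com"
wild-config-set "operator.email" "ops@example.com"
wild-config-set "cluster.certManager.cloudflare.domain" "example.com"
wild-secret-set "cloudflare.token" "REPLACE_WITH_CLOUDFLARE_API_TOKEN"
```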
@@ -16,21 +16,6 @@ CERT_MANAGER_DIR="${CLUSTER_SETUP_DIR}/cert-manager"
print_header "Setting up cert-manager"

# Collect required configuration variables
print_info "Collecting cert-manager configuration..."

# Prompt for configuration using helper functions
prompt_if_unset_config "cloud.domain" "Enter main domain name" "example.com"

# Get the domain value to use as default for internal domain
domain=$(wild-config "cloud.domain")
prompt_if_unset_config "cloud.internalDomain" "Enter internal domain name" "local.${domain}"
prompt_if_unset_config "operator.email" "Enter operator email address (for Let's Encrypt)" ""
prompt_if_unset_config "cluster.certManager.cloudflare.domain" "Enter Cloudflare domain (for DNS challenges)" "${domain}"
prompt_if_unset_secret "cloudflare.token" "Enter Cloudflare API token (for DNS challenges)" ""

print_success "Configuration collected successfully"

# Templates should already be compiled by wild-cluster-services-generate
echo "Using pre-compiled cert-manager templates..."
if [ ! -d "${CERT_MANAGER_DIR}/kustomize" ]; then
setup/cluster-services/coredns/configure.sh (new file)
@@ -0,0 +1,7 @@
#!/bin/bash

print_info "Collecting CoreDNS configuration..."

prompt_if_unset_config "cloud.internalDomain" "Enter internal domain name" "local.example.com"
prompt_if_unset_config "cluster.loadBalancerIp" "Enter load balancer IP address" "192.168.1.240"
prompt_if_unset_config "cloud.dns.externalResolver" "Enter external DNS resolver" "8.8.8.8"
@@ -16,16 +16,6 @@ COREDNS_DIR="${CLUSTER_SETUP_DIR}/coredns"
print_header "Setting up CoreDNS for k3s"

# Collect required configuration variables
print_info "Collecting CoreDNS configuration..."

# Prompt for configuration using helper functions
prompt_if_unset_config "cloud.internalDomain" "Enter internal domain name" "local.example.com"
prompt_if_unset_config "cluster.loadBalancerIp" "Enter load balancer IP address" "192.168.1.240"
prompt_if_unset_config "cloud.dns.externalResolver" "Enter external DNS resolver" "8.8.8.8"

print_success "Configuration collected successfully"

# Templates should already be compiled by wild-cluster-services-generate
echo "Using pre-compiled CoreDNS templates..."
if [ ! -d "${COREDNS_DIR}/kustomize" ]; then
setup/cluster-services/docker-registry/configure.sh (new file)
@@ -0,0 +1,6 @@
#!/bin/bash

print_info "Collecting Docker Registry configuration..."

prompt_if_unset_config "cloud.dockerRegistryHost" "Enter Docker Registry hostname" "registry.local.example.com"
prompt_if_unset_config "cluster.dockerRegistry.storage" "Enter Docker Registry storage size" "100Gi"
@@ -16,15 +16,6 @@ DOCKER_REGISTRY_DIR="${CLUSTER_SETUP_DIR}/docker-registry"
print_header "Setting up Docker Registry"

# Collect required configuration variables
print_info "Collecting Docker Registry configuration..."

# Prompt for configuration using helper functions
prompt_if_unset_config "cloud.dockerRegistryHost" "Enter Docker Registry hostname" "registry.local.example.com"
prompt_if_unset_config "cluster.dockerRegistry.storage" "Enter Docker Registry storage size" "100Gi"

print_success "Configuration collected successfully"

# Templates should already be compiled by wild-cluster-services-generate
echo "Using pre-compiled Docker Registry templates..."
if [ ! -d "${DOCKER_REGISTRY_DIR}/kustomize" ]; then
setup/cluster-services/externaldns/configure.sh (new file)
@@ -0,0 +1,3 @@
print_info "Collecting ExternalDNS configuration..."

prompt_if_unset_config "cluster.externalDns.ownerId" "Enter ExternalDNS owner ID (unique identifier for this cluster)" "wild-cloud-$(hostname -s)"
@@ -16,14 +16,6 @@ EXTERNALDNS_DIR="${CLUSTER_SETUP_DIR}/externaldns"
print_header "Setting up ExternalDNS"

# Collect required configuration variables
print_info "Collecting ExternalDNS configuration..."

# Prompt for configuration using helper functions
prompt_if_unset_config "cluster.externalDns.ownerId" "Enter ExternalDNS owner ID (unique identifier for this cluster)" "wild-cloud-$(hostname -s)"

print_success "Configuration collected successfully"

# Templates should already be compiled by wild-cluster-services-generate
echo "Using pre-compiled ExternalDNS templates..."
if [ ! -d "${EXTERNALDNS_DIR}/kustomize" ]; then
@@ -1,22 +0,0 @@
#!/bin/bash
set -e

# Navigate to script directory
SCRIPT_PATH="$(realpath "${BASH_SOURCE[0]}")"
SCRIPT_DIR="$(dirname "$SCRIPT_PATH")"
cd "$SCRIPT_DIR"

echo "Setting up your wild-cloud cluster services..."
echo

./metallb/install.sh
./longhorn/install.sh
./traefik/install.sh
./coredns/install.sh
./cert-manager/install.sh
./externaldns/install.sh
./kubernetes-dashboard/install.sh
./nfs/install.sh
./docker-registry/install.sh

echo "Service setup complete!"
setup/cluster-services/kubernetes-dashboard/configure.sh (new file)
@@ -0,0 +1,5 @@
#!/bin/bash

print_info "Collecting Kubernetes Dashboard configuration..."

prompt_if_unset_config "cloud.internalDomain" "Enter internal domain name (for dashboard URL)" "local.example.com"
@@ -16,14 +16,6 @@ KUBERNETES_DASHBOARD_DIR="${CLUSTER_SETUP_DIR}/kubernetes-dashboard"
print_header "Setting up Kubernetes Dashboard"

# Collect required configuration variables
print_info "Collecting Kubernetes Dashboard configuration..."

# Prompt for configuration using helper functions
prompt_if_unset_config "cloud.internalDomain" "Enter internal domain name (for dashboard URL)" "local.example.com"

print_success "Configuration collected successfully"

# Templates should already be compiled by wild-cluster-services-generate
echo "Using pre-compiled Dashboard templates..."
if [ ! -d "${KUBERNETES_DASHBOARD_DIR}/kustomize" ]; then
setup/cluster-services/metallb/configure.sh (new file)
@@ -0,0 +1,6 @@
#!/bin/bash

print_info "Collecting MetalLB configuration..."

prompt_if_unset_config "cluster.ipAddressPool" "Enter IP address pool for MetalLB (CIDR format, e.g., 192.168.1.240-192.168.1.250)" "192.168.1.240-192.168.1.250"
prompt_if_unset_config "cluster.loadBalancerIp" "Enter load balancer IP address" "192.168.1.240"
@@ -16,15 +16,6 @@ METALLB_DIR="${CLUSTER_SETUP_DIR}/metallb"
print_header "Setting up MetalLB"

# Collect required configuration variables
print_info "Collecting MetalLB configuration..."

# Prompt for configuration using helper functions
prompt_if_unset_config "cluster.ipAddressPool" "Enter IP address pool for MetalLB (CIDR format, e.g., 192.168.1.240-192.168.1.250)" "192.168.1.240-192.168.1.250"
prompt_if_unset_config "cluster.loadBalancerIp" "Enter load balancer IP address" "192.168.1.240"

print_success "Configuration collected successfully"

# Templates should already be compiled by wild-cluster-services-generate
echo "Using pre-compiled MetalLB templates..."
if [ ! -d "${METALLB_DIR}/kustomize" ]; then
setup/cluster-services/nfs/configure.sh (new file)
@@ -0,0 +1,7 @@
#!/bin/bash

print_info "Collecting NFS configuration..."

prompt_if_unset_config "cloud.nfs.host" "Enter NFS server hostname or IP address" "192.168.1.100"
prompt_if_unset_config "cloud.nfs.mediaPath" "Enter NFS export path for media storage" "/mnt/storage/media"
prompt_if_unset_config "cloud.nfs.storageCapacity" "Enter NFS storage capacity (e.g., 1Ti, 500Gi)" "1Ti"
@@ -16,16 +16,6 @@ NFS_DIR="${CLUSTER_SETUP_DIR}/nfs"
print_header "Registering NFS server with Kubernetes cluster"

# Collect required configuration variables
print_info "Collecting NFS configuration..."

# Prompt for configuration using helper functions
prompt_if_unset_config "cloud.nfs.host" "Enter NFS server hostname or IP address" "192.168.1.100"
prompt_if_unset_config "cloud.nfs.mediaPath" "Enter NFS export path for media storage" "/mnt/storage/media"
prompt_if_unset_config "cloud.nfs.storageCapacity" "Enter NFS storage capacity (e.g., 1Ti, 500Gi)" "1Ti"

print_success "Configuration collected successfully"

# Templates should already be compiled by wild-cluster-services-generate
echo "Using pre-compiled NFS templates..."
if [ ! -d "${NFS_DIR}/kustomize" ]; then
@@ -1,15 +1,4 @@
#!/bin/bash
set -e
set -o pipefail

# Initialize Wild Cloud environment
if [ -z "${WC_ROOT}" ]; then
print "WC_ROOT is not set."
exit 1
else
source "${WC_ROOT}/scripts/common.sh"
init_wild_env
fi

print_header "Setting up SMTP Configuration"
@@ -19,7 +8,6 @@ echo ""
# Collect SMTP configuration
print_info "Collecting SMTP configuration..."

prompt_if_unset_config "cloud.smtp.host" "Enter SMTP host (e.g., email-smtp.us-east-2.amazonaws.com for AWS SES)" ""
prompt_if_unset_config "cloud.smtp.port" "Enter SMTP port (usually 465 for SSL, 587 for STARTTLS)" "465"
prompt_if_unset_config "cloud.smtp.user" "Enter SMTP username/access key" ""

@@ -47,7 +35,3 @@ echo " User: $(wild-config cloud.smtp.user)"
echo " From: $(wild-config cloud.smtp.from)"
echo " Password: $(wild-secret cloud.smtp.password >/dev/null 2>&1 && echo "✓ Set" || echo "✗ Not set")"
echo ""
echo "Applications that use SMTP: ghost, gitea, and others"
echo ""
echo "To test SMTP configuration, deploy an app that uses email (like Ghost)"
echo "and try the password reset or user invitation features."
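As a non-interactive sketch, the SMTP keys collected above could also be pre-seeded instead of prompted for (placeholder values; key names are taken from the prompts and echo lines above):

```bash
wild-config-set "cloud.smtp.host" "email-smtp.us-east-2.amazonaws.com"
wild-config-set "cloud.smtp.port" "465"
wild-config-set "cloud.smtp.user" "SMTP_ACCESS_KEY_ID"
wild-config-set "cloud.smtp.from" "noreply@example.com"
wild-secret-set "cloud.smtp.password" "SMTP_SECRET_ACCESS_KEY"
```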
setup/cluster-services/traefik/configure.sh (new file)
@@ -0,0 +1,5 @@
#!/bin/bash

print_info "Collecting Traefik configuration..."

prompt_if_unset_config "cluster.loadBalancerIp" "Enter load balancer IP address for Traefik" "192.168.1.240"
@@ -16,14 +16,6 @@ TRAEFIK_DIR="${CLUSTER_SETUP_DIR}/traefik"
print_header "Setting up Traefik ingress controller"

# Collect required configuration variables
print_info "Collecting Traefik configuration..."

# Prompt for configuration using helper functions
prompt_if_unset_config "cluster.loadBalancerIp" "Enter load balancer IP address for Traefik" "192.168.1.240"

print_success "Configuration collected successfully"

# Install required CRDs first
echo "Installing Gateway API CRDs..."
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/standard-install.yaml