Lint fixes.

Check or explicitly discard returned errors, deduplicate the config/secrets and
app-operation handlers, factor the talosctl JSON queries into a shared helper,
simplify a redundant nil check, and remove unused copy helpers.
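The recurring pattern in this commit is making intentionally ignored error returns explicit so errcheck-style linters pass. A minimal, self-contained sketch of that pattern follows; writeStatus is a hypothetical stand-in for the best-effort calls touched here (opsMgr.Update, writeSSEEvent, os.WriteFile), not code from this repository.

package main

import (
	"fmt"
	"os"
)

// writeStatus is a hypothetical stand-in for a best-effort update call
// whose error we deliberately do not act on.
func writeStatus(path, status string) error {
	return os.WriteFile(path, []byte(status), 0644)
}

func main() {
	f, err := os.CreateTemp("", "status-*")
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	// Before: defer f.Close()
	// After: wrap the deferred call so the discarded error is explicit.
	defer func() { _ = f.Close() }()

	// Before: writeStatus(f.Name(), "running")
	// After: assign to the blank identifier to mark the ignore as deliberate.
	_ = writeStatus(f.Name(), "running")

	fmt.Println("status written to", f.Name())
}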
@@ -292,12 +292,9 @@ func (api *API) GetConfig(w http.ResponseWriter, r *http.Request) {
respondJSON(w, http.StatusOK, configMap)
}

// UpdateConfig updates instance configuration
func (api *API) UpdateConfig(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]

if err := api.instance.ValidateInstance(name); err != nil {
// updateYAMLFile updates a YAML file with the provided key-value pairs
func (api *API) updateYAMLFile(w http.ResponseWriter, r *http.Request, instanceName, fileType string, updateFunc func(string, string, string) error) {
if err := api.instance.ValidateInstance(instanceName); err != nil {
respondError(w, http.StatusNotFound, fmt.Sprintf("Instance not found: %v", err))
return
}
@@ -314,22 +311,40 @@ func (api *API) UpdateConfig(w http.ResponseWriter, r *http.Request) {
return
}

configPath := api.instance.GetInstanceConfigPath(name)
var filePath string
if fileType == "config" {
filePath = api.instance.GetInstanceConfigPath(instanceName)
} else {
filePath = api.instance.GetInstanceSecretsPath(instanceName)
}

// Update each key-value pair
for key, value := range updates {
valueStr := fmt.Sprintf("%v", value)
if err := api.config.SetConfigValue(configPath, key, valueStr); err != nil {
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to update config key %s: %v", key, err))
if err := updateFunc(filePath, key, valueStr); err != nil {
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to update %s key %s: %v", fileType, key, err))
return
}
}

// Capitalize first letter of fileType for message
fileTypeCap := fileType
if len(fileType) > 0 {
fileTypeCap = string(fileType[0]-32) + fileType[1:]
}

respondJSON(w, http.StatusOK, map[string]string{
"message": "Config updated successfully",
"message": fmt.Sprintf("%s updated successfully", fileTypeCap),
})
}

// UpdateConfig updates instance configuration
func (api *API) UpdateConfig(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
api.updateYAMLFile(w, r, name, "config", api.config.SetConfigValue)
}

// GetSecrets retrieves instance secrets (redacted by default)
func (api *API) GetSecrets(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
@@ -375,39 +390,7 @@ func (api *API) GetSecrets(w http.ResponseWriter, r *http.Request) {
func (api *API) UpdateSecrets(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]

if err := api.instance.ValidateInstance(name); err != nil {
respondError(w, http.StatusNotFound, fmt.Sprintf("Instance not found: %v", err))
return
}

body, err := io.ReadAll(r.Body)
if err != nil {
respondError(w, http.StatusBadRequest, "Failed to read request body")
return
}

var updates map[string]interface{}
if err := yaml.Unmarshal(body, &updates); err != nil {
respondError(w, http.StatusBadRequest, fmt.Sprintf("Invalid YAML: %v", err))
return
}

// Get secrets file path
secretsPath := api.instance.GetInstanceSecretsPath(name)

// Update each secret
for key, value := range updates {
valueStr := fmt.Sprintf("%v", value)
if err := api.secrets.SetSecret(secretsPath, key, valueStr); err != nil {
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to update secret %s: %v", key, err))
return
}
}

respondJSON(w, http.StatusOK, map[string]string{
"message": "Secrets updated successfully",
})
api.updateYAMLFile(w, r, name, "secrets", api.secrets.SetSecret)
}

// GetContext retrieves current context
@@ -487,7 +470,7 @@ func (api *API) StatusHandler(w http.ResponseWriter, r *http.Request, startTime
func respondJSON(w http.ResponseWriter, status int, data interface{}) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(status)
json.NewEncoder(w).Encode(data)
_ = json.NewEncoder(w).Encode(data)
}

func respondError(w http.ResponseWriter, status int, message string) {

@@ -106,80 +106,62 @@ func (api *API) AppsAdd(w http.ResponseWriter, r *http.Request) {
})
}

// AppsDeploy deploys an app to the cluster
func (api *API) AppsDeploy(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
instanceName := vars["name"]
appName := vars["app"]

// startAppOperation starts an app operation (deploy or delete) in the background
func (api *API) startAppOperation(w http.ResponseWriter, instanceName, appName, operationType, successMessage string, operation func(*apps.Manager, string, string) error) {
// Validate instance exists
if err := api.instance.ValidateInstance(instanceName); err != nil {
respondError(w, http.StatusNotFound, fmt.Sprintf("Instance not found: %v", err))
return
}

// Start deploy operation
// Start operation
opsMgr := operations.NewManager(api.dataDir)
opID, err := opsMgr.Start(instanceName, "deploy_app", appName)
opID, err := opsMgr.Start(instanceName, operationType, appName)
if err != nil {
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to start operation: %v", err))
return
}

// Deploy in background
// Execute operation in background
go func() {
appsMgr := apps.NewManager(api.dataDir, api.appsDir)
opsMgr.UpdateStatus(instanceName, opID, "running")
_ = opsMgr.UpdateStatus(instanceName, opID, "running")

if err := appsMgr.Deploy(instanceName, appName); err != nil {
opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
if err := operation(appsMgr, instanceName, appName); err != nil {
_ = opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
} else {
opsMgr.Update(instanceName, opID, "completed", "App deployed", 100)
_ = opsMgr.Update(instanceName, opID, "completed", successMessage, 100)
}
}()

respondJSON(w, http.StatusAccepted, map[string]string{
"operation_id": opID,
"message": "App deployment initiated",
"message": fmt.Sprintf("App %s initiated", operationType),
})
}

// AppsDeploy deploys an app to the cluster
func (api *API) AppsDeploy(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
instanceName := vars["name"]
appName := vars["app"]

api.startAppOperation(w, instanceName, appName, "deploy_app", "App deployed",
func(mgr *apps.Manager, instance, app string) error {
return mgr.Deploy(instance, app)
})
}

// AppsDelete deletes an app
func (api *API) AppsDelete(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
instanceName := vars["name"]
appName := vars["app"]

// Validate instance exists
if err := api.instance.ValidateInstance(instanceName); err != nil {
respondError(w, http.StatusNotFound, fmt.Sprintf("Instance not found: %v", err))
return
}

// Start delete operation
opsMgr := operations.NewManager(api.dataDir)
opID, err := opsMgr.Start(instanceName, "delete_app", appName)
if err != nil {
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to start operation: %v", err))
return
}

// Delete in background
go func() {
appsMgr := apps.NewManager(api.dataDir, api.appsDir)
opsMgr.UpdateStatus(instanceName, opID, "running")

if err := appsMgr.Delete(instanceName, appName); err != nil {
opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
} else {
opsMgr.Update(instanceName, opID, "completed", "App deleted", 100)
}
}()

respondJSON(w, http.StatusAccepted, map[string]string{
"operation_id": opID,
"message": "App deletion initiated",
})
api.startAppOperation(w, instanceName, appName, "delete_app", "App deleted",
func(mgr *apps.Manager, instance, app string) error {
return mgr.Delete(instance, app)
})
}

// AppsGetStatus returns app status

@@ -27,15 +27,15 @@ func (api *API) BackupAppStart(w http.ResponseWriter, r *http.Request) {

// Run backup in background
go func() {
opMgr.UpdateProgress(instanceName, opID, 10, "Starting backup")
_ = opMgr.UpdateProgress(instanceName, opID, 10, "Starting backup")

info, err := mgr.BackupApp(instanceName, appName)
if err != nil {
opMgr.Update(instanceName, opID, "failed", err.Error(), 100)
_ = opMgr.Update(instanceName, opID, "failed", err.Error(), 100)
return
}

opMgr.Update(instanceName, opID, "completed", "Backup completed", 100)
_ = opMgr.Update(instanceName, opID, "completed", "Backup completed", 100)
_ = info // Metadata saved in backup.json
}()

@@ -92,14 +92,14 @@ func (api *API) BackupAppRestore(w http.ResponseWriter, r *http.Request) {

// Run restore in background
go func() {
opMgr.UpdateProgress(instanceName, opID, 10, "Starting restore")
_ = opMgr.UpdateProgress(instanceName, opID, 10, "Starting restore")

if err := mgr.RestoreApp(instanceName, appName, opts); err != nil {
opMgr.Update(instanceName, opID, "failed", err.Error(), 100)
_ = opMgr.Update(instanceName, opID, "failed", err.Error(), 100)
return
}

opMgr.Update(instanceName, opID, "completed", "Restore completed", 100)
_ = opMgr.Update(instanceName, opID, "completed", "Restore completed", 100)
}()

respondJSON(w, http.StatusAccepted, map[string]interface{}{

@@ -101,12 +101,12 @@ func (api *API) ClusterBootstrap(w http.ResponseWriter, r *http.Request) {
// Bootstrap in background
go func() {
clusterMgr := cluster.NewManager(api.dataDir)
opsMgr.UpdateStatus(instanceName, opID, "running")
_ = opsMgr.UpdateStatus(instanceName, opID, "running")

if err := clusterMgr.Bootstrap(instanceName, req.Node); err != nil {
opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
_ = opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
} else {
opsMgr.Update(instanceName, opID, "completed", "Bootstrap completed", 100)
_ = opsMgr.Update(instanceName, opID, "completed", "Bootstrap completed", 100)
}
}()

@@ -315,12 +315,12 @@ func (api *API) ClusterReset(w http.ResponseWriter, r *http.Request) {
// Reset in background
go func() {
clusterMgr := cluster.NewManager(api.dataDir)
opsMgr.UpdateStatus(instanceName, opID, "running")
_ = opsMgr.UpdateStatus(instanceName, opID, "running")

if err := clusterMgr.Reset(instanceName, req.Confirm); err != nil {
opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
_ = opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
} else {
opsMgr.Update(instanceName, opID, "completed", "Cluster reset completed", 100)
_ = opsMgr.Update(instanceName, opID, "completed", "Cluster reset completed", 100)
}
}()

@@ -105,20 +105,20 @@ func (api *API) ServicesInstall(w http.ResponseWriter, r *http.Request) {
defer func() {
if r := recover(); r != nil {
fmt.Printf("[ERROR] Service install goroutine panic: %v\n", r)
opsMgr.Update(instanceName, opID, "failed", fmt.Sprintf("Internal error: %v", r), 0)
_ = opsMgr.Update(instanceName, opID, "failed", fmt.Sprintf("Internal error: %v", r), 0)
}
}()

fmt.Printf("[DEBUG] Service install goroutine started: service=%s instance=%s opID=%s\n", req.Name, instanceName, opID)
servicesMgr := services.NewManager(api.dataDir)
opsMgr.UpdateStatus(instanceName, opID, "running")
_ = opsMgr.UpdateStatus(instanceName, opID, "running")

if err := servicesMgr.Install(instanceName, req.Name, req.Fetch, req.Deploy, opID, api.broadcaster); err != nil {
fmt.Printf("[DEBUG] Service install failed: %v\n", err)
opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
_ = opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
} else {
fmt.Printf("[DEBUG] Service install completed successfully\n")
opsMgr.Update(instanceName, opID, "completed", "Service installed", 100)
_ = opsMgr.Update(instanceName, opID, "completed", "Service installed", 100)
}
}()

@@ -161,12 +161,12 @@ func (api *API) ServicesInstallAll(w http.ResponseWriter, r *http.Request) {
// Install in background
go func() {
servicesMgr := services.NewManager(api.dataDir)
opsMgr.UpdateStatus(instanceName, opID, "running")
_ = opsMgr.UpdateStatus(instanceName, opID, "running")

if err := servicesMgr.InstallAll(instanceName, req.Fetch, req.Deploy, opID, api.broadcaster); err != nil {
opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
_ = opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
} else {
opsMgr.Update(instanceName, opID, "completed", "All services installed", 100)
_ = opsMgr.Update(instanceName, opID, "completed", "All services installed", 100)
}
}()

@@ -199,12 +199,12 @@ func (api *API) ServicesDelete(w http.ResponseWriter, r *http.Request) {
// Delete in background
go func() {
servicesMgr := services.NewManager(api.dataDir)
opsMgr.UpdateStatus(instanceName, opID, "running")
_ = opsMgr.UpdateStatus(instanceName, opID, "running")

if err := servicesMgr.Delete(instanceName, serviceName); err != nil {
opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
_ = opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
} else {
opsMgr.Update(instanceName, opID, "completed", "Service deleted", 100)
_ = opsMgr.Update(instanceName, opID, "completed", "Service deleted", 100)
}
}()

@@ -515,7 +515,7 @@ func (api *API) ServicesUpdateConfig(w http.ResponseWriter, r *http.Request) {
}

// Validate request
if update.Config == nil || len(update.Config) == 0 {
if len(update.Config) == 0 {
respondError(w, http.StatusBadRequest, "config field is required and must not be empty")
return
}

@@ -323,7 +323,7 @@ func (m *Manager) Deploy(instanceName, appName string) error {
applyNsCmd := exec.Command("kubectl", "apply", "-f", "-")
applyNsCmd.Stdin = bytes.NewReader(namespaceYaml)
tools.WithKubeconfig(applyNsCmd, kubeconfigPath)
applyNsCmd.CombinedOutput() // Ignore errors - namespace might already exist
_, _ = applyNsCmd.CombinedOutput() // Ignore errors - namespace might already exist

// Create Kubernetes secrets from secrets.yaml
if storage.FileExists(secretsFile) {
@@ -334,7 +334,7 @@ func (m *Manager) Deploy(instanceName, appName string) error {
// Delete existing secret if it exists (to update it)
deleteCmd := exec.Command("kubectl", "delete", "secret", fmt.Sprintf("%s-secrets", appName), "-n", appName, "--ignore-not-found")
tools.WithKubeconfig(deleteCmd, kubeconfigPath)
deleteCmd.CombinedOutput()
_, _ = deleteCmd.CombinedOutput()

// Create secret from literals
createSecretCmd := exec.Command("kubectl", "create", "secret", "generic", fmt.Sprintf("%s-secrets", appName), "-n", appName)
@@ -390,7 +390,7 @@ func (m *Manager) Delete(instanceName, appName string) error {
// Wait for namespace deletion to complete (timeout after 60s)
waitCmd := exec.Command("kubectl", "wait", "--for=delete", "namespace", appName, "--timeout=60s")
tools.WithKubeconfig(waitCmd, kubeconfigPath)
waitCmd.CombinedOutput() // Ignore errors - namespace might not exist
_, _ = waitCmd.CombinedOutput() // Ignore errors - namespace might not exist

// Delete local app configuration directory
if err := os.RemoveAll(appDir); err != nil {

@@ -127,7 +127,7 @@ func (m *Manager) runDiscovery(instanceName string, ipList []string) {

status, _ := m.GetDiscoveryStatus(instanceName)
status.Active = false
m.writeDiscoveryStatus(instanceName, status)
_ = m.writeDiscoveryStatus(instanceName, status)
}()

// Discover nodes by probing each IP
@@ -146,7 +146,7 @@ func (m *Manager) runDiscovery(instanceName string, ipList []string) {
m.discoveryMu.Lock()
status, _ := m.GetDiscoveryStatus(instanceName)
status.NodesFound = discoveredNodes
m.writeDiscoveryStatus(instanceName, status)
_ = m.writeDiscoveryStatus(instanceName, status)
m.discoveryMu.Unlock()
}
}

@@ -535,16 +535,6 @@ func (m *Manager) extractEmbeddedTemplates(destDir string) error {
return nil
}

// copyFile copies a file from src to dst
func (m *Manager) copyFile(src, dst string) error {
data, err := os.ReadFile(src)
if err != nil {
return err
}

return os.WriteFile(dst, data, 0644)
}

// updateNodeStatus updates node status flags in config.yaml
func (m *Manager) updateNodeStatus(instanceName string, node *Node) error {
instancePath := m.GetInstancePath(instanceName)

@@ -230,7 +230,7 @@ func (m *Manager) Cleanup(instanceName string, olderThan time.Duration) error {
for _, op := range ops {
if (op.Status == "completed" || op.Status == "failed" || op.Status == "cancelled") &&
!op.EndedAt.IsZero() && op.EndedAt.Before(cutoff) {
m.Delete(instanceName, op.ID)
_ = m.Delete(instanceName, op.ID)
}
}

@@ -217,7 +217,7 @@ func (m *Manager) StreamLogs(instanceName, serviceName string, opts contracts.Se
Container: opts.Container,
Timestamp: time.Now(),
}
writeSSEEvent(writer, event)
_ = writeSSEEvent(writer, event)
}
}()

@@ -246,14 +246,14 @@ func (m *Manager) StreamLogs(instanceName, serviceName string, opts contracts.Se

// Wait for completion or error
err = <-done
cmd.Process.Kill()
_ = cmd.Process.Kill()

// Send end event
endEvent := contracts.ServiceLogsSSEEvent{
Type: "end",
Timestamp: time.Now(),
}
writeSSEEvent(writer, endEvent)
_ = writeSSEEvent(writer, endEvent)

return err
}

@@ -346,7 +346,7 @@ func (m *Manager) Fetch(instanceName, serviceName string) error {

// Extract README.md if it exists
if readmeData, err := setup.GetServiceFile(serviceName, "README.md"); err == nil {
os.WriteFile(filepath.Join(instanceDir, "README.md"), readmeData, 0644)
_ = os.WriteFile(filepath.Join(instanceDir, "README.md"), readmeData, 0644)
}

// Extract install.sh if it exists
@@ -359,7 +359,7 @@ func (m *Manager) Fetch(instanceName, serviceName string) error {

// Extract wild-manifest.yaml
if manifestData, err := setup.GetServiceFile(serviceName, "wild-manifest.yaml"); err == nil {
os.WriteFile(filepath.Join(instanceDir, "wild-manifest.yaml"), manifestData, 0644)
_ = os.WriteFile(filepath.Join(instanceDir, "wild-manifest.yaml"), manifestData, 0644)
}

// Extract kustomize.template directory
@@ -394,52 +394,6 @@ func dirExists(path string) bool {
return err == nil && info.IsDir()
}

func copyFile(src, dst string) error {
input, err := os.ReadFile(src)
if err != nil {
return err
}
return os.WriteFile(dst, input, 0644)
}

func copyFileIfExists(src, dst string) error {
if !fileExists(src) {
return nil
}
return copyFile(src, dst)
}

func copyDir(src, dst string) error {
// Create destination directory
if err := os.MkdirAll(dst, 0755); err != nil {
return err
}

// Read source directory
entries, err := os.ReadDir(src)
if err != nil {
return err
}

// Copy each entry
for _, entry := range entries {
srcPath := filepath.Join(src, entry.Name())
dstPath := filepath.Join(dst, entry.Name())

if entry.IsDir() {
if err := copyDir(srcPath, dstPath); err != nil {
return err
}
} else {
if err := copyFile(srcPath, dstPath); err != nil {
return err
}
}
}

return nil
}

// extractFS extracts files from an fs.FS to a destination directory
func extractFS(fsys fs.FS, dst string) error {
return fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error {

@@ -96,7 +96,7 @@ func WithLock(lockPath string, fn func() error) error {
if err != nil {
return err
}
defer lock.Release()
defer func() { _ = lock.Release() }()

return fn()
}

@@ -159,10 +159,10 @@ func (t *Talosctl) GetDisks(nodeIP string, insecure bool) ([]DiskInfo, error) {
return disks, nil
}

// GetLinks queries network interfaces from a node
func (t *Talosctl) GetLinks(nodeIP string, insecure bool) ([]map[string]interface{}, error) {
// getResourceJSON executes a talosctl get command and returns parsed JSON array
func (t *Talosctl) getResourceJSON(resourceType, nodeIP string, insecure bool) ([]map[string]interface{}, error) {
args := []string{
"get", "links",
"get", resourceType,
"--nodes", nodeIP,
"-o", "json",
}
@@ -171,7 +171,7 @@ func (t *Talosctl) GetLinks(nodeIP string, insecure bool) ([]map[string]interfac
args = append(args, "--insecure")
}

// Use jq to slurp the NDJSON into an array (like v.PoC does with jq -s)
// Use jq to slurp the NDJSON into an array
talosCmd := exec.Command("talosctl", args...)
jqCmd := exec.Command("jq", "-s", ".")

@@ -184,59 +184,29 @@ func (t *Talosctl) GetLinks(nodeIP string, insecure bool) ([]map[string]interfac

output, err := jqCmd.CombinedOutput()
if err != nil {
return nil, fmt.Errorf("failed to process links JSON: %w\nOutput: %s", err, string(output))
return nil, fmt.Errorf("failed to process %s JSON: %w\nOutput: %s", resourceType, err, string(output))
}

if err := talosCmd.Wait(); err != nil {
return nil, fmt.Errorf("talosctl get links failed: %w", err)
return nil, fmt.Errorf("talosctl get %s failed: %w", resourceType, err)
}

var result []map[string]interface{}
if err := json.Unmarshal(output, &result); err != nil {
return nil, fmt.Errorf("failed to parse links JSON: %w", err)
return nil, fmt.Errorf("failed to parse %s JSON: %w", resourceType, err)
}

return result, nil
}

// GetLinks queries network interfaces from a node
func (t *Talosctl) GetLinks(nodeIP string, insecure bool) ([]map[string]interface{}, error) {
return t.getResourceJSON("links", nodeIP, insecure)
}

// GetRoutes queries routing table from a node
func (t *Talosctl) GetRoutes(nodeIP string, insecure bool) ([]map[string]interface{}, error) {
args := []string{
"get", "routes",
"--nodes", nodeIP,
"-o", "json",
}

if insecure {
args = append(args, "--insecure")
}

// Use jq to slurp the NDJSON into an array (like v.PoC does with jq -s)
talosCmd := exec.Command("talosctl", args...)
jqCmd := exec.Command("jq", "-s", ".")

// Pipe talosctl output to jq
jqCmd.Stdin, _ = talosCmd.StdoutPipe()

if err := talosCmd.Start(); err != nil {
return nil, fmt.Errorf("failed to start talosctl: %w", err)
}

output, err := jqCmd.CombinedOutput()
if err != nil {
return nil, fmt.Errorf("failed to process routes JSON: %w\nOutput: %s", err, string(output))
}

if err := talosCmd.Wait(); err != nil {
return nil, fmt.Errorf("talosctl get routes failed: %w", err)
}

var result []map[string]interface{}
if err := json.Unmarshal(output, &result); err != nil {
return nil, fmt.Errorf("failed to parse routes JSON: %w", err)
}

return result, nil
return t.getResourceJSON("routes", nodeIP, insecure)
}

// GetDefaultInterface finds the interface with the default route

@@ -38,7 +38,7 @@ func GetClusterHealth(kubeconfigPath string) (*HealthStatus, error) {
}

// Check MetalLB
if err := checkComponent(kubeconfigPath, "MetalLB", "metallb-system", "app=metallb"); err != nil {
if err := checkComponent(kubeconfigPath, "metallb-system", "app=metallb"); err != nil {
status.Components["metallb"] = "unhealthy"
status.Issues = append(status.Issues, fmt.Sprintf("MetalLB: %v", err))
status.Overall = "degraded"
@@ -47,7 +47,7 @@ func GetClusterHealth(kubeconfigPath string) (*HealthStatus, error) {
}

// Check Traefik
if err := checkComponent(kubeconfigPath, "Traefik", "traefik", "app.kubernetes.io/name=traefik"); err != nil {
if err := checkComponent(kubeconfigPath, "traefik", "app.kubernetes.io/name=traefik"); err != nil {
status.Components["traefik"] = "unhealthy"
status.Issues = append(status.Issues, fmt.Sprintf("Traefik: %v", err))
status.Overall = "degraded"
@@ -56,7 +56,7 @@ func GetClusterHealth(kubeconfigPath string) (*HealthStatus, error) {
}

// Check cert-manager
if err := checkComponent(kubeconfigPath, "cert-manager", "cert-manager", "app.kubernetes.io/instance=cert-manager"); err != nil {
if err := checkComponent(kubeconfigPath, "cert-manager", "app.kubernetes.io/instance=cert-manager"); err != nil {
status.Components["cert-manager"] = "unhealthy"
status.Issues = append(status.Issues, fmt.Sprintf("cert-manager: %v", err))
status.Overall = "degraded"
@@ -65,7 +65,7 @@ func GetClusterHealth(kubeconfigPath string) (*HealthStatus, error) {
}

// Check Longhorn
if err := checkComponent(kubeconfigPath, "Longhorn", "longhorn-system", "app=longhorn-manager"); err != nil {
if err := checkComponent(kubeconfigPath, "longhorn-system", "app=longhorn-manager"); err != nil {
status.Components["longhorn"] = "unhealthy"
status.Issues = append(status.Issues, fmt.Sprintf("Longhorn: %v", err))
status.Overall = "degraded"
@@ -81,7 +81,7 @@ func GetClusterHealth(kubeconfigPath string) (*HealthStatus, error) {
}

// checkComponent checks if a component is running
func checkComponent(kubeconfigPath, name, namespace, selector string) error {
func checkComponent(kubeconfigPath, namespace, selector string) error {
args := []string{"get", "pods", "-n", namespace, "-l", selector, "-o", "json"}
if kubeconfigPath != "" {
args = append([]string{"--kubeconfig", kubeconfigPath}, args...)

main.go
@@ -89,10 +89,7 @@ func main() {
// Override with production origins if set
if corsOrigins := os.Getenv("WILD_CORS_ORIGINS"); corsOrigins != "" {
// Split comma-separated origins
allowedOrigins = []string{}
for _, origin := range splitAndTrim(corsOrigins, ",") {
allowedOrigins = append(allowedOrigins, origin)
}
allowedOrigins = splitAndTrim(corsOrigins, ",")
log.Printf("CORS configured for production origins: %v", allowedOrigins)
} else {
log.Printf("CORS configured for development origins")