Service config. Service logs. Service status.

2025-10-14 05:26:45 +00:00
parent 1d11996cd6
commit afb0e09aae
9 changed files with 1316 additions and 11 deletions

internal/services/config.go (new file, 143 lines)

@@ -0,0 +1,143 @@
package services

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"gopkg.in/yaml.v3"

	"github.com/wild-cloud/wild-central/daemon/internal/contracts"
	"github.com/wild-cloud/wild-central/daemon/internal/operations"
	"github.com/wild-cloud/wild-central/daemon/internal/storage"
)

// UpdateConfig updates service configuration and optionally redeploys
func (m *Manager) UpdateConfig(instanceName, serviceName string, update contracts.ServiceConfigUpdate, broadcaster *operations.Broadcaster) (*contracts.ServiceConfigResponse, error) {
	// 1. Validate service exists
	manifest, err := m.GetManifest(serviceName)
	if err != nil {
		return nil, fmt.Errorf("service not found: %w", err)
	}
	namespace := manifest.Namespace
	if deployment, ok := serviceDeployments[serviceName]; ok {
		namespace = deployment.namespace
	}

	// 2. Load instance config
	instanceDir := filepath.Join(m.dataDir, "instances", instanceName)
	configPath := filepath.Join(instanceDir, "config.yaml")
	if !storage.FileExists(configPath) {
		return nil, fmt.Errorf("config file not found for instance %s", instanceName)
	}
	configData, err := os.ReadFile(configPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read config: %w", err)
	}
	var config map[string]interface{}
	if err := yaml.Unmarshal(configData, &config); err != nil {
		return nil, fmt.Errorf("failed to parse config: %w", err)
	}

	// 3. Validate config keys against service manifest
	validPaths := make(map[string]bool)
	for _, path := range manifest.ConfigReferences {
		validPaths[path] = true
	}
	for _, cfg := range manifest.ServiceConfig {
		validPaths[cfg.Path] = true
	}
	for key := range update.Config {
		if !validPaths[key] {
			return nil, fmt.Errorf("invalid config key '%s' for service %s", key, serviceName)
		}
	}

	// 4. Update config values
	for key, value := range update.Config {
		if err := setNestedValue(config, key, value); err != nil {
			return nil, fmt.Errorf("failed to set config key '%s': %w", key, err)
		}
	}

	// 5. Write updated config
	updatedData, err := yaml.Marshal(config)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal config: %w", err)
	}
	if err := os.WriteFile(configPath, updatedData, 0644); err != nil {
		return nil, fmt.Errorf("failed to write config: %w", err)
	}

	// 6. Redeploy if requested
	redeployed := false
	if update.Redeploy {
		// Fetch fresh templates if requested
		if update.Fetch {
			if err := m.Fetch(instanceName, serviceName); err != nil {
				return nil, fmt.Errorf("failed to fetch templates: %w", err)
			}
		}
		// Recompile templates
		if err := m.Compile(instanceName, serviceName); err != nil {
			return nil, fmt.Errorf("failed to recompile templates: %w", err)
		}
		// Redeploy service
		if err := m.Deploy(instanceName, serviceName, "", broadcaster); err != nil {
			return nil, fmt.Errorf("failed to redeploy service: %w", err)
		}
		redeployed = true
	}

	// 7. Build response
	message := "Service configuration updated successfully"
	if redeployed {
		message = "Service configuration updated and redeployed successfully"
	}
	return &contracts.ServiceConfigResponse{
		Service:    serviceName,
		Namespace:  namespace,
		Config:     update.Config,
		Redeployed: redeployed,
		Message:    message,
	}, nil
}

// setNestedValue sets a value in a nested map using dot notation
func setNestedValue(data map[string]interface{}, path string, value interface{}) error {
	keys := strings.Split(path, ".")
	current := data
	for i, key := range keys {
		if i == len(keys)-1 {
			// Last key - set the value
			current[key] = value
			return nil
		}
		// Navigate to the next level
		if next, ok := current[key].(map[string]interface{}); ok {
			current = next
		} else if current[key] == nil {
			// Create intermediate map if it doesn't exist
			next := make(map[string]interface{})
			current[key] = next
			current = next
		} else {
			return fmt.Errorf("path '%s' conflicts with existing non-map value at '%s'", path, key)
		}
	}
	return nil
}
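
For reference, setNestedValue walks dot-separated keys, creating intermediate maps on demand and refusing to overwrite an existing non-map value. Below is a standalone sketch of that behavior: the function is copied from config.go above, and the config keys in main are made up purely for illustration.

package main

import (
	"fmt"
	"strings"
)

// setNestedValue is reproduced from config.go above for illustration.
func setNestedValue(data map[string]interface{}, path string, value interface{}) error {
	keys := strings.Split(path, ".")
	current := data
	for i, key := range keys {
		if i == len(keys)-1 {
			current[key] = value
			return nil
		}
		if next, ok := current[key].(map[string]interface{}); ok {
			current = next
		} else if current[key] == nil {
			next := make(map[string]interface{})
			current[key] = next
			current = next
		} else {
			return fmt.Errorf("path '%s' conflicts with existing non-map value at '%s'", path, key)
		}
	}
	return nil
}

func main() {
	cfg := map[string]interface{}{
		"cloud": map[string]interface{}{"domain": "example.com"},
	}
	_ = setNestedValue(cfg, "cloud.domain", "cloud.example.com") // overwrites an existing leaf
	_ = setNestedValue(cfg, "services.ghost.replicas", 2)        // creates the intermediate maps
	err := setNestedValue(cfg, "cloud.domain.tls", true)         // "domain" now holds a string, so this errors
	fmt.Println(cfg, err)
}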

internal/services/logs.go (new file, 283 lines)

@@ -0,0 +1,283 @@
package services

import (
	"bufio"
	"encoding/json"
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/wild-cloud/wild-central/daemon/internal/contracts"
	"github.com/wild-cloud/wild-central/daemon/internal/storage"
	"github.com/wild-cloud/wild-central/daemon/internal/tools"
)

// GetLogs retrieves buffered logs from a service
func (m *Manager) GetLogs(instanceName, serviceName string, opts contracts.ServiceLogsRequest) (*contracts.ServiceLogsResponse, error) {
	// 1. Get service namespace
	manifest, err := m.GetManifest(serviceName)
	if err != nil {
		return nil, fmt.Errorf("service not found: %w", err)
	}
	namespace := manifest.Namespace
	if deployment, ok := serviceDeployments[serviceName]; ok {
		namespace = deployment.namespace
	}

	// 2. Get kubeconfig path
	kubeconfigPath := tools.GetKubeconfigPath(m.dataDir, instanceName)
	if !storage.FileExists(kubeconfigPath) {
		return nil, fmt.Errorf("kubeconfig not found - cluster may not be bootstrapped")
	}
	kubectl := tools.NewKubectl(kubeconfigPath)

	// 3. Get pod name (use first pod if no specific container specified)
	podName := ""
	if opts.Container == "" {
		// Get first pod in namespace
		podName, err = kubectl.GetFirstPodName(namespace)
		if err != nil {
			// Check if it's because there are no pods
			pods, _ := kubectl.GetPods(namespace)
			if len(pods) == 0 {
				// Return empty logs response instead of error when no pods exist
				return &contracts.ServiceLogsResponse{
					Lines: []string{"No pods found for service. The service may not be deployed yet."},
				}, nil
			}
			return nil, fmt.Errorf("failed to find pod: %w", err)
		}
		// If no container specified, get first container
		containers, err := kubectl.GetPodContainers(namespace, podName)
		if err != nil {
			return nil, fmt.Errorf("failed to get pod containers: %w", err)
		}
		if len(containers) > 0 {
			opts.Container = containers[0]
		}
	} else {
		// Container specified - use the first pod in the namespace and pass the container to kubectl
		pods, err := kubectl.GetPods(namespace)
		if err != nil {
			return nil, fmt.Errorf("failed to list pods: %w", err)
		}
		if len(pods) > 0 {
			podName = pods[0].Name
		} else {
			return nil, fmt.Errorf("no pods found in namespace %s", namespace)
		}
	}

	// 4. Set default tail if not specified
	if opts.Tail == 0 {
		opts.Tail = 100
	}
	// Enforce maximum tail
	if opts.Tail > 5000 {
		opts.Tail = 5000
	}

	// 5. Get logs
	logOpts := tools.LogOptions{
		Container: opts.Container,
		Tail:      opts.Tail,
		Previous:  opts.Previous,
		Since:     opts.Since,
	}
	logs, err := kubectl.GetLogs(namespace, podName, logOpts)
	if err != nil {
		return nil, fmt.Errorf("failed to get logs: %w", err)
	}

	// 6. Parse logs into lines
	lines := strings.Split(strings.TrimSpace(logs), "\n")
	truncated := false
	if len(lines) > opts.Tail {
		lines = lines[len(lines)-opts.Tail:]
		truncated = true
	}

	return &contracts.ServiceLogsResponse{
		Service:   serviceName,
		Namespace: namespace,
		Container: opts.Container,
		Lines:     lines,
		Truncated: truncated,
		Timestamp: time.Now(),
	}, nil
}

// StreamLogs streams logs from a service using SSE
func (m *Manager) StreamLogs(instanceName, serviceName string, opts contracts.ServiceLogsRequest, writer io.Writer) error {
	// 1. Get service namespace
	manifest, err := m.GetManifest(serviceName)
	if err != nil {
		return fmt.Errorf("service not found: %w", err)
	}
	namespace := manifest.Namespace
	if deployment, ok := serviceDeployments[serviceName]; ok {
		namespace = deployment.namespace
	}

	// 2. Get kubeconfig path
	kubeconfigPath := tools.GetKubeconfigPath(m.dataDir, instanceName)
	if !storage.FileExists(kubeconfigPath) {
		return fmt.Errorf("kubeconfig not found - cluster may not be bootstrapped")
	}
	kubectl := tools.NewKubectl(kubeconfigPath)

	// 3. Get pod name
	podName := ""
	if opts.Container == "" {
		podName, err = kubectl.GetFirstPodName(namespace)
		if err != nil {
			// Check if it's because there are no pods
			pods, _ := kubectl.GetPods(namespace)
			if len(pods) == 0 {
				// Send a message event indicating no pods
				fmt.Fprintf(writer, "data: No pods found for service. The service may not be deployed yet.\n\n")
				return nil
			}
			return fmt.Errorf("failed to find pod: %w", err)
		}
		// Get first container
		containers, err := kubectl.GetPodContainers(namespace, podName)
		if err != nil {
			return fmt.Errorf("failed to get pod containers: %w", err)
		}
		if len(containers) > 0 {
			opts.Container = containers[0]
		}
	} else {
		pods, err := kubectl.GetPods(namespace)
		if err != nil {
			return fmt.Errorf("failed to list pods: %w", err)
		}
		if len(pods) > 0 {
			podName = pods[0].Name
		} else {
			return fmt.Errorf("no pods found in namespace %s", namespace)
		}
	}

	// 4. Set default tail for streaming
	if opts.Tail == 0 {
		opts.Tail = 50
	}

	// 5. Stream logs
	logOpts := tools.LogOptions{
		Container: opts.Container,
		Tail:      opts.Tail,
		Since:     opts.Since,
	}
	cmd, err := kubectl.StreamLogs(namespace, podName, logOpts)
	if err != nil {
		return fmt.Errorf("failed to start log stream: %w", err)
	}

	// Get stdout pipe
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return fmt.Errorf("failed to get stdout pipe: %w", err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return fmt.Errorf("failed to get stderr pipe: %w", err)
	}

	// Start command
	if err := cmd.Start(); err != nil {
		return fmt.Errorf("failed to start kubectl logs: %w", err)
	}

	// Stream logs line by line as SSE events
	scanner := bufio.NewScanner(stdout)
	errScanner := bufio.NewScanner(stderr)

	// Channel to signal completion
	done := make(chan error, 1)

	// Read stderr in background
	go func() {
		for errScanner.Scan() {
			event := contracts.ServiceLogsSSEEvent{
				Type:      "error",
				Error:     errScanner.Text(),
				Container: opts.Container,
				Timestamp: time.Now(),
			}
			writeSSEEvent(writer, event)
		}
	}()

	// Read stdout
	go func() {
		for scanner.Scan() {
			event := contracts.ServiceLogsSSEEvent{
				Type:      "log",
				Line:      scanner.Text(),
				Container: opts.Container,
				Timestamp: time.Now(),
			}
			if err := writeSSEEvent(writer, event); err != nil {
				done <- err
				return
			}
		}
		if err := scanner.Err(); err != nil {
			done <- err
			return
		}
		done <- nil
	}()

	// Wait for completion or error
	err = <-done
	cmd.Process.Kill()

	// Send end event
	endEvent := contracts.ServiceLogsSSEEvent{
		Type:      "end",
		Timestamp: time.Now(),
	}
	writeSSEEvent(writer, endEvent)

	return err
}

// writeSSEEvent writes an SSE event to the writer
func writeSSEEvent(w io.Writer, event contracts.ServiceLogsSSEEvent) error {
	// Marshal the event to JSON safely
	jsonData, err := json.Marshal(event)
	if err != nil {
		return fmt.Errorf("failed to marshal SSE event: %w", err)
	}

	// Write SSE format: "data: <json>\n\n"
	data := fmt.Sprintf("data: %s\n\n", jsonData)
	_, err = w.Write([]byte(data))
	if err != nil {
		return err
	}

	// Flush if writer supports it
	if flusher, ok := w.(interface{ Flush() }); ok {
		flusher.Flush()
	}

	return nil
}
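
writeSSEEvent frames each event as a single "data: <json>" line followed by a blank line; the "No pods found" message in StreamLogs is sent in the same framing but as plain text rather than JSON. Below is a minimal consumer sketch, assuming a hypothetical stream URL and assuming the event JSON uses lower-cased field names (contracts.ServiceLogsSSEEvent is not shown in this diff).

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

// sseEvent mirrors the fields written by writeSSEEvent; the JSON tag names
// here are assumptions, since the contracts package is not part of this diff.
type sseEvent struct {
	Type      string `json:"type"`
	Line      string `json:"line,omitempty"`
	Error     string `json:"error,omitempty"`
	Container string `json:"container,omitempty"`
}

func main() {
	// Hypothetical endpoint; the actual route is registered elsewhere in the daemon.
	resp, err := http.Get("http://localhost:8080/api/v1/instances/demo/services/ghost/logs/stream")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasPrefix(line, "data: ") {
			continue // blank separator lines between events
		}
		var ev sseEvent
		if err := json.Unmarshal([]byte(strings.TrimPrefix(line, "data: ")), &ev); err != nil {
			fmt.Println(line) // plain-text messages such as "No pods found..."
			continue
		}
		if ev.Type == "end" {
			return
		}
		fmt.Printf("[%s] %s%s\n", ev.Container, ev.Line, ev.Error)
	}
}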


@@ -37,10 +37,25 @@ func NewManager(dataDir string) *Manager {
		manifest, err := setup.GetManifest(serviceName)
		if err == nil {
			// Convert setup.ServiceManifest to services.ServiceManifest
			// Convert setup.ConfigDefinition map to services.ConfigDefinition map
			serviceConfig := make(map[string]ConfigDefinition)
			for key, cfg := range manifest.ServiceConfig {
				serviceConfig[key] = ConfigDefinition{
					Path:    cfg.Path,
					Prompt:  cfg.Prompt,
					Default: cfg.Default,
					Type:    cfg.Type,
				}
			}
			manifests[serviceName] = &ServiceManifest{
				Name:             manifest.Name,
				Description:      manifest.Description,
				Namespace:        manifest.Namespace,
				Category:         manifest.Category,
				Dependencies:     manifest.Dependencies,
				ConfigReferences: manifest.ConfigReferences,
				ServiceConfig:    serviceConfig,
			}
		}
	}
@@ -60,6 +75,7 @@ type Service struct {
	Version      string   `json:"version"`
	Namespace    string   `json:"namespace"`
	Dependencies []string `json:"dependencies,omitempty"`
	HasConfig    bool     `json:"hasConfig"` // Whether service has configurable fields
}
// Base services in Wild Cloud (kept for reference/validation)
@@ -147,12 +163,14 @@ func (m *Manager) List(instanceName string) ([]Service, error) {
		// Get service info from manifest if available
		var namespace, description, version string
		var dependencies []string
		var hasConfig bool
		if manifest, ok := m.manifests[name]; ok {
			namespace = manifest.Namespace
			description = manifest.Description
			version = manifest.Category // Using category as version for now
			dependencies = manifest.Dependencies
			hasConfig = len(manifest.ServiceConfig) > 0
		} else {
			// Fall back to hardcoded map
			namespace = name + "-system" // default
@@ -168,6 +186,7 @@ func (m *Manager) List(instanceName string) ([]Service, error) {
			Description:  description,
			Version:      version,
			Dependencies: dependencies,
			HasConfig:    hasConfig,
		}
		services = append(services, service)
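
With HasConfig populated from the manifest, a List entry serializes along these lines. The sketch below mirrors the Service struct with assumed json tags for the fields outside this hunk (name, description) and made-up values.

package main

import (
	"encoding/json"
	"fmt"
)

// serviceView mirrors the Service struct above; the "name" and "description"
// tags are assumptions, since those field declarations are outside this hunk.
type serviceView struct {
	Name         string   `json:"name"`
	Description  string   `json:"description"`
	Version      string   `json:"version"`
	Namespace    string   `json:"namespace"`
	Dependencies []string `json:"dependencies,omitempty"`
	HasConfig    bool     `json:"hasConfig"`
}

func main() {
	out, _ := json.MarshalIndent(serviceView{
		Name:      "ghost", // illustrative service name
		Version:   "apps",  // Category is reported as the version for now
		Namespace: "ghost",
		HasConfig: true, // true when the manifest defines serviceConfig entries
	}, "", "  ")
	fmt.Println(string(out))
}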

internal/services/status.go (new file, 148 lines)

@@ -0,0 +1,148 @@
package services

import (
	"fmt"
	"os"
	"path/filepath"
	"time"

	"gopkg.in/yaml.v3"

	"github.com/wild-cloud/wild-central/daemon/internal/contracts"
	"github.com/wild-cloud/wild-central/daemon/internal/storage"
	"github.com/wild-cloud/wild-central/daemon/internal/tools"
)

// GetDetailedStatus returns comprehensive service status including pods and health
func (m *Manager) GetDetailedStatus(instanceName, serviceName string) (*contracts.DetailedServiceStatus, error) {
	// 1. Get service manifest and namespace
	manifest, err := m.GetManifest(serviceName)
	if err != nil {
		return nil, fmt.Errorf("service not found: %w", err)
	}
	namespace := manifest.Namespace
	deploymentName := manifest.GetDeploymentName()
	// Check hardcoded map for correct deployment name
	if deployment, ok := serviceDeployments[serviceName]; ok {
		namespace = deployment.namespace
		deploymentName = deployment.deploymentName
	}

	// 2. Get kubeconfig path
	kubeconfigPath := tools.GetKubeconfigPath(m.dataDir, instanceName)
	if !storage.FileExists(kubeconfigPath) {
		return &contracts.DetailedServiceStatus{
			Name:             serviceName,
			Namespace:        namespace,
			DeploymentStatus: "NotFound",
			Replicas:         contracts.ReplicaStatus{},
			Pods:             []contracts.PodStatus{},
			LastUpdated:      time.Now(),
		}, nil
	}
	kubectl := tools.NewKubectl(kubeconfigPath)

	// 3. Get deployment information
	deploymentInfo, err := kubectl.GetDeployment(deploymentName, namespace)
	deploymentStatus := "NotFound"
	replicas := contracts.ReplicaStatus{}
	if err == nil {
		replicas = contracts.ReplicaStatus{
			Desired:   deploymentInfo.Desired,
			Current:   deploymentInfo.Current,
			Ready:     deploymentInfo.Ready,
			Available: deploymentInfo.Available,
		}
		// Determine deployment status
		if deploymentInfo.Ready == deploymentInfo.Desired && deploymentInfo.Desired > 0 {
			deploymentStatus = "Ready"
		} else if deploymentInfo.Ready < deploymentInfo.Desired {
			if deploymentInfo.Current > deploymentInfo.Desired {
				deploymentStatus = "Progressing"
			} else {
				deploymentStatus = "Degraded"
			}
		} else if deploymentInfo.Desired == 0 {
			deploymentStatus = "Scaled to Zero"
		}
	}

	// 4. Get pod information
	podInfos, err := kubectl.GetPods(namespace)
	pods := make([]contracts.PodStatus, 0, len(podInfos))
	if err == nil {
		for _, podInfo := range podInfos {
			pods = append(pods, contracts.PodStatus{
				Name:     podInfo.Name,
				Status:   podInfo.Status,
				Ready:    podInfo.Ready,
				Restarts: podInfo.Restarts,
				Age:      podInfo.Age,
				Node:     podInfo.Node,
				IP:       podInfo.IP,
			})
		}
	}

	// 5. Load current config values
	instanceDir := filepath.Join(m.dataDir, "instances", instanceName)
	configPath := filepath.Join(instanceDir, "config.yaml")
	configValues := make(map[string]interface{})
	if storage.FileExists(configPath) {
		configData, err := os.ReadFile(configPath)
		if err == nil {
			var instanceConfig map[string]interface{}
			if err := yaml.Unmarshal(configData, &instanceConfig); err == nil {
				// Extract values for all config paths
				for _, path := range manifest.ConfigReferences {
					if value := getNestedValue(instanceConfig, path); value != nil {
						configValues[path] = value
					}
				}
				for _, cfg := range manifest.ServiceConfig {
					if value := getNestedValue(instanceConfig, cfg.Path); value != nil {
						configValues[cfg.Path] = value
					}
				}
			}
		}
	}

	// 6. Convert ServiceConfig to contracts.ConfigDefinition
	contractsServiceConfig := make(map[string]contracts.ConfigDefinition)
	for key, cfg := range manifest.ServiceConfig {
		contractsServiceConfig[key] = contracts.ConfigDefinition{
			Path:    cfg.Path,
			Prompt:  cfg.Prompt,
			Default: cfg.Default,
			Type:    cfg.Type,
		}
	}

	// 7. Build detailed status response
	status := &contracts.DetailedServiceStatus{
		Name:             serviceName,
		Namespace:        namespace,
		DeploymentStatus: deploymentStatus,
		Replicas:         replicas,
		Pods:             pods,
		Config:           configValues,
		Manifest: &contracts.ServiceManifest{
			Name:             manifest.Name,
			Description:      manifest.Description,
			Namespace:        manifest.Namespace,
			ConfigReferences: manifest.ConfigReferences,
			ServiceConfig:    contractsServiceConfig,
		},
		LastUpdated: time.Now(),
	}

	return status, nil
}
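
GetDetailedStatus reads values back out of config.yaml with getNestedValue, which is referenced here but not shown in the files above; it may be defined in one of the commit's other changed files. The following is a sketch, stated as an assumption, of the behavior these calls rely on: walk dot-separated keys and return nil when any segment is missing or is not a nested map.

package services

import "strings"

// getNestedValue (sketch): the read-side counterpart to setNestedValue in
// config.go. This is an assumed implementation, not the one in the commit.
func getNestedValue(data map[string]interface{}, path string) interface{} {
	keys := strings.Split(path, ".")
	current := data
	for i, key := range keys {
		value, ok := current[key]
		if !ok {
			return nil
		}
		if i == len(keys)-1 {
			return value
		}
		next, ok := value.(map[string]interface{})
		if !ok {
			return nil
		}
		current = next
	}
	return nil
}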