Compare commits
6 Commits
005dc30aa5
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c8fd702d1b | ||
|
|
1271eebf38 | ||
|
|
b00dffd2b6 | ||
|
|
c623843d53 | ||
|
|
b330b2aea7 | ||
|
|
7cd434aabf |
@@ -1,5 +1,11 @@
|
||||
# Building the Wild Cloud Central API
|
||||
|
||||
These are instructions for working with the Wild Cloud Central API (Wild API). Wild API is a web service that runs on Wild Central. Users can interact with the API directly, through the Wild CLI, or through the Wild Web App. The CLI and Web App depend on the API extensively.
|
||||
|
||||
Whenever changes are made to the API, it is important that the CLI and API are updated appropriately.
|
||||
|
||||
Use tests on the API extensively to keep the API functioning well for all clients, but don't duplicate test layers. If something is tested in one place, it doesn't need to be tested again in another place. Prefer unit tests. Tests should be run with `make test` after all API changes. If a bug was found by any means other than tests, it is a signal that a test should have been present to catch it earlier, so make sure a new test catches that bug before fixing it.
|
||||
|
||||
## Dev Environment Requirements
|
||||
|
||||
- Go 1.21+
|
||||
@@ -38,6 +44,7 @@
|
||||
- Write unit tests for all functions and methods.
|
||||
- Make and use common modules. For example, one module should handle all interactions with talosctl. Another modules should handle all interactions with kubectl.
|
||||
- If the code is getting long and complex, break it into smaller modules.
|
||||
- API requests and responses should be valid JSON. Object attributes should be standard JSON camel-cased.
|
||||
|
||||
### Features
|
||||
|
||||
|
||||
6
go.mod
6
go.mod
@@ -7,3 +7,9 @@ require (
|
||||
github.com/rs/cors v1.11.1
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/stretchr/testify v1.11.1 // indirect
|
||||
)
|
||||
|
||||
6
go.sum
6
go.sum
@@ -1,7 +1,13 @@
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
|
||||
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/wild-cloud/wild-central/daemon/internal/instance"
|
||||
"github.com/wild-cloud/wild-central/daemon/internal/operations"
|
||||
"github.com/wild-cloud/wild-central/daemon/internal/secrets"
|
||||
"github.com/wild-cloud/wild-central/daemon/internal/storage"
|
||||
"github.com/wild-cloud/wild-central/daemon/internal/tools"
|
||||
)
|
||||
|
||||
@@ -30,6 +31,7 @@ type API struct {
|
||||
context *context.Manager
|
||||
instance *instance.Manager
|
||||
dnsmasq *dnsmasq.ConfigGenerator
|
||||
opsMgr *operations.Manager // Operations manager
|
||||
broadcaster *operations.Broadcaster // SSE broadcaster for operation output
|
||||
}
|
||||
|
||||
@@ -57,6 +59,7 @@ func NewAPI(dataDir, appsDir string) (*API, error) {
|
||||
context: context.NewManager(dataDir),
|
||||
instance: instance.NewManager(dataDir),
|
||||
dnsmasq: dnsmasq.NewConfigGenerator(dnsmasqConfigPath),
|
||||
opsMgr: operations.NewManager(dataDir),
|
||||
broadcaster: operations.NewBroadcaster(),
|
||||
}, nil
|
||||
}
|
||||
@@ -85,6 +88,7 @@ func (api *API) RegisterRoutes(r *mux.Router) {
|
||||
r.HandleFunc("/api/v1/instances/{name}/nodes/discover", api.NodeDiscover).Methods("POST")
|
||||
r.HandleFunc("/api/v1/instances/{name}/nodes/detect", api.NodeDetect).Methods("POST")
|
||||
r.HandleFunc("/api/v1/instances/{name}/discovery", api.NodeDiscoveryStatus).Methods("GET")
|
||||
r.HandleFunc("/api/v1/instances/{name}/discovery/cancel", api.NodeDiscoveryCancel).Methods("POST")
|
||||
r.HandleFunc("/api/v1/instances/{name}/nodes/hardware/{ip}", api.NodeHardware).Methods("GET")
|
||||
r.HandleFunc("/api/v1/instances/{name}/nodes/fetch-templates", api.NodeFetchTemplates).Methods("POST")
|
||||
r.HandleFunc("/api/v1/instances/{name}/nodes", api.NodeAdd).Methods("POST")
|
||||
@@ -92,15 +96,16 @@ func (api *API) RegisterRoutes(r *mux.Router) {
|
||||
r.HandleFunc("/api/v1/instances/{name}/nodes/{node}", api.NodeGet).Methods("GET")
|
||||
r.HandleFunc("/api/v1/instances/{name}/nodes/{node}", api.NodeUpdate).Methods("PUT")
|
||||
r.HandleFunc("/api/v1/instances/{name}/nodes/{node}/apply", api.NodeApply).Methods("POST")
|
||||
r.HandleFunc("/api/v1/instances/{name}/nodes/{node}/reset", api.NodeReset).Methods("POST")
|
||||
r.HandleFunc("/api/v1/instances/{name}/nodes/{node}", api.NodeDelete).Methods("DELETE")
|
||||
|
||||
// Asset management
|
||||
r.HandleFunc("/api/v1/assets", api.AssetsListSchematics).Methods("GET")
|
||||
r.HandleFunc("/api/v1/assets/{schematicId}", api.AssetsGetSchematic).Methods("GET")
|
||||
r.HandleFunc("/api/v1/assets/{schematicId}/download", api.AssetsDownload).Methods("POST")
|
||||
r.HandleFunc("/api/v1/assets/{schematicId}/pxe/{assetType}", api.AssetsServePXE).Methods("GET")
|
||||
r.HandleFunc("/api/v1/assets/{schematicId}/status", api.AssetsGetStatus).Methods("GET")
|
||||
r.HandleFunc("/api/v1/assets/{schematicId}", api.AssetsDeleteSchematic).Methods("DELETE")
|
||||
// PXE Asset management (schematic@version composite key)
|
||||
r.HandleFunc("/api/v1/pxe/assets", api.AssetsList).Methods("GET")
|
||||
r.HandleFunc("/api/v1/pxe/assets/{schematicId}/{version}", api.AssetsGet).Methods("GET")
|
||||
r.HandleFunc("/api/v1/pxe/assets/{schematicId}/{version}/download", api.AssetsDownload).Methods("POST")
|
||||
r.HandleFunc("/api/v1/pxe/assets/{schematicId}/{version}", api.AssetsDelete).Methods("DELETE")
|
||||
r.HandleFunc("/api/v1/pxe/assets/{schematicId}/{version}/pxe/{assetType}", api.AssetsServePXE).Methods("GET")
|
||||
r.HandleFunc("/api/v1/pxe/assets/{schematicId}/{version}/status", api.AssetsGetStatus).Methods("GET")
|
||||
|
||||
// Instance-schematic relationship
|
||||
r.HandleFunc("/api/v1/instances/{name}/schematic", api.SchematicGetInstanceSchematic).Methods("GET")
|
||||
@@ -299,7 +304,7 @@ func (api *API) GetConfig(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// updateYAMLFile updates a YAML file with the provided key-value pairs
|
||||
func (api *API) updateYAMLFile(w http.ResponseWriter, r *http.Request, instanceName, fileType string, updateFunc func(string, string, string) error) {
|
||||
func (api *API) updateYAMLFile(w http.ResponseWriter, r *http.Request, instanceName, fileType string) {
|
||||
if err := api.instance.ValidateInstance(instanceName); err != nil {
|
||||
respondError(w, http.StatusNotFound, fmt.Sprintf("Instance not found: %v", err))
|
||||
return
|
||||
@@ -324,13 +329,44 @@ func (api *API) updateYAMLFile(w http.ResponseWriter, r *http.Request, instanceN
|
||||
filePath = api.instance.GetInstanceSecretsPath(instanceName)
|
||||
}
|
||||
|
||||
// Update each key-value pair
|
||||
for key, value := range updates {
|
||||
valueStr := fmt.Sprintf("%v", value)
|
||||
if err := updateFunc(filePath, key, valueStr); err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to update %s key %s: %v", fileType, key, err))
|
||||
// Read existing config/secrets file
|
||||
existingContent, err := storage.ReadFile(filePath)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to read existing %s: %v", fileType, err))
|
||||
return
|
||||
}
|
||||
|
||||
// Parse existing content or initialize empty map
|
||||
var existingConfig map[string]interface{}
|
||||
if len(existingContent) > 0 {
|
||||
if err := yaml.Unmarshal(existingContent, &existingConfig); err != nil {
|
||||
respondError(w, http.StatusBadRequest, fmt.Sprintf("Failed to parse existing %s: %v", fileType, err))
|
||||
return
|
||||
}
|
||||
} else {
|
||||
existingConfig = make(map[string]interface{})
|
||||
}
|
||||
|
||||
// Merge updates into existing config (shallow merge for top-level keys)
|
||||
// This preserves unmodified keys while updating specified ones
|
||||
for key, value := range updates {
|
||||
existingConfig[key] = value
|
||||
}
|
||||
|
||||
// Marshal the merged config back to YAML with proper formatting
|
||||
yamlContent, err := yaml.Marshal(existingConfig)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to marshal YAML: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
// Write the complete merged YAML content to the file with proper locking
|
||||
lockPath := filePath + ".lock"
|
||||
if err := storage.WithLock(lockPath, func() error {
|
||||
return storage.WriteFile(filePath, yamlContent, 0644)
|
||||
}); err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to update %s: %v", fileType, err))
|
||||
return
|
||||
}
|
||||
|
||||
// Capitalize first letter of fileType for message
|
||||
@@ -348,7 +384,7 @@ func (api *API) updateYAMLFile(w http.ResponseWriter, r *http.Request, instanceN
|
||||
func (api *API) UpdateConfig(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
name := vars["name"]
|
||||
api.updateYAMLFile(w, r, name, "config", api.config.SetConfigValue)
|
||||
api.updateYAMLFile(w, r, name, "config")
|
||||
}
|
||||
|
||||
// GetSecrets retrieves instance secrets (redacted by default)
|
||||
@@ -396,7 +432,7 @@ func (api *API) GetSecrets(w http.ResponseWriter, r *http.Request) {
|
||||
func (api *API) UpdateSecrets(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
name := vars["name"]
|
||||
api.updateYAMLFile(w, r, name, "secrets", api.secrets.SetSecret)
|
||||
api.updateYAMLFile(w, r, name, "secrets")
|
||||
}
|
||||
|
||||
// GetContext retrieves current context
|
||||
|
||||
@@ -11,45 +11,46 @@ import (
|
||||
"github.com/wild-cloud/wild-central/daemon/internal/assets"
|
||||
)
|
||||
|
||||
// AssetsListSchematics lists all available schematics
|
||||
func (api *API) AssetsListSchematics(w http.ResponseWriter, r *http.Request) {
|
||||
// AssetsList lists all available assets (schematic@version combinations)
|
||||
func (api *API) AssetsList(w http.ResponseWriter, r *http.Request) {
|
||||
assetsMgr := assets.NewManager(api.dataDir)
|
||||
|
||||
schematics, err := assetsMgr.ListSchematics()
|
||||
assetList, err := assetsMgr.ListAssets()
|
||||
if err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to list schematics: %v", err))
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to list assets: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
respondJSON(w, http.StatusOK, map[string]interface{}{
|
||||
"schematics": schematics,
|
||||
"assets": assetList,
|
||||
})
|
||||
}
|
||||
|
||||
// AssetsGetSchematic returns details for a specific schematic
|
||||
func (api *API) AssetsGetSchematic(w http.ResponseWriter, r *http.Request) {
|
||||
// AssetsGet returns details for a specific asset (schematic@version)
|
||||
func (api *API) AssetsGet(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
schematicID := vars["schematicId"]
|
||||
version := vars["version"]
|
||||
|
||||
assetsMgr := assets.NewManager(api.dataDir)
|
||||
|
||||
schematic, err := assetsMgr.GetSchematic(schematicID)
|
||||
asset, err := assetsMgr.GetAsset(schematicID, version)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusNotFound, fmt.Sprintf("Schematic not found: %v", err))
|
||||
respondError(w, http.StatusNotFound, fmt.Sprintf("Asset not found: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
respondJSON(w, http.StatusOK, schematic)
|
||||
respondJSON(w, http.StatusOK, asset)
|
||||
}
|
||||
|
||||
// AssetsDownload downloads assets for a schematic
|
||||
// AssetsDownload downloads assets for a schematic@version
|
||||
func (api *API) AssetsDownload(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
schematicID := vars["schematicId"]
|
||||
version := vars["version"]
|
||||
|
||||
// Parse request body
|
||||
var req struct {
|
||||
Version string `json:"version"`
|
||||
Platform string `json:"platform,omitempty"`
|
||||
AssetTypes []string `json:"asset_types,omitempty"`
|
||||
}
|
||||
@@ -59,11 +60,6 @@ func (api *API) AssetsDownload(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if req.Version == "" {
|
||||
respondError(w, http.StatusBadRequest, "version is required")
|
||||
return
|
||||
}
|
||||
|
||||
// Default platform to amd64 if not specified
|
||||
if req.Platform == "" {
|
||||
req.Platform = "amd64"
|
||||
@@ -71,7 +67,7 @@ func (api *API) AssetsDownload(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// Download assets
|
||||
assetsMgr := assets.NewManager(api.dataDir)
|
||||
if err := assetsMgr.DownloadAssets(schematicID, req.Version, req.Platform, req.AssetTypes); err != nil {
|
||||
if err := assetsMgr.DownloadAssets(schematicID, version, req.Platform, req.AssetTypes); err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to download assets: %v", err))
|
||||
return
|
||||
}
|
||||
@@ -79,7 +75,7 @@ func (api *API) AssetsDownload(w http.ResponseWriter, r *http.Request) {
|
||||
respondJSON(w, http.StatusOK, map[string]interface{}{
|
||||
"message": "Assets downloaded successfully",
|
||||
"schematic_id": schematicID,
|
||||
"version": req.Version,
|
||||
"version": version,
|
||||
"platform": req.Platform,
|
||||
})
|
||||
}
|
||||
@@ -88,12 +84,13 @@ func (api *API) AssetsDownload(w http.ResponseWriter, r *http.Request) {
|
||||
func (api *API) AssetsServePXE(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
schematicID := vars["schematicId"]
|
||||
version := vars["version"]
|
||||
assetType := vars["assetType"]
|
||||
|
||||
assetsMgr := assets.NewManager(api.dataDir)
|
||||
|
||||
// Get asset path
|
||||
assetPath, err := assetsMgr.GetAssetPath(schematicID, assetType)
|
||||
assetPath, err := assetsMgr.GetAssetPath(schematicID, version, assetType)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusNotFound, fmt.Sprintf("Asset not found: %v", err))
|
||||
return
|
||||
@@ -137,36 +134,39 @@ func (api *API) AssetsServePXE(w http.ResponseWriter, r *http.Request) {
|
||||
http.ServeContent(w, r, info.Name(), info.ModTime(), file)
|
||||
}
|
||||
|
||||
// AssetsGetStatus returns download status for a schematic
|
||||
// AssetsGetStatus returns download status for a schematic@version
|
||||
func (api *API) AssetsGetStatus(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
schematicID := vars["schematicId"]
|
||||
version := vars["version"]
|
||||
|
||||
assetsMgr := assets.NewManager(api.dataDir)
|
||||
|
||||
status, err := assetsMgr.GetAssetStatus(schematicID)
|
||||
status, err := assetsMgr.GetAssetStatus(schematicID, version)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusNotFound, fmt.Sprintf("Schematic not found: %v", err))
|
||||
respondError(w, http.StatusNotFound, fmt.Sprintf("Asset not found: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
respondJSON(w, http.StatusOK, status)
|
||||
}
|
||||
|
||||
// AssetsDeleteSchematic deletes a schematic and all its assets
|
||||
func (api *API) AssetsDeleteSchematic(w http.ResponseWriter, r *http.Request) {
|
||||
// AssetsDelete deletes an asset (schematic@version) and all its files
|
||||
func (api *API) AssetsDelete(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
schematicID := vars["schematicId"]
|
||||
version := vars["version"]
|
||||
|
||||
assetsMgr := assets.NewManager(api.dataDir)
|
||||
|
||||
if err := assetsMgr.DeleteSchematic(schematicID); err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to delete schematic: %v", err))
|
||||
if err := assetsMgr.DeleteAsset(schematicID, version); err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to delete asset: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
respondJSON(w, http.StatusOK, map[string]string{
|
||||
"message": "Schematic deleted successfully",
|
||||
"message": "Asset deleted successfully",
|
||||
"schematic_id": schematicID,
|
||||
"version": version,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -46,15 +46,15 @@ func (api *API) ClusterGenerateConfig(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Create cluster config
|
||||
config := cluster.ClusterConfig{
|
||||
clusterConfig := cluster.ClusterConfig{
|
||||
ClusterName: clusterName,
|
||||
VIP: vip,
|
||||
Version: version,
|
||||
}
|
||||
|
||||
// Generate configuration
|
||||
clusterMgr := cluster.NewManager(api.dataDir)
|
||||
if err := clusterMgr.GenerateConfig(instanceName, &config); err != nil {
|
||||
clusterMgr := cluster.NewManager(api.dataDir, api.opsMgr)
|
||||
if err := clusterMgr.GenerateConfig(instanceName, &clusterConfig); err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to generate config: %v", err))
|
||||
return
|
||||
}
|
||||
@@ -90,26 +90,14 @@ func (api *API) ClusterBootstrap(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// Start bootstrap operation
|
||||
opsMgr := operations.NewManager(api.dataDir)
|
||||
opID, err := opsMgr.Start(instanceName, "bootstrap", req.Node)
|
||||
// Bootstrap with progress tracking
|
||||
clusterMgr := cluster.NewManager(api.dataDir, api.opsMgr)
|
||||
opID, err := clusterMgr.Bootstrap(instanceName, req.Node)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to start operation: %v", err))
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to start bootstrap: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
// Bootstrap in background
|
||||
go func() {
|
||||
clusterMgr := cluster.NewManager(api.dataDir)
|
||||
_ = opsMgr.UpdateStatus(instanceName, opID, "running")
|
||||
|
||||
if err := clusterMgr.Bootstrap(instanceName, req.Node); err != nil {
|
||||
_ = opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
|
||||
} else {
|
||||
_ = opsMgr.Update(instanceName, opID, "completed", "Bootstrap completed", 100)
|
||||
}
|
||||
}()
|
||||
|
||||
respondJSON(w, http.StatusAccepted, map[string]string{
|
||||
"operation_id": opID,
|
||||
"message": "Bootstrap initiated",
|
||||
@@ -138,7 +126,7 @@ func (api *API) ClusterConfigureEndpoints(w http.ResponseWriter, r *http.Request
|
||||
}
|
||||
|
||||
// Configure endpoints
|
||||
clusterMgr := cluster.NewManager(api.dataDir)
|
||||
clusterMgr := cluster.NewManager(api.dataDir, api.opsMgr)
|
||||
if err := clusterMgr.ConfigureEndpoints(instanceName, req.IncludeNodes); err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to configure endpoints: %v", err))
|
||||
return
|
||||
@@ -161,7 +149,7 @@ func (api *API) ClusterGetStatus(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Get status
|
||||
clusterMgr := cluster.NewManager(api.dataDir)
|
||||
clusterMgr := cluster.NewManager(api.dataDir, api.opsMgr)
|
||||
status, err := clusterMgr.GetStatus(instanceName)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to get status: %v", err))
|
||||
@@ -183,7 +171,7 @@ func (api *API) ClusterHealth(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Get health checks
|
||||
clusterMgr := cluster.NewManager(api.dataDir)
|
||||
clusterMgr := cluster.NewManager(api.dataDir, api.opsMgr)
|
||||
checks, err := clusterMgr.Health(instanceName)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to get health: %v", err))
|
||||
@@ -219,7 +207,7 @@ func (api *API) ClusterGetKubeconfig(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Get kubeconfig
|
||||
clusterMgr := cluster.NewManager(api.dataDir)
|
||||
clusterMgr := cluster.NewManager(api.dataDir, api.opsMgr)
|
||||
kubeconfig, err := clusterMgr.GetKubeconfig(instanceName)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusNotFound, fmt.Sprintf("Kubeconfig not found: %v", err))
|
||||
@@ -243,7 +231,7 @@ func (api *API) ClusterGenerateKubeconfig(w http.ResponseWriter, r *http.Request
|
||||
}
|
||||
|
||||
// Regenerate kubeconfig from cluster
|
||||
clusterMgr := cluster.NewManager(api.dataDir)
|
||||
clusterMgr := cluster.NewManager(api.dataDir, api.opsMgr)
|
||||
if err := clusterMgr.RegenerateKubeconfig(instanceName); err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to generate kubeconfig: %v", err))
|
||||
return
|
||||
@@ -266,7 +254,7 @@ func (api *API) ClusterGetTalosconfig(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Get talosconfig
|
||||
clusterMgr := cluster.NewManager(api.dataDir)
|
||||
clusterMgr := cluster.NewManager(api.dataDir, api.opsMgr)
|
||||
talosconfig, err := clusterMgr.GetTalosconfig(instanceName)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusNotFound, fmt.Sprintf("Talosconfig not found: %v", err))
|
||||
@@ -314,7 +302,7 @@ func (api *API) ClusterReset(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// Reset in background
|
||||
go func() {
|
||||
clusterMgr := cluster.NewManager(api.dataDir)
|
||||
clusterMgr := cluster.NewManager(api.dataDir, api.opsMgr)
|
||||
_ = opsMgr.UpdateStatus(instanceName, opID, "running")
|
||||
|
||||
if err := clusterMgr.Reset(instanceName, req.Confirm); err != nil {
|
||||
|
||||
656
internal/api/v1/handlers_config_test.go
Normal file
656
internal/api/v1/handlers_config_test.go
Normal file
@@ -0,0 +1,656 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/wild-cloud/wild-central/daemon/internal/storage"
|
||||
)
|
||||
|
||||
func setupTestAPI(t *testing.T) (*API, string) {
|
||||
tmpDir := t.TempDir()
|
||||
appsDir := filepath.Join(tmpDir, "apps")
|
||||
|
||||
api, err := NewAPI(tmpDir, appsDir)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create test API: %v", err)
|
||||
}
|
||||
|
||||
return api, tmpDir
|
||||
}
|
||||
|
||||
func createTestInstance(t *testing.T, api *API, name string) {
|
||||
if err := api.instance.CreateInstance(name); err != nil {
|
||||
t.Fatalf("Failed to create test instance: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateYAMLFile_DeltaUpdate(t *testing.T) {
|
||||
api, _ := setupTestAPI(t)
|
||||
instanceName := "test-instance"
|
||||
createTestInstance(t, api, instanceName)
|
||||
|
||||
configPath := api.instance.GetInstanceConfigPath(instanceName)
|
||||
|
||||
// Create initial config
|
||||
initialConfig := map[string]interface{}{
|
||||
"domain": "old.com",
|
||||
"email": "admin@old.com",
|
||||
"cluster": map[string]interface{}{
|
||||
"name": "test-cluster",
|
||||
},
|
||||
}
|
||||
initialYAML, _ := yaml.Marshal(initialConfig)
|
||||
if err := storage.WriteFile(configPath, initialYAML, 0644); err != nil {
|
||||
t.Fatalf("Failed to write initial config: %v", err)
|
||||
}
|
||||
|
||||
// Update only domain
|
||||
updateData := map[string]interface{}{
|
||||
"domain": "new.com",
|
||||
}
|
||||
updateYAML, _ := yaml.Marshal(updateData)
|
||||
|
||||
req := httptest.NewRequest("PUT", "/api/v1/instances/"+instanceName+"/config", bytes.NewBuffer(updateYAML))
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
vars := map[string]string{"name": instanceName}
|
||||
req = mux.SetURLVars(req, vars)
|
||||
|
||||
api.UpdateConfig(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("Expected status 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
// Verify merged config
|
||||
resultData, err := storage.ReadFile(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read result: %v", err)
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
if err := yaml.Unmarshal(resultData, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
// Domain should be updated
|
||||
if result["domain"] != "new.com" {
|
||||
t.Errorf("Expected domain='new.com', got %v", result["domain"])
|
||||
}
|
||||
|
||||
// Email should be preserved
|
||||
if result["email"] != "admin@old.com" {
|
||||
t.Errorf("Expected email='admin@old.com', got %v", result["email"])
|
||||
}
|
||||
|
||||
// Cluster should be preserved
|
||||
if cluster, ok := result["cluster"].(map[string]interface{}); !ok {
|
||||
t.Errorf("Cluster not preserved as map")
|
||||
} else if cluster["name"] != "test-cluster" {
|
||||
t.Errorf("Cluster name not preserved")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateYAMLFile_FullReplacement(t *testing.T) {
|
||||
api, _ := setupTestAPI(t)
|
||||
instanceName := "test-instance"
|
||||
createTestInstance(t, api, instanceName)
|
||||
|
||||
configPath := api.instance.GetInstanceConfigPath(instanceName)
|
||||
|
||||
// Create initial config
|
||||
initialConfig := map[string]interface{}{
|
||||
"domain": "old.com",
|
||||
"email": "admin@old.com",
|
||||
"oldKey": "oldValue",
|
||||
}
|
||||
initialYAML, _ := yaml.Marshal(initialConfig)
|
||||
if err := storage.WriteFile(configPath, initialYAML, 0644); err != nil {
|
||||
t.Fatalf("Failed to write initial config: %v", err)
|
||||
}
|
||||
|
||||
// Full replacement
|
||||
newConfig := map[string]interface{}{
|
||||
"domain": "new.com",
|
||||
"email": "new@new.com",
|
||||
"newKey": "newValue",
|
||||
}
|
||||
newYAML, _ := yaml.Marshal(newConfig)
|
||||
|
||||
req := httptest.NewRequest("PUT", "/api/v1/instances/"+instanceName+"/config", bytes.NewBuffer(newYAML))
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
vars := map[string]string{"name": instanceName}
|
||||
req = mux.SetURLVars(req, vars)
|
||||
|
||||
api.UpdateConfig(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("Expected status 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
// Verify result
|
||||
resultData, err := storage.ReadFile(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read result: %v", err)
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
if err := yaml.Unmarshal(resultData, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
// All new values should be present
|
||||
if result["domain"] != "new.com" {
|
||||
t.Errorf("Expected domain='new.com', got %v", result["domain"])
|
||||
}
|
||||
if result["email"] != "new@new.com" {
|
||||
t.Errorf("Expected email='new@new.com', got %v", result["email"])
|
||||
}
|
||||
if result["newKey"] != "newValue" {
|
||||
t.Errorf("Expected newKey='newValue', got %v", result["newKey"])
|
||||
}
|
||||
|
||||
// Old key should still be present (shallow merge)
|
||||
if result["oldKey"] != "oldValue" {
|
||||
t.Errorf("Expected oldKey='oldValue', got %v", result["oldKey"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateYAMLFile_NestedStructure(t *testing.T) {
|
||||
api, _ := setupTestAPI(t)
|
||||
instanceName := "test-instance"
|
||||
createTestInstance(t, api, instanceName)
|
||||
|
||||
configPath := api.instance.GetInstanceConfigPath(instanceName)
|
||||
|
||||
// Update with nested structure
|
||||
updateData := map[string]interface{}{
|
||||
"cloud": map[string]interface{}{
|
||||
"domain": "test.com",
|
||||
"dns": map[string]interface{}{
|
||||
"ip": "1.2.3.4",
|
||||
"port": 53,
|
||||
},
|
||||
},
|
||||
}
|
||||
updateYAML, _ := yaml.Marshal(updateData)
|
||||
|
||||
req := httptest.NewRequest("PUT", "/api/v1/instances/"+instanceName+"/config", bytes.NewBuffer(updateYAML))
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
vars := map[string]string{"name": instanceName}
|
||||
req = mux.SetURLVars(req, vars)
|
||||
|
||||
api.UpdateConfig(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("Expected status 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
// Verify nested structure preserved
|
||||
resultData, err := storage.ReadFile(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read result: %v", err)
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
if err := yaml.Unmarshal(resultData, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
// Verify nested structure is proper YAML, not Go map notation
|
||||
resultStr := string(resultData)
|
||||
if bytes.Contains(resultData, []byte("map[")) {
|
||||
t.Errorf("Result contains Go map notation: %s", resultStr)
|
||||
}
|
||||
|
||||
// Verify structure is accessible
|
||||
cloud, ok := result["cloud"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatalf("cloud is not a map: %T", result["cloud"])
|
||||
}
|
||||
|
||||
if cloud["domain"] != "test.com" {
|
||||
t.Errorf("Expected cloud.domain='test.com', got %v", cloud["domain"])
|
||||
}
|
||||
|
||||
dns, ok := cloud["dns"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Fatalf("cloud.dns is not a map: %T", cloud["dns"])
|
||||
}
|
||||
|
||||
if dns["ip"] != "1.2.3.4" {
|
||||
t.Errorf("Expected dns.ip='1.2.3.4', got %v", dns["ip"])
|
||||
}
|
||||
if dns["port"] != 53 {
|
||||
t.Errorf("Expected dns.port=53, got %v", dns["port"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateYAMLFile_EmptyFileCreation(t *testing.T) {
|
||||
api, _ := setupTestAPI(t)
|
||||
instanceName := "test-instance"
|
||||
createTestInstance(t, api, instanceName)
|
||||
|
||||
configPath := api.instance.GetInstanceConfigPath(instanceName)
|
||||
|
||||
// Truncate the config file to make it empty (but still exists)
|
||||
if err := storage.WriteFile(configPath, []byte(""), 0644); err != nil {
|
||||
t.Fatalf("Failed to empty config file: %v", err)
|
||||
}
|
||||
|
||||
// Update should populate empty file
|
||||
updateData := map[string]interface{}{
|
||||
"domain": "new.com",
|
||||
"email": "admin@new.com",
|
||||
}
|
||||
updateYAML, _ := yaml.Marshal(updateData)
|
||||
|
||||
req := httptest.NewRequest("PUT", "/api/v1/instances/"+instanceName+"/config", bytes.NewBuffer(updateYAML))
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
vars := map[string]string{"name": instanceName}
|
||||
req = mux.SetURLVars(req, vars)
|
||||
|
||||
api.UpdateConfig(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("Expected status 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
// Verify content
|
||||
resultData, err := storage.ReadFile(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read result: %v", err)
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
if err := yaml.Unmarshal(resultData, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if result["domain"] != "new.com" {
|
||||
t.Errorf("Expected domain='new.com', got %v", result["domain"])
|
||||
}
|
||||
if result["email"] != "admin@new.com" {
|
||||
t.Errorf("Expected email='admin@new.com', got %v", result["email"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateYAMLFile_EmptyUpdate(t *testing.T) {
|
||||
api, _ := setupTestAPI(t)
|
||||
instanceName := "test-instance"
|
||||
createTestInstance(t, api, instanceName)
|
||||
|
||||
configPath := api.instance.GetInstanceConfigPath(instanceName)
|
||||
|
||||
// Create initial config
|
||||
initialConfig := map[string]interface{}{
|
||||
"domain": "test.com",
|
||||
}
|
||||
initialYAML, _ := yaml.Marshal(initialConfig)
|
||||
if err := storage.WriteFile(configPath, initialYAML, 0644); err != nil {
|
||||
t.Fatalf("Failed to write initial config: %v", err)
|
||||
}
|
||||
|
||||
// Empty update
|
||||
updateData := map[string]interface{}{}
|
||||
updateYAML, _ := yaml.Marshal(updateData)
|
||||
|
||||
req := httptest.NewRequest("PUT", "/api/v1/instances/"+instanceName+"/config", bytes.NewBuffer(updateYAML))
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
vars := map[string]string{"name": instanceName}
|
||||
req = mux.SetURLVars(req, vars)
|
||||
|
||||
api.UpdateConfig(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("Expected status 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
// Verify file unchanged
|
||||
resultData, err := storage.ReadFile(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read result: %v", err)
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
if err := yaml.Unmarshal(resultData, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if result["domain"] != "test.com" {
|
||||
t.Errorf("Expected domain='test.com', got %v", result["domain"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateYAMLFile_YAMLFormatting(t *testing.T) {
|
||||
api, _ := setupTestAPI(t)
|
||||
instanceName := "test-instance"
|
||||
createTestInstance(t, api, instanceName)
|
||||
|
||||
configPath := api.instance.GetInstanceConfigPath(instanceName)
|
||||
|
||||
// Update with complex nested structure
|
||||
updateData := map[string]interface{}{
|
||||
"cloud": map[string]interface{}{
|
||||
"domain": "test.com",
|
||||
"dns": map[string]interface{}{
|
||||
"ip": "1.2.3.4",
|
||||
},
|
||||
},
|
||||
"cluster": map[string]interface{}{
|
||||
"nodes": []interface{}{
|
||||
map[string]interface{}{
|
||||
"name": "node1",
|
||||
"ip": "10.0.0.1",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"name": "node2",
|
||||
"ip": "10.0.0.2",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
updateYAML, _ := yaml.Marshal(updateData)
|
||||
|
||||
req := httptest.NewRequest("PUT", "/api/v1/instances/"+instanceName+"/config", bytes.NewBuffer(updateYAML))
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
vars := map[string]string{"name": instanceName}
|
||||
req = mux.SetURLVars(req, vars)
|
||||
|
||||
api.UpdateConfig(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("Expected status 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
// Verify YAML formatting
|
||||
resultData, err := storage.ReadFile(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read result: %v", err)
|
||||
}
|
||||
|
||||
resultStr := string(resultData)
|
||||
|
||||
// Should not contain Go map notation
|
||||
if bytes.Contains(resultData, []byte("map[")) {
|
||||
t.Errorf("Result contains Go map notation: %s", resultStr)
|
||||
}
|
||||
|
||||
// Should be valid YAML
|
||||
var result map[string]interface{}
|
||||
if err := yaml.Unmarshal(resultData, &result); err != nil {
|
||||
t.Fatalf("Result is not valid YAML: %v", err)
|
||||
}
|
||||
|
||||
// Should have proper indentation (check for nested structure indicators)
|
||||
if !bytes.Contains(resultData, []byte(" ")) {
|
||||
t.Error("Result appears to lack proper indentation")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateYAMLFile_InvalidYAML(t *testing.T) {
|
||||
api, _ := setupTestAPI(t)
|
||||
instanceName := "test-instance"
|
||||
createTestInstance(t, api, instanceName)
|
||||
|
||||
// Send invalid YAML
|
||||
invalidYAML := []byte("invalid: yaml: content: [")
|
||||
|
||||
req := httptest.NewRequest("PUT", "/api/v1/instances/"+instanceName+"/config", bytes.NewBuffer(invalidYAML))
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
vars := map[string]string{"name": instanceName}
|
||||
req = mux.SetURLVars(req, vars)
|
||||
|
||||
api.UpdateConfig(w, req)
|
||||
|
||||
if w.Code != http.StatusBadRequest {
|
||||
t.Errorf("Expected status 400, got %d", w.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateYAMLFile_InvalidInstance(t *testing.T) {
|
||||
api, _ := setupTestAPI(t)
|
||||
|
||||
updateData := map[string]interface{}{
|
||||
"domain": "test.com",
|
||||
}
|
||||
updateYAML, _ := yaml.Marshal(updateData)
|
||||
|
||||
req := httptest.NewRequest("PUT", "/api/v1/instances/nonexistent/config", bytes.NewBuffer(updateYAML))
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
vars := map[string]string{"name": "nonexistent"}
|
||||
req = mux.SetURLVars(req, vars)
|
||||
|
||||
api.UpdateConfig(w, req)
|
||||
|
||||
if w.Code != http.StatusNotFound {
|
||||
t.Errorf("Expected status 404, got %d", w.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateYAMLFile_FilePermissions(t *testing.T) {
|
||||
api, _ := setupTestAPI(t)
|
||||
instanceName := "test-instance"
|
||||
createTestInstance(t, api, instanceName)
|
||||
|
||||
configPath := api.instance.GetInstanceConfigPath(instanceName)
|
||||
|
||||
updateData := map[string]interface{}{
|
||||
"domain": "test.com",
|
||||
}
|
||||
updateYAML, _ := yaml.Marshal(updateData)
|
||||
|
||||
req := httptest.NewRequest("PUT", "/api/v1/instances/"+instanceName+"/config", bytes.NewBuffer(updateYAML))
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
vars := map[string]string{"name": instanceName}
|
||||
req = mux.SetURLVars(req, vars)
|
||||
|
||||
api.UpdateConfig(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("Expected status 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
// Check file permissions
|
||||
info, err := os.Stat(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to stat config file: %v", err)
|
||||
}
|
||||
|
||||
expectedPerm := os.FileMode(0644)
|
||||
if info.Mode().Perm() != expectedPerm {
|
||||
t.Errorf("Expected permissions %v, got %v", expectedPerm, info.Mode().Perm())
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateYAMLFile_UpdateSecrets(t *testing.T) {
|
||||
api, _ := setupTestAPI(t)
|
||||
instanceName := "test-instance"
|
||||
createTestInstance(t, api, instanceName)
|
||||
|
||||
secretsPath := api.instance.GetInstanceSecretsPath(instanceName)
|
||||
|
||||
// Update secrets
|
||||
updateData := map[string]interface{}{
|
||||
"dbPassword": "secret123",
|
||||
"apiKey": "key456",
|
||||
}
|
||||
updateYAML, _ := yaml.Marshal(updateData)
|
||||
|
||||
req := httptest.NewRequest("PUT", "/api/v1/instances/"+instanceName+"/secrets", bytes.NewBuffer(updateYAML))
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
vars := map[string]string{"name": instanceName}
|
||||
req = mux.SetURLVars(req, vars)
|
||||
|
||||
api.UpdateSecrets(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("Expected status 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
// Verify secrets file created and contains data
|
||||
resultData, err := storage.ReadFile(secretsPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read secrets: %v", err)
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
if err := yaml.Unmarshal(resultData, &result); err != nil {
|
||||
t.Fatalf("Failed to parse secrets: %v", err)
|
||||
}
|
||||
|
||||
if result["dbPassword"] != "secret123" {
|
||||
t.Errorf("Expected dbPassword='secret123', got %v", result["dbPassword"])
|
||||
}
|
||||
if result["apiKey"] != "key456" {
|
||||
t.Errorf("Expected apiKey='key456', got %v", result["apiKey"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateYAMLFile_ConcurrentUpdates(t *testing.T) {
|
||||
api, _ := setupTestAPI(t)
|
||||
instanceName := "test-instance"
|
||||
createTestInstance(t, api, instanceName)
|
||||
|
||||
// This test verifies that file locking prevents race conditions
|
||||
// We'll simulate concurrent updates and verify data integrity
|
||||
|
||||
numUpdates := 10
|
||||
done := make(chan bool, numUpdates)
|
||||
|
||||
for i := 0; i < numUpdates; i++ {
|
||||
go func(index int) {
|
||||
updateData := map[string]interface{}{
|
||||
"counter": index,
|
||||
}
|
||||
updateYAML, _ := yaml.Marshal(updateData)
|
||||
|
||||
req := httptest.NewRequest("PUT", "/api/v1/instances/"+instanceName+"/config", bytes.NewBuffer(updateYAML))
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
vars := map[string]string{"name": instanceName}
|
||||
req = mux.SetURLVars(req, vars)
|
||||
|
||||
api.UpdateConfig(w, req)
|
||||
|
||||
done <- w.Code == http.StatusOK
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Wait for all updates to complete
|
||||
successCount := 0
|
||||
for i := 0; i < numUpdates; i++ {
|
||||
if <-done {
|
||||
successCount++
|
||||
}
|
||||
}
|
||||
|
||||
if successCount != numUpdates {
|
||||
t.Errorf("Expected %d successful updates, got %d", numUpdates, successCount)
|
||||
}
|
||||
|
||||
// Verify file is still valid YAML
|
||||
configPath := api.instance.GetInstanceConfigPath(instanceName)
|
||||
resultData, err := storage.ReadFile(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read final config: %v", err)
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
if err := yaml.Unmarshal(resultData, &result); err != nil {
|
||||
t.Fatalf("Final config is not valid YAML: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateYAMLFile_PreservesComplexTypes(t *testing.T) {
|
||||
api, _ := setupTestAPI(t)
|
||||
instanceName := "test-instance"
|
||||
createTestInstance(t, api, instanceName)
|
||||
|
||||
configPath := api.instance.GetInstanceConfigPath(instanceName)
|
||||
|
||||
// Create config with various types
|
||||
updateData := map[string]interface{}{
|
||||
"stringValue": "text",
|
||||
"intValue": 42,
|
||||
"floatValue": 3.14,
|
||||
"boolValue": true,
|
||||
"arrayValue": []interface{}{"a", "b", "c"},
|
||||
"mapValue": map[string]interface{}{
|
||||
"nested": "value",
|
||||
},
|
||||
"nullValue": nil,
|
||||
}
|
||||
updateYAML, _ := yaml.Marshal(updateData)
|
||||
|
||||
req := httptest.NewRequest("PUT", "/api/v1/instances/"+instanceName+"/config", bytes.NewBuffer(updateYAML))
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
vars := map[string]string{"name": instanceName}
|
||||
req = mux.SetURLVars(req, vars)
|
||||
|
||||
api.UpdateConfig(w, req)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("Expected status 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
// Verify types preserved
|
||||
resultData, err := storage.ReadFile(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read result: %v", err)
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
if err := yaml.Unmarshal(resultData, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if result["stringValue"] != "text" {
|
||||
t.Errorf("String value not preserved: %v", result["stringValue"])
|
||||
}
|
||||
if result["intValue"] != 42 {
|
||||
t.Errorf("Int value not preserved: %v", result["intValue"])
|
||||
}
|
||||
if result["floatValue"] != 3.14 {
|
||||
t.Errorf("Float value not preserved: %v", result["floatValue"])
|
||||
}
|
||||
if result["boolValue"] != true {
|
||||
t.Errorf("Bool value not preserved: %v", result["boolValue"])
|
||||
}
|
||||
|
||||
arrayValue, ok := result["arrayValue"].([]interface{})
|
||||
if !ok {
|
||||
t.Errorf("Array not preserved as slice: %T", result["arrayValue"])
|
||||
} else if len(arrayValue) != 3 {
|
||||
t.Errorf("Array length not preserved: %d", len(arrayValue))
|
||||
}
|
||||
|
||||
mapValue, ok := result["mapValue"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Errorf("Map not preserved: %T", result["mapValue"])
|
||||
} else if mapValue["nested"] != "value" {
|
||||
t.Errorf("Nested map value not preserved: %v", mapValue["nested"])
|
||||
}
|
||||
|
||||
if result["nullValue"] != nil {
|
||||
t.Errorf("Null value not preserved: %v", result["nullValue"])
|
||||
}
|
||||
}
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
|
||||
@@ -12,6 +13,7 @@ import (
|
||||
)
|
||||
|
||||
// NodeDiscover initiates node discovery
|
||||
// Accepts optional subnet parameter. If no subnet provided, auto-detects local networks.
|
||||
func (api *API) NodeDiscover(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
instanceName := vars["name"]
|
||||
@@ -22,10 +24,9 @@ func (api *API) NodeDiscover(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// Parse request body - support both subnet and ip_list formats
|
||||
// Parse request body - only subnet is supported
|
||||
var req struct {
|
||||
Subnet string `json:"subnet"`
|
||||
IPList []string `json:"ip_list"`
|
||||
Subnet string `json:"subnet,omitempty"`
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
@@ -33,16 +34,38 @@ func (api *API) NodeDiscover(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// If subnet provided, use it as a single "IP" for discovery
|
||||
// The discovery manager will scan this subnet
|
||||
// Build IP list
|
||||
var ipList []string
|
||||
var err error
|
||||
|
||||
if req.Subnet != "" {
|
||||
ipList = []string{req.Subnet}
|
||||
} else if len(req.IPList) > 0 {
|
||||
ipList = req.IPList
|
||||
// Expand provided CIDR notation to individual IPs
|
||||
ipList, err = discovery.ExpandSubnet(req.Subnet)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusBadRequest, fmt.Sprintf("Invalid subnet: %v", err))
|
||||
return
|
||||
}
|
||||
} else {
|
||||
respondError(w, http.StatusBadRequest, "subnet or ip_list is required")
|
||||
return
|
||||
// Auto-detect: Get local networks when no subnet provided
|
||||
networks, err := discovery.GetLocalNetworks()
|
||||
if err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to detect local networks: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
if len(networks) == 0 {
|
||||
respondError(w, http.StatusNotFound, "No local networks found")
|
||||
return
|
||||
}
|
||||
|
||||
// Expand all detected networks
|
||||
for _, network := range networks {
|
||||
ips, err := discovery.ExpandSubnet(network)
|
||||
if err != nil {
|
||||
continue // Skip invalid networks
|
||||
}
|
||||
ipList = append(ipList, ips...)
|
||||
}
|
||||
}
|
||||
|
||||
// Start discovery
|
||||
@@ -52,9 +75,10 @@ func (api *API) NodeDiscover(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
respondJSON(w, http.StatusAccepted, map[string]string{
|
||||
"message": "Discovery started",
|
||||
"status": "running",
|
||||
respondJSON(w, http.StatusAccepted, map[string]interface{}{
|
||||
"message": "Discovery started",
|
||||
"status": "running",
|
||||
"ips_to_scan": len(ipList),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -92,7 +116,7 @@ func (api *API) NodeHardware(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Detect hardware
|
||||
nodeMgr := node.NewManager(api.dataDir)
|
||||
nodeMgr := node.NewManager(api.dataDir, instanceName)
|
||||
hwInfo, err := nodeMgr.DetectHardware(nodeIP)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to detect hardware: %v", err))
|
||||
@@ -103,6 +127,7 @@ func (api *API) NodeHardware(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// NodeDetect detects hardware on a single node (POST with IP in body)
|
||||
// IP address is required.
|
||||
func (api *API) NodeDetect(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
instanceName := vars["name"]
|
||||
@@ -123,13 +148,14 @@ func (api *API) NodeDetect(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// Validate IP is provided
|
||||
if req.IP == "" {
|
||||
respondError(w, http.StatusBadRequest, "ip is required")
|
||||
respondError(w, http.StatusBadRequest, "IP address is required")
|
||||
return
|
||||
}
|
||||
|
||||
// Detect hardware
|
||||
nodeMgr := node.NewManager(api.dataDir)
|
||||
// Detect hardware for specific IP
|
||||
nodeMgr := node.NewManager(api.dataDir, instanceName)
|
||||
hwInfo, err := nodeMgr.DetectHardware(req.IP)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to detect hardware: %v", err))
|
||||
@@ -158,7 +184,7 @@ func (api *API) NodeAdd(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Add node
|
||||
nodeMgr := node.NewManager(api.dataDir)
|
||||
nodeMgr := node.NewManager(api.dataDir, instanceName)
|
||||
if err := nodeMgr.Add(instanceName, &nodeData); err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to add node: %v", err))
|
||||
return
|
||||
@@ -182,7 +208,7 @@ func (api *API) NodeList(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// List nodes
|
||||
nodeMgr := node.NewManager(api.dataDir)
|
||||
nodeMgr := node.NewManager(api.dataDir, instanceName)
|
||||
nodes, err := nodeMgr.List(instanceName)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to list nodes: %v", err))
|
||||
@@ -207,7 +233,7 @@ func (api *API) NodeGet(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Get node
|
||||
nodeMgr := node.NewManager(api.dataDir)
|
||||
nodeMgr := node.NewManager(api.dataDir, instanceName)
|
||||
nodeData, err := nodeMgr.Get(instanceName, nodeIdentifier)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusNotFound, fmt.Sprintf("Node not found: %v", err))
|
||||
@@ -233,7 +259,7 @@ func (api *API) NodeApply(w http.ResponseWriter, r *http.Request) {
|
||||
opts := node.ApplyOptions{}
|
||||
|
||||
// Apply node configuration
|
||||
nodeMgr := node.NewManager(api.dataDir)
|
||||
nodeMgr := node.NewManager(api.dataDir, instanceName)
|
||||
if err := nodeMgr.Apply(instanceName, nodeIdentifier, opts); err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to apply node configuration: %v", err))
|
||||
return
|
||||
@@ -265,7 +291,7 @@ func (api *API) NodeUpdate(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Update node
|
||||
nodeMgr := node.NewManager(api.dataDir)
|
||||
nodeMgr := node.NewManager(api.dataDir, instanceName)
|
||||
if err := nodeMgr.Update(instanceName, nodeIdentifier, updates); err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to update node: %v", err))
|
||||
return
|
||||
@@ -289,7 +315,7 @@ func (api *API) NodeFetchTemplates(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// Fetch templates
|
||||
nodeMgr := node.NewManager(api.dataDir)
|
||||
nodeMgr := node.NewManager(api.dataDir, instanceName)
|
||||
if err := nodeMgr.FetchTemplates(instanceName); err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to fetch templates: %v", err))
|
||||
return
|
||||
@@ -301,6 +327,7 @@ func (api *API) NodeFetchTemplates(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// NodeDelete removes a node
|
||||
// Query parameter: skip_reset=true to force delete without resetting
|
||||
func (api *API) NodeDelete(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
instanceName := vars["name"]
|
||||
@@ -312,14 +339,76 @@ func (api *API) NodeDelete(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// Delete node
|
||||
nodeMgr := node.NewManager(api.dataDir)
|
||||
if err := nodeMgr.Delete(instanceName, nodeIdentifier); err != nil {
|
||||
// Parse skip_reset query parameter (default: false)
|
||||
skipReset := r.URL.Query().Get("skip_reset") == "true"
|
||||
|
||||
// Delete node (with reset unless skipReset=true)
|
||||
nodeMgr := node.NewManager(api.dataDir, instanceName)
|
||||
if err := nodeMgr.Delete(instanceName, nodeIdentifier, skipReset); err != nil {
|
||||
// Check if it's a reset-related error
|
||||
errMsg := err.Error()
|
||||
if !skipReset && (strings.Contains(errMsg, "reset") || strings.Contains(errMsg, "timed out")) {
|
||||
respondError(w, http.StatusConflict, errMsg)
|
||||
return
|
||||
}
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to delete node: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
message := "Node deleted successfully"
|
||||
if !skipReset {
|
||||
message = "Node reset and removed successfully"
|
||||
}
|
||||
|
||||
respondJSON(w, http.StatusOK, map[string]string{
|
||||
"message": "Node deleted successfully",
|
||||
"message": message,
|
||||
})
|
||||
}
|
||||
|
||||
// NodeDiscoveryCancel cancels an in-progress discovery operation
|
||||
func (api *API) NodeDiscoveryCancel(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
instanceName := vars["name"]
|
||||
|
||||
// Validate instance exists
|
||||
if err := api.instance.ValidateInstance(instanceName); err != nil {
|
||||
respondError(w, http.StatusNotFound, fmt.Sprintf("Instance not found: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
// Cancel discovery
|
||||
discoveryMgr := discovery.NewManager(api.dataDir, instanceName)
|
||||
if err := discoveryMgr.CancelDiscovery(instanceName); err != nil {
|
||||
respondError(w, http.StatusBadRequest, fmt.Sprintf("Failed to cancel discovery: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
respondJSON(w, http.StatusOK, map[string]string{
|
||||
"message": "Discovery cancelled successfully",
|
||||
})
|
||||
}
|
||||
|
||||
// NodeReset resets a node to maintenance mode
|
||||
func (api *API) NodeReset(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
instanceName := vars["name"]
|
||||
nodeIdentifier := vars["node"]
|
||||
|
||||
// Validate instance exists
|
||||
if err := api.instance.ValidateInstance(instanceName); err != nil {
|
||||
respondError(w, http.StatusNotFound, fmt.Sprintf("Instance not found: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
// Reset node
|
||||
nodeMgr := node.NewManager(api.dataDir, instanceName)
|
||||
if err := nodeMgr.Reset(instanceName, nodeIdentifier); err != nil {
|
||||
respondError(w, http.StatusInternalServerError, fmt.Sprintf("Failed to reset node: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
respondJSON(w, http.StatusOK, map[string]string{
|
||||
"message": "Node reset successfully - now in maintenance mode",
|
||||
"node": nodeIdentifier,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -45,17 +45,9 @@ func (api *API) PXEListAssets(w http.ResponseWriter, r *http.Request) {
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Proxy to new asset system
|
||||
assetsMgr := assets.NewManager(api.dataDir)
|
||||
schematic, err := assetsMgr.GetSchematic(schematicID)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusNotFound, fmt.Sprintf("Schematic not found: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
respondJSON(w, http.StatusOK, map[string]interface{}{
|
||||
"assets": schematic.Assets,
|
||||
"assets": []interface{}{},
|
||||
"message": "Please use the new /api/v1/pxe/assets endpoint with both schematic ID and version",
|
||||
})
|
||||
}
|
||||
|
||||
@@ -184,20 +176,7 @@ func (api *API) PXEGetAsset(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// Proxy to new asset system - serve the file directly
|
||||
assetsMgr := assets.NewManager(api.dataDir)
|
||||
assetPath, err := assetsMgr.GetAssetPath(schematicID, assetType)
|
||||
if err != nil {
|
||||
respondError(w, http.StatusNotFound, fmt.Sprintf("Asset not found: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
respondJSON(w, http.StatusOK, map[string]interface{}{
|
||||
"type": assetType,
|
||||
"path": assetPath,
|
||||
"valid": true,
|
||||
"schematic_id": schematicID,
|
||||
})
|
||||
respondError(w, http.StatusBadRequest, "This deprecated endpoint requires version. Please use /api/v1/pxe/assets/{schematicId}/{version}/pxe/{assetType}")
|
||||
}
|
||||
|
||||
// PXEDeleteAsset deletes a PXE asset
|
||||
|
||||
@@ -39,9 +39,9 @@ func (api *API) SchematicGetInstanceSchematic(w http.ResponseWriter, r *http.Req
|
||||
|
||||
// If schematic is configured, get asset status
|
||||
var assetStatus interface{}
|
||||
if schematicID != "" && schematicID != "null" {
|
||||
if schematicID != "" && schematicID != "null" && version != "" && version != "null" {
|
||||
assetsMgr := assets.NewManager(api.dataDir)
|
||||
status, err := assetsMgr.GetAssetStatus(schematicID)
|
||||
status, err := assetsMgr.GetAssetStatus(schematicID, version)
|
||||
if err == nil {
|
||||
assetStatus = status
|
||||
}
|
||||
|
||||
@@ -37,9 +37,9 @@ type EnhancedApp struct {
|
||||
|
||||
// RuntimeStatus contains runtime information from kubernetes
|
||||
type RuntimeStatus struct {
|
||||
Pods []PodInfo `json:"pods,omitempty"`
|
||||
Replicas *ReplicaInfo `json:"replicas,omitempty"`
|
||||
Resources *ResourceUsage `json:"resources,omitempty"`
|
||||
Pods []PodInfo `json:"pods,omitempty"`
|
||||
Replicas *ReplicaInfo `json:"replicas,omitempty"`
|
||||
Resources *ResourceUsage `json:"resources,omitempty"`
|
||||
RecentEvents []KubernetesEvent `json:"recentEvents,omitempty"`
|
||||
}
|
||||
|
||||
|
||||
@@ -33,8 +33,8 @@ type Asset struct {
|
||||
Downloaded bool `json:"downloaded"` // Whether asset exists
|
||||
}
|
||||
|
||||
// Schematic represents a Talos schematic and its assets
|
||||
type Schematic struct {
|
||||
// PXEAsset represents a schematic@version combination and its assets
|
||||
type PXEAsset struct {
|
||||
SchematicID string `json:"schematic_id"`
|
||||
Version string `json:"version"`
|
||||
Path string `json:"path"`
|
||||
@@ -49,9 +49,10 @@ type AssetStatus struct {
|
||||
Complete bool `json:"complete"`
|
||||
}
|
||||
|
||||
// GetAssetDir returns the asset directory for a schematic
|
||||
func (m *Manager) GetAssetDir(schematicID string) string {
|
||||
return filepath.Join(m.dataDir, "assets", schematicID)
|
||||
// GetAssetDir returns the asset directory for a schematic@version composite key
|
||||
func (m *Manager) GetAssetDir(schematicID, version string) string {
|
||||
composite := fmt.Sprintf("%s@%s", schematicID, version)
|
||||
return filepath.Join(m.dataDir, "assets", composite)
|
||||
}
|
||||
|
||||
// GetAssetsRootDir returns the root assets directory
|
||||
@@ -59,8 +60,8 @@ func (m *Manager) GetAssetsRootDir() string {
|
||||
return filepath.Join(m.dataDir, "assets")
|
||||
}
|
||||
|
||||
// ListSchematics returns all available schematics
|
||||
func (m *Manager) ListSchematics() ([]Schematic, error) {
|
||||
// ListAssets returns all available schematic@version combinations
|
||||
func (m *Manager) ListAssets() ([]PXEAsset, error) {
|
||||
assetsDir := m.GetAssetsRootDir()
|
||||
|
||||
// Ensure assets directory exists
|
||||
@@ -73,52 +74,53 @@ func (m *Manager) ListSchematics() ([]Schematic, error) {
|
||||
return nil, fmt.Errorf("reading assets directory: %w", err)
|
||||
}
|
||||
|
||||
var schematics []Schematic
|
||||
var assets []PXEAsset
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
schematicID := entry.Name()
|
||||
schematic, err := m.GetSchematic(schematicID)
|
||||
if err != nil {
|
||||
// Skip invalid schematics
|
||||
// Parse directory name as schematicID@version
|
||||
parts := strings.SplitN(entry.Name(), "@", 2)
|
||||
if len(parts) != 2 {
|
||||
// Skip invalid directory names (old format or other)
|
||||
continue
|
||||
}
|
||||
schematics = append(schematics, *schematic)
|
||||
schematicID := parts[0]
|
||||
version := parts[1]
|
||||
|
||||
asset, err := m.GetAsset(schematicID, version)
|
||||
if err != nil {
|
||||
// Skip invalid assets
|
||||
continue
|
||||
}
|
||||
assets = append(assets, *asset)
|
||||
}
|
||||
}
|
||||
|
||||
return schematics, nil
|
||||
return assets, nil
|
||||
}
|
||||
|
||||
// GetSchematic returns details for a specific schematic
|
||||
func (m *Manager) GetSchematic(schematicID string) (*Schematic, error) {
|
||||
// GetAsset returns details for a specific schematic@version combination
|
||||
func (m *Manager) GetAsset(schematicID, version string) (*PXEAsset, error) {
|
||||
if schematicID == "" {
|
||||
return nil, fmt.Errorf("schematic ID cannot be empty")
|
||||
}
|
||||
if version == "" {
|
||||
return nil, fmt.Errorf("version cannot be empty")
|
||||
}
|
||||
|
||||
assetDir := m.GetAssetDir(schematicID)
|
||||
assetDir := m.GetAssetDir(schematicID, version)
|
||||
|
||||
// Check if schematic directory exists
|
||||
// Check if asset directory exists
|
||||
if !storage.FileExists(assetDir) {
|
||||
return nil, fmt.Errorf("schematic %s not found", schematicID)
|
||||
return nil, fmt.Errorf("asset %s@%s not found", schematicID, version)
|
||||
}
|
||||
|
||||
// List assets for this schematic
|
||||
assets, err := m.listSchematicAssets(schematicID)
|
||||
// List assets for this schematic@version
|
||||
assets, err := m.listAssetFiles(schematicID, version)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("listing schematic assets: %w", err)
|
||||
return nil, fmt.Errorf("listing assets: %w", err)
|
||||
}
|
||||
|
||||
// Try to determine version from version file
|
||||
version := ""
|
||||
versionPath := filepath.Join(assetDir, "version.txt")
|
||||
if storage.FileExists(versionPath) {
|
||||
data, err := os.ReadFile(versionPath)
|
||||
if err == nil {
|
||||
version = strings.TrimSpace(string(data))
|
||||
}
|
||||
}
|
||||
|
||||
return &Schematic{
|
||||
return &PXEAsset{
|
||||
SchematicID: schematicID,
|
||||
Version: version,
|
||||
Path: assetDir,
|
||||
@@ -126,9 +128,14 @@ func (m *Manager) GetSchematic(schematicID string) (*Schematic, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// listSchematicAssets lists all assets for a schematic
|
||||
func (m *Manager) listSchematicAssets(schematicID string) ([]Asset, error) {
|
||||
assetDir := m.GetAssetDir(schematicID)
|
||||
// AssetExists checks if a schematic@version exists
|
||||
func (m *Manager) AssetExists(schematicID, version string) bool {
|
||||
return storage.FileExists(m.GetAssetDir(schematicID, version))
|
||||
}
|
||||
|
||||
// listAssetFiles lists all asset files for a schematic@version
|
||||
func (m *Manager) listAssetFiles(schematicID, version string) ([]Asset, error) {
|
||||
assetDir := m.GetAssetDir(schematicID, version)
|
||||
|
||||
var assets []Asset
|
||||
|
||||
@@ -221,19 +228,13 @@ func (m *Manager) DownloadAssets(schematicID, version, platform string, assetTyp
|
||||
assetTypes = []string{"kernel", "initramfs", "iso"}
|
||||
}
|
||||
|
||||
assetDir := m.GetAssetDir(schematicID)
|
||||
assetDir := m.GetAssetDir(schematicID, version)
|
||||
|
||||
// Ensure asset directory exists
|
||||
if err := storage.EnsureDir(assetDir, 0755); err != nil {
|
||||
return fmt.Errorf("creating asset directory: %w", err)
|
||||
}
|
||||
|
||||
// Save version info
|
||||
versionPath := filepath.Join(assetDir, "version.txt")
|
||||
if err := os.WriteFile(versionPath, []byte(version), 0644); err != nil {
|
||||
return fmt.Errorf("saving version info: %w", err)
|
||||
}
|
||||
|
||||
// Download each requested asset
|
||||
for _, assetType := range assetTypes {
|
||||
if err := m.downloadAsset(schematicID, assetType, version, platform); err != nil {
|
||||
@@ -246,7 +247,7 @@ func (m *Manager) DownloadAssets(schematicID, version, platform string, assetTyp
|
||||
|
||||
// downloadAsset downloads a single asset
|
||||
func (m *Manager) downloadAsset(schematicID, assetType, version, platform string) error {
|
||||
assetDir := m.GetAssetDir(schematicID)
|
||||
assetDir := m.GetAssetDir(schematicID, version)
|
||||
|
||||
// Determine subdirectory, filename, and URL based on asset type and platform
|
||||
var subdir, filename, urlPath string
|
||||
@@ -261,7 +262,7 @@ func (m *Manager) downloadAsset(schematicID, assetType, version, platform string
|
||||
urlPath = fmt.Sprintf("initramfs-%s.xz", platform)
|
||||
case "iso":
|
||||
subdir = "iso"
|
||||
// Preserve version and platform in filename for clarity
|
||||
// Include version in filename for clarity
|
||||
filename = fmt.Sprintf("talos-%s-metal-%s.iso", version, platform)
|
||||
urlPath = fmt.Sprintf("metal-%s.iso", platform)
|
||||
default:
|
||||
@@ -322,31 +323,24 @@ func (m *Manager) downloadAsset(schematicID, assetType, version, platform string
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetAssetStatus returns the download status for a schematic
|
||||
func (m *Manager) GetAssetStatus(schematicID string) (*AssetStatus, error) {
|
||||
// GetAssetStatus returns the download status for a schematic@version
|
||||
func (m *Manager) GetAssetStatus(schematicID, version string) (*AssetStatus, error) {
|
||||
if schematicID == "" {
|
||||
return nil, fmt.Errorf("schematic ID cannot be empty")
|
||||
}
|
||||
|
||||
assetDir := m.GetAssetDir(schematicID)
|
||||
|
||||
// Check if schematic directory exists
|
||||
if !storage.FileExists(assetDir) {
|
||||
return nil, fmt.Errorf("schematic %s not found", schematicID)
|
||||
if version == "" {
|
||||
return nil, fmt.Errorf("version cannot be empty")
|
||||
}
|
||||
|
||||
// Get version
|
||||
version := ""
|
||||
versionPath := filepath.Join(assetDir, "version.txt")
|
||||
if storage.FileExists(versionPath) {
|
||||
data, err := os.ReadFile(versionPath)
|
||||
if err == nil {
|
||||
version = strings.TrimSpace(string(data))
|
||||
}
|
||||
assetDir := m.GetAssetDir(schematicID, version)
|
||||
|
||||
// Check if asset directory exists
|
||||
if !storage.FileExists(assetDir) {
|
||||
return nil, fmt.Errorf("asset %s@%s not found", schematicID, version)
|
||||
}
|
||||
|
||||
// List assets
|
||||
assets, err := m.listSchematicAssets(schematicID)
|
||||
assets, err := m.listAssetFiles(schematicID, version)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("listing assets: %w", err)
|
||||
}
|
||||
@@ -370,12 +364,15 @@ func (m *Manager) GetAssetStatus(schematicID string) (*AssetStatus, error) {
|
||||
}
|
||||
|
||||
// GetAssetPath returns the path to a specific asset file
|
||||
func (m *Manager) GetAssetPath(schematicID, assetType string) (string, error) {
|
||||
func (m *Manager) GetAssetPath(schematicID, version, assetType string) (string, error) {
|
||||
if schematicID == "" {
|
||||
return "", fmt.Errorf("schematic ID cannot be empty")
|
||||
}
|
||||
if version == "" {
|
||||
return "", fmt.Errorf("version cannot be empty")
|
||||
}
|
||||
|
||||
assetDir := m.GetAssetDir(schematicID)
|
||||
assetDir := m.GetAssetDir(schematicID, version)
|
||||
|
||||
var subdir, pattern string
|
||||
switch assetType {
|
||||
@@ -387,7 +384,7 @@ func (m *Manager) GetAssetPath(schematicID, assetType string) (string, error) {
|
||||
pattern = "initramfs-amd64.xz"
|
||||
case "iso":
|
||||
subdir = "iso"
|
||||
pattern = "talos-*.iso" // Glob pattern for version-specific filename
|
||||
pattern = "talos-*.iso" // Glob pattern for version and platform-specific filename
|
||||
default:
|
||||
return "", fmt.Errorf("unknown asset type: %s", assetType)
|
||||
}
|
||||
@@ -416,13 +413,16 @@ func (m *Manager) GetAssetPath(schematicID, assetType string) (string, error) {
|
||||
return assetPath, nil
|
||||
}
|
||||
|
||||
// DeleteSchematic removes a schematic and all its assets
|
||||
func (m *Manager) DeleteSchematic(schematicID string) error {
|
||||
// DeleteAsset removes a schematic@version and all its assets
|
||||
func (m *Manager) DeleteAsset(schematicID, version string) error {
|
||||
if schematicID == "" {
|
||||
return fmt.Errorf("schematic ID cannot be empty")
|
||||
}
|
||||
if version == "" {
|
||||
return fmt.Errorf("version cannot be empty")
|
||||
}
|
||||
|
||||
assetDir := m.GetAssetDir(schematicID)
|
||||
assetDir := m.GetAssetDir(schematicID, version)
|
||||
|
||||
if !storage.FileExists(assetDir) {
|
||||
return nil // Already deleted, idempotent
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
@@ -10,6 +11,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/wild-cloud/wild-central/daemon/internal/operations"
|
||||
"github.com/wild-cloud/wild-central/daemon/internal/storage"
|
||||
"github.com/wild-cloud/wild-central/daemon/internal/tools"
|
||||
)
|
||||
@@ -18,13 +20,15 @@ import (
|
||||
type Manager struct {
|
||||
dataDir string
|
||||
talosctl *tools.Talosctl
|
||||
opsMgr *operations.Manager
|
||||
}
|
||||
|
||||
// NewManager creates a new cluster manager
|
||||
func NewManager(dataDir string) *Manager {
|
||||
func NewManager(dataDir string, opsMgr *operations.Manager) *Manager {
|
||||
return &Manager{
|
||||
dataDir: dataDir,
|
||||
talosctl: tools.NewTalosctl(),
|
||||
opsMgr: opsMgr,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,15 +39,24 @@ type ClusterConfig struct {
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
// NodeStatus represents the health status of a single node
|
||||
type NodeStatus struct {
|
||||
Hostname string `json:"hostname"`
|
||||
Ready bool `json:"ready"`
|
||||
KubernetesReady bool `json:"kubernetes_ready"`
|
||||
Role string `json:"role"` // "control-plane" or "worker"
|
||||
}
|
||||
|
||||
// ClusterStatus represents cluster health and status
|
||||
type ClusterStatus struct {
|
||||
Status string `json:"status"` // ready, pending, error
|
||||
Nodes int `json:"nodes"`
|
||||
ControlPlaneNodes int `json:"control_plane_nodes"`
|
||||
WorkerNodes int `json:"worker_nodes"`
|
||||
KubernetesVersion string `json:"kubernetes_version"`
|
||||
TalosVersion string `json:"talos_version"`
|
||||
Services map[string]string `json:"services"`
|
||||
Status string `json:"status"` // ready, pending, error
|
||||
Nodes int `json:"nodes"`
|
||||
ControlPlaneNodes int `json:"control_plane_nodes"`
|
||||
WorkerNodes int `json:"worker_nodes"`
|
||||
KubernetesVersion string `json:"kubernetes_version"`
|
||||
TalosVersion string `json:"talos_version"`
|
||||
Services map[string]string `json:"services"`
|
||||
NodeStatuses map[string]NodeStatus `json:"node_statuses,omitempty"`
|
||||
}
|
||||
|
||||
// GetTalosDir returns the talos directory for an instance
|
||||
@@ -96,11 +109,28 @@ func (m *Manager) GenerateConfig(instanceName string, config *ClusterConfig) err
|
||||
return nil
|
||||
}
|
||||
|
||||
// Bootstrap bootstraps the cluster on the specified node
|
||||
func (m *Manager) Bootstrap(instanceName, nodeName string) error {
|
||||
// Get node configuration to find the target IP
|
||||
configPath := tools.GetInstanceConfigPath(m.dataDir, instanceName)
|
||||
// Bootstrap bootstraps the cluster on the specified node with progress tracking
|
||||
func (m *Manager) Bootstrap(instanceName, nodeName string) (string, error) {
|
||||
// Create operation for tracking
|
||||
opID, err := m.opsMgr.Start(instanceName, "bootstrap", nodeName)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to start bootstrap operation: %w", err)
|
||||
}
|
||||
|
||||
// Run bootstrap asynchronously
|
||||
go func() {
|
||||
if err := m.runBootstrapWithTracking(instanceName, nodeName, opID); err != nil {
|
||||
_ = m.opsMgr.Update(instanceName, opID, "failed", err.Error(), 0)
|
||||
}
|
||||
}()
|
||||
|
||||
return opID, nil
|
||||
}
|
||||
|
||||
// runBootstrapWithTracking runs the bootstrap process with detailed progress tracking
|
||||
func (m *Manager) runBootstrapWithTracking(instanceName, nodeName, opID string) error {
|
||||
ctx := context.Background()
|
||||
configPath := tools.GetInstanceConfigPath(m.dataDir, instanceName)
|
||||
yq := tools.NewYQ()
|
||||
|
||||
// Get node's target IP
|
||||
@@ -114,17 +144,71 @@ func (m *Manager) Bootstrap(instanceName, nodeName string) error {
|
||||
return fmt.Errorf("node %s does not have a target IP configured", nodeName)
|
||||
}
|
||||
|
||||
// Get talosconfig path for this instance
|
||||
// Get VIP
|
||||
vipRaw, err := yq.Get(configPath, ".cluster.nodes.control.vip")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get VIP: %w", err)
|
||||
}
|
||||
|
||||
vip := tools.CleanYQOutput(vipRaw)
|
||||
if vip == "" || vip == "null" {
|
||||
return fmt.Errorf("control plane VIP not configured")
|
||||
}
|
||||
|
||||
// Step 0: Run talosctl bootstrap
|
||||
if err := m.runBootstrapCommand(instanceName, nodeIP, opID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Step 1: Wait for etcd health
|
||||
if err := m.waitForEtcd(ctx, instanceName, nodeIP, opID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Step 2: Wait for VIP assignment
|
||||
if err := m.waitForVIP(ctx, instanceName, nodeIP, vip, opID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Step 3: Wait for control plane components
|
||||
if err := m.waitForControlPlane(ctx, instanceName, nodeIP, opID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Step 4: Wait for API server on VIP
|
||||
if err := m.waitForAPIServer(ctx, instanceName, vip, opID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Step 5: Configure cluster access
|
||||
if err := m.configureClusterAccess(instanceName, vip, opID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Step 6: Verify node registration
|
||||
if err := m.waitForNodeRegistration(ctx, instanceName, opID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Mark as completed
|
||||
_ = m.opsMgr.Update(instanceName, opID, "completed", "Bootstrap completed successfully", 100)
|
||||
return nil
|
||||
}
|
||||
|
||||
// runBootstrapCommand executes the initial bootstrap command
|
||||
func (m *Manager) runBootstrapCommand(instanceName, nodeIP, opID string) error {
|
||||
_ = m.opsMgr.UpdateBootstrapProgress(instanceName, opID, 0, "bootstrap", 1, 1, "Running talosctl bootstrap command")
|
||||
|
||||
talosconfigPath := tools.GetTalosconfigPath(m.dataDir, instanceName)
|
||||
|
||||
// Set talosctl endpoint (with proper context via TALOSCONFIG env var)
|
||||
// Set talosctl endpoint
|
||||
cmdEndpoint := exec.Command("talosctl", "config", "endpoint", nodeIP)
|
||||
tools.WithTalosconfig(cmdEndpoint, talosconfigPath)
|
||||
if output, err := cmdEndpoint.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to set talosctl endpoint: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
// Bootstrap command (with proper context via TALOSCONFIG env var)
|
||||
// Bootstrap command
|
||||
cmd := exec.Command("talosctl", "bootstrap", "--nodes", nodeIP)
|
||||
tools.WithTalosconfig(cmd, talosconfigPath)
|
||||
output, err := cmd.CombinedOutput()
|
||||
@@ -132,16 +216,152 @@ func (m *Manager) Bootstrap(instanceName, nodeName string) error {
|
||||
return fmt.Errorf("failed to bootstrap cluster: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
// Retrieve kubeconfig after bootstrap (best-effort with retry)
|
||||
log.Printf("Waiting for Kubernetes API server to become ready...")
|
||||
if err := m.retrieveKubeconfigFromCluster(instanceName, nodeIP, 5*time.Minute); err != nil {
|
||||
log.Printf("Warning: %v", err)
|
||||
log.Printf("You can retrieve it manually later using: wild cluster kubeconfig --generate")
|
||||
return nil
|
||||
}
|
||||
|
||||
// waitForEtcd waits for etcd to become healthy
|
||||
func (m *Manager) waitForEtcd(ctx context.Context, instanceName, nodeIP, opID string) error {
|
||||
maxAttempts := 30
|
||||
talosconfigPath := tools.GetTalosconfigPath(m.dataDir, instanceName)
|
||||
|
||||
for attempt := 1; attempt <= maxAttempts; attempt++ {
|
||||
_ = m.opsMgr.UpdateBootstrapProgress(instanceName, opID, 1, "etcd", attempt, maxAttempts, "Waiting for etcd to become healthy")
|
||||
|
||||
cmd := exec.Command("talosctl", "-n", nodeIP, "etcd", "status")
|
||||
tools.WithTalosconfig(cmd, talosconfigPath)
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
if err == nil && strings.Contains(string(output), nodeIP) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if attempt < maxAttempts {
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("etcd did not become healthy after %d attempts", maxAttempts)
|
||||
}
|
||||
|
||||
// waitForVIP waits for VIP to be assigned to the node
|
||||
func (m *Manager) waitForVIP(ctx context.Context, instanceName, nodeIP, vip, opID string) error {
|
||||
maxAttempts := 90
|
||||
talosconfigPath := tools.GetTalosconfigPath(m.dataDir, instanceName)
|
||||
|
||||
for attempt := 1; attempt <= maxAttempts; attempt++ {
|
||||
_ = m.opsMgr.UpdateBootstrapProgress(instanceName, opID, 2, "vip", attempt, maxAttempts, "Waiting for VIP assignment")
|
||||
|
||||
cmd := exec.Command("talosctl", "-n", nodeIP, "get", "addresses")
|
||||
tools.WithTalosconfig(cmd, talosconfigPath)
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
if err == nil && strings.Contains(string(output), vip+"/32") {
|
||||
return nil
|
||||
}
|
||||
|
||||
if attempt < maxAttempts {
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("VIP was not assigned after %d attempts", maxAttempts)
|
||||
}
|
||||
|
||||
// waitForControlPlane waits for control plane components to start
|
||||
func (m *Manager) waitForControlPlane(ctx context.Context, instanceName, nodeIP, opID string) error {
|
||||
maxAttempts := 60
|
||||
talosconfigPath := tools.GetTalosconfigPath(m.dataDir, instanceName)
|
||||
|
||||
for attempt := 1; attempt <= maxAttempts; attempt++ {
|
||||
_ = m.opsMgr.UpdateBootstrapProgress(instanceName, opID, 3, "controlplane", attempt, maxAttempts, "Waiting for control plane components")
|
||||
|
||||
cmd := exec.Command("talosctl", "-n", nodeIP, "containers", "-k")
|
||||
tools.WithTalosconfig(cmd, talosconfigPath)
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
if err == nil && strings.Contains(string(output), "kube-") {
|
||||
return nil
|
||||
}
|
||||
|
||||
if attempt < maxAttempts {
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("control plane components did not start after %d attempts", maxAttempts)
|
||||
}
|
||||
|
||||
// waitForAPIServer waits for Kubernetes API server to respond
|
||||
func (m *Manager) waitForAPIServer(ctx context.Context, instanceName, vip, opID string) error {
|
||||
maxAttempts := 60
|
||||
apiURL := fmt.Sprintf("https://%s:6443/healthz", vip)
|
||||
|
||||
for attempt := 1; attempt <= maxAttempts; attempt++ {
|
||||
_ = m.opsMgr.UpdateBootstrapProgress(instanceName, opID, 4, "apiserver", attempt, maxAttempts, "Waiting for Kubernetes API server")
|
||||
|
||||
cmd := exec.Command("curl", "-k", "-s", "--max-time", "5", apiURL)
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
if err == nil && strings.Contains(string(output), "ok") {
|
||||
return nil
|
||||
}
|
||||
|
||||
if attempt < maxAttempts {
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("API server did not respond after %d attempts", maxAttempts)
|
||||
}
|
||||
|
||||
// configureClusterAccess configures talosctl and kubectl to use the VIP
|
||||
func (m *Manager) configureClusterAccess(instanceName, vip, opID string) error {
|
||||
_ = m.opsMgr.UpdateBootstrapProgress(instanceName, opID, 5, "configure", 1, 1, "Configuring cluster access")
|
||||
|
||||
talosconfigPath := tools.GetTalosconfigPath(m.dataDir, instanceName)
|
||||
kubeconfigPath := tools.GetKubeconfigPath(m.dataDir, instanceName)
|
||||
|
||||
// Set talosctl endpoint to VIP
|
||||
cmdEndpoint := exec.Command("talosctl", "config", "endpoint", vip)
|
||||
tools.WithTalosconfig(cmdEndpoint, talosconfigPath)
|
||||
if output, err := cmdEndpoint.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to set talosctl endpoint: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
// Retrieve kubeconfig
|
||||
cmdKubeconfig := exec.Command("talosctl", "kubeconfig", "--nodes", vip, kubeconfigPath)
|
||||
tools.WithTalosconfig(cmdKubeconfig, talosconfigPath)
|
||||
if output, err := cmdKubeconfig.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to retrieve kubeconfig: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// waitForNodeRegistration waits for the node to register with Kubernetes
|
||||
func (m *Manager) waitForNodeRegistration(ctx context.Context, instanceName, opID string) error {
|
||||
maxAttempts := 10
|
||||
kubeconfigPath := tools.GetKubeconfigPath(m.dataDir, instanceName)
|
||||
|
||||
for attempt := 1; attempt <= maxAttempts; attempt++ {
|
||||
_ = m.opsMgr.UpdateBootstrapProgress(instanceName, opID, 6, "nodes", attempt, maxAttempts, "Waiting for node registration")
|
||||
|
||||
cmd := exec.Command("kubectl", "get", "nodes")
|
||||
tools.WithKubeconfig(cmd, kubeconfigPath)
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
if err == nil && strings.Contains(string(output), "Ready") {
|
||||
return nil
|
||||
}
|
||||
|
||||
if attempt < maxAttempts {
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("node did not register after %d attempts", maxAttempts)
|
||||
}
|
||||
|
||||
// retrieveKubeconfigFromCluster retrieves kubeconfig from the cluster with retry logic
|
||||
func (m *Manager) retrieveKubeconfigFromCluster(instanceName, nodeIP string, timeout time.Duration) error {
|
||||
kubeconfigPath := tools.GetKubeconfigPath(m.dataDir, instanceName)
|
||||
@@ -284,6 +504,7 @@ func (m *Manager) GetStatus(instanceName string) (*ClusterStatus, error) {
|
||||
var nodesResult struct {
|
||||
Items []struct {
|
||||
Metadata struct {
|
||||
Name string `json:"name"`
|
||||
Labels map[string]string `json:"labels"`
|
||||
} `json:"metadata"`
|
||||
Status struct {
|
||||
@@ -328,20 +549,38 @@ func (m *Manager) GetStatus(instanceName string) (*ClusterStatus, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// Count control plane and worker nodes
|
||||
// Count control plane and worker nodes, and populate per-node status
|
||||
status.NodeStatuses = make(map[string]NodeStatus)
|
||||
|
||||
for _, node := range nodesResult.Items {
|
||||
hostname := node.Metadata.Name // K8s node name is hostname
|
||||
|
||||
role := "worker"
|
||||
if _, isControl := node.Metadata.Labels["node-role.kubernetes.io/control-plane"]; isControl {
|
||||
role = "control-plane"
|
||||
status.ControlPlaneNodes++
|
||||
} else {
|
||||
status.WorkerNodes++
|
||||
}
|
||||
|
||||
// Check if node is ready
|
||||
nodeReady := false
|
||||
for _, cond := range node.Status.Conditions {
|
||||
if cond.Type == "Ready" && cond.Status != "True" {
|
||||
status.Status = "degraded"
|
||||
if cond.Type == "Ready" {
|
||||
nodeReady = (cond.Status == "True")
|
||||
if !nodeReady {
|
||||
status.Status = "degraded"
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
status.NodeStatuses[hostname] = NodeStatus{
|
||||
Hostname: hostname,
|
||||
Ready: true, // In K8s means it's reachable
|
||||
KubernetesReady: nodeReady,
|
||||
Role: role,
|
||||
}
|
||||
}
|
||||
|
||||
// Check basic service status
|
||||
|
||||
714
internal/config/config_test.go
Normal file
714
internal/config/config_test.go
Normal file
@@ -0,0 +1,714 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Test: LoadGlobalConfig loads valid configuration
|
||||
func TestLoadGlobalConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
configYAML string
|
||||
verify func(t *testing.T, config *GlobalConfig)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "loads complete configuration",
|
||||
configYAML: `wildcloud:
|
||||
repository: "https://github.com/example/repo"
|
||||
currentPhase: "setup"
|
||||
completedPhases:
|
||||
- "phase1"
|
||||
- "phase2"
|
||||
server:
|
||||
port: 8080
|
||||
host: "localhost"
|
||||
operator:
|
||||
email: "admin@example.com"
|
||||
cloud:
|
||||
dns:
|
||||
ip: "192.168.1.1"
|
||||
externalResolver: "8.8.8.8"
|
||||
router:
|
||||
ip: "192.168.1.254"
|
||||
dynamicDns: "example.dyndns.org"
|
||||
dnsmasq:
|
||||
interface: "eth0"
|
||||
cluster:
|
||||
endpointIp: "192.168.1.100"
|
||||
nodes:
|
||||
talos:
|
||||
version: "v1.8.0"
|
||||
`,
|
||||
verify: func(t *testing.T, config *GlobalConfig) {
|
||||
if config.Wildcloud.Repository != "https://github.com/example/repo" {
|
||||
t.Error("repository not loaded correctly")
|
||||
}
|
||||
if config.Server.Port != 8080 {
|
||||
t.Error("port not loaded correctly")
|
||||
}
|
||||
if config.Cloud.DNS.IP != "192.168.1.1" {
|
||||
t.Error("DNS IP not loaded correctly")
|
||||
}
|
||||
if config.Cluster.EndpointIP != "192.168.1.100" {
|
||||
t.Error("endpoint IP not loaded correctly")
|
||||
}
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "applies default values",
|
||||
configYAML: `cloud:
|
||||
dns:
|
||||
ip: "192.168.1.1"
|
||||
cluster:
|
||||
nodes:
|
||||
talos:
|
||||
version: "v1.8.0"
|
||||
`,
|
||||
verify: func(t *testing.T, config *GlobalConfig) {
|
||||
if config.Server.Port != 5055 {
|
||||
t.Errorf("default port not applied, got %d, want 5055", config.Server.Port)
|
||||
}
|
||||
if config.Server.Host != "0.0.0.0" {
|
||||
t.Errorf("default host not applied, got %q, want %q", config.Server.Host, "0.0.0.0")
|
||||
}
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "preserves custom port and host",
|
||||
configYAML: `server:
|
||||
port: 9000
|
||||
host: "127.0.0.1"
|
||||
cloud:
|
||||
dns:
|
||||
ip: "192.168.1.1"
|
||||
cluster:
|
||||
nodes:
|
||||
talos:
|
||||
version: "v1.8.0"
|
||||
`,
|
||||
verify: func(t *testing.T, config *GlobalConfig) {
|
||||
if config.Server.Port != 9000 {
|
||||
t.Errorf("custom port not preserved, got %d, want 9000", config.Server.Port)
|
||||
}
|
||||
if config.Server.Host != "127.0.0.1" {
|
||||
t.Errorf("custom host not preserved, got %q, want %q", config.Server.Host, "127.0.0.1")
|
||||
}
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
configPath := filepath.Join(tempDir, "config.yaml")
|
||||
|
||||
if err := os.WriteFile(configPath, []byte(tt.configYAML), 0644); err != nil {
|
||||
t.Fatalf("setup failed: %v", err)
|
||||
}
|
||||
|
||||
config, err := LoadGlobalConfig(configPath)
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Error("expected error, got nil")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if config == nil {
|
||||
t.Fatal("config is nil")
|
||||
}
|
||||
|
||||
if tt.verify != nil {
|
||||
tt.verify(t, config)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test: LoadGlobalConfig error cases
|
||||
func TestLoadGlobalConfig_Errors(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setupFunc func(t *testing.T) string
|
||||
errContains string
|
||||
}{
|
||||
{
|
||||
name: "non-existent file",
|
||||
setupFunc: func(t *testing.T) string {
|
||||
return filepath.Join(t.TempDir(), "nonexistent.yaml")
|
||||
},
|
||||
errContains: "reading config file",
|
||||
},
|
||||
{
|
||||
name: "invalid yaml",
|
||||
setupFunc: func(t *testing.T) string {
|
||||
tempDir := t.TempDir()
|
||||
configPath := filepath.Join(tempDir, "config.yaml")
|
||||
content := `invalid: yaml: [[[`
|
||||
if err := os.WriteFile(configPath, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("setup failed: %v", err)
|
||||
}
|
||||
return configPath
|
||||
},
|
||||
errContains: "parsing config file",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
configPath := tt.setupFunc(t)
|
||||
_, err := LoadGlobalConfig(configPath)
|
||||
|
||||
if err == nil {
|
||||
t.Error("expected error, got nil")
|
||||
} else if !strings.Contains(err.Error(), tt.errContains) {
|
||||
t.Errorf("error %q does not contain %q", err.Error(), tt.errContains)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test: SaveGlobalConfig saves configuration correctly
|
||||
func TestSaveGlobalConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
config *GlobalConfig
|
||||
verify func(t *testing.T, configPath string)
|
||||
}{
|
||||
{
|
||||
name: "saves complete configuration",
|
||||
config: &GlobalConfig{
|
||||
Wildcloud: struct {
|
||||
Repository string `yaml:"repository" json:"repository"`
|
||||
CurrentPhase string `yaml:"currentPhase" json:"currentPhase"`
|
||||
CompletedPhases []string `yaml:"completedPhases" json:"completedPhases"`
|
||||
}{
|
||||
Repository: "https://github.com/example/repo",
|
||||
CurrentPhase: "setup",
|
||||
CompletedPhases: []string{"phase1", "phase2"},
|
||||
},
|
||||
Server: struct {
|
||||
Port int `yaml:"port" json:"port"`
|
||||
Host string `yaml:"host" json:"host"`
|
||||
}{
|
||||
Port: 8080,
|
||||
Host: "localhost",
|
||||
},
|
||||
},
|
||||
verify: func(t *testing.T, configPath string) {
|
||||
content, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read saved config: %v", err)
|
||||
}
|
||||
contentStr := string(content)
|
||||
if !strings.Contains(contentStr, "repository") {
|
||||
t.Error("saved config missing repository field")
|
||||
}
|
||||
if !strings.Contains(contentStr, "8080") {
|
||||
t.Error("saved config missing port value")
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "saves empty configuration",
|
||||
config: &GlobalConfig{},
|
||||
verify: func(t *testing.T, configPath string) {
|
||||
if _, err := os.Stat(configPath); os.IsNotExist(err) {
|
||||
t.Error("config file not created")
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
configPath := filepath.Join(tempDir, "subdir", "config.yaml")
|
||||
|
||||
err := SaveGlobalConfig(tt.config, configPath)
|
||||
if err != nil {
|
||||
t.Errorf("SaveGlobalConfig failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Verify file exists
|
||||
if _, err := os.Stat(configPath); err != nil {
|
||||
t.Errorf("config file not created: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Verify file permissions
|
||||
info, err := os.Stat(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to stat config file: %v", err)
|
||||
}
|
||||
if info.Mode().Perm() != 0644 {
|
||||
t.Errorf("expected permissions 0644, got %v", info.Mode().Perm())
|
||||
}
|
||||
|
||||
// Verify content can be loaded back
|
||||
loadedConfig, err := LoadGlobalConfig(configPath)
|
||||
if err != nil {
|
||||
t.Errorf("failed to reload saved config: %v", err)
|
||||
} else if loadedConfig == nil {
|
||||
t.Error("loaded config is nil")
|
||||
}
|
||||
|
||||
if tt.verify != nil {
|
||||
tt.verify(t, configPath)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test: SaveGlobalConfig creates directory
|
||||
func TestSaveGlobalConfig_CreatesDirectory(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
configPath := filepath.Join(tempDir, "nested", "dirs", "config.yaml")
|
||||
|
||||
config := &GlobalConfig{}
|
||||
err := SaveGlobalConfig(config, configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("SaveGlobalConfig failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify nested directories were created
|
||||
if _, err := os.Stat(filepath.Dir(configPath)); err != nil {
|
||||
t.Errorf("directory not created: %v", err)
|
||||
}
|
||||
|
||||
// Verify file exists
|
||||
if _, err := os.Stat(configPath); err != nil {
|
||||
t.Errorf("config file not created: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test: GlobalConfig.IsEmpty checks if config is empty
|
||||
func TestGlobalConfig_IsEmpty(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
config *GlobalConfig
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "nil config is empty",
|
||||
config: nil,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "default config is empty",
|
||||
config: &GlobalConfig{},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "config with only DNS IP is empty",
|
||||
config: &GlobalConfig{
|
||||
Cloud: struct {
|
||||
DNS struct {
|
||||
IP string `yaml:"ip" json:"ip"`
|
||||
ExternalResolver string `yaml:"externalResolver" json:"externalResolver"`
|
||||
} `yaml:"dns" json:"dns"`
|
||||
Router struct {
|
||||
IP string `yaml:"ip" json:"ip"`
|
||||
DynamicDns string `yaml:"dynamicDns" json:"dynamicDns"`
|
||||
} `yaml:"router" json:"router"`
|
||||
Dnsmasq struct {
|
||||
Interface string `yaml:"interface" json:"interface"`
|
||||
} `yaml:"dnsmasq" json:"dnsmasq"`
|
||||
}{
|
||||
DNS: struct {
|
||||
IP string `yaml:"ip" json:"ip"`
|
||||
ExternalResolver string `yaml:"externalResolver" json:"externalResolver"`
|
||||
}{
|
||||
IP: "192.168.1.1",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "config with only Talos version is empty",
|
||||
config: &GlobalConfig{
|
||||
Cluster: struct {
|
||||
EndpointIP string `yaml:"endpointIp" json:"endpointIp"`
|
||||
Nodes struct {
|
||||
Talos struct {
|
||||
Version string `yaml:"version" json:"version"`
|
||||
} `yaml:"talos" json:"talos"`
|
||||
} `yaml:"nodes" json:"nodes"`
|
||||
}{
|
||||
Nodes: struct {
|
||||
Talos struct {
|
||||
Version string `yaml:"version" json:"version"`
|
||||
} `yaml:"talos" json:"talos"`
|
||||
}{
|
||||
Talos: struct {
|
||||
Version string `yaml:"version" json:"version"`
|
||||
}{
|
||||
Version: "v1.8.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "config with both DNS IP and Talos version is not empty",
|
||||
config: &GlobalConfig{
|
||||
Cloud: struct {
|
||||
DNS struct {
|
||||
IP string `yaml:"ip" json:"ip"`
|
||||
ExternalResolver string `yaml:"externalResolver" json:"externalResolver"`
|
||||
} `yaml:"dns" json:"dns"`
|
||||
Router struct {
|
||||
IP string `yaml:"ip" json:"ip"`
|
||||
DynamicDns string `yaml:"dynamicDns" json:"dynamicDns"`
|
||||
} `yaml:"router" json:"router"`
|
||||
Dnsmasq struct {
|
||||
Interface string `yaml:"interface" json:"interface"`
|
||||
} `yaml:"dnsmasq" json:"dnsmasq"`
|
||||
}{
|
||||
DNS: struct {
|
||||
IP string `yaml:"ip" json:"ip"`
|
||||
ExternalResolver string `yaml:"externalResolver" json:"externalResolver"`
|
||||
}{
|
||||
IP: "192.168.1.1",
|
||||
},
|
||||
},
|
||||
Cluster: struct {
|
||||
EndpointIP string `yaml:"endpointIp" json:"endpointIp"`
|
||||
Nodes struct {
|
||||
Talos struct {
|
||||
Version string `yaml:"version" json:"version"`
|
||||
} `yaml:"talos" json:"talos"`
|
||||
} `yaml:"nodes" json:"nodes"`
|
||||
}{
|
||||
Nodes: struct {
|
||||
Talos struct {
|
||||
Version string `yaml:"version" json:"version"`
|
||||
} `yaml:"talos" json:"talos"`
|
||||
}{
|
||||
Talos: struct {
|
||||
Version string `yaml:"version" json:"version"`
|
||||
}{
|
||||
Version: "v1.8.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := tt.config.IsEmpty()
|
||||
if got != tt.want {
|
||||
t.Errorf("IsEmpty() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test: LoadCloudConfig loads instance configuration
|
||||
// TestLoadCloudConfig verifies that LoadCloudConfig parses a YAML instance
// configuration file and populates the InstanceConfig fields (cloud, cluster,
// and nested nodes/talos settings).
func TestLoadCloudConfig(t *testing.T) {
	tests := []struct {
		name       string
		configYAML string // raw YAML written to a temp config file
		verify     func(t *testing.T, config *InstanceConfig)
		wantErr    bool
	}{
		{
			name: "loads complete instance configuration",
			// NOTE(review): YAML nesting below assumes cloud.* / cluster.*
			// map onto InstanceConfig's yaml tags — confirm against the type.
			configYAML: `cloud:
  router:
    ip: "192.168.1.254"
  dns:
    ip: "192.168.1.1"
    externalResolver: "8.8.8.8"
  dhcpRange: "192.168.1.100,192.168.1.200"
  baseDomain: "example.com"
  domain: "home"
  internalDomain: "internal.example.com"
cluster:
  name: "my-cluster"
  loadBalancerIp: "192.168.1.10"
  nodes:
    talos:
      version: "v1.8.0"
    activeNodes:
      - node1:
          role: "control"
          interface: "eth0"
          disk: "/dev/sda"
`,
			verify: func(t *testing.T, config *InstanceConfig) {
				if config.Cloud.BaseDomain != "example.com" {
					t.Error("base domain not loaded correctly")
				}
				if config.Cluster.Name != "my-cluster" {
					t.Error("cluster name not loaded correctly")
				}
				if config.Cluster.Nodes.Talos.Version != "v1.8.0" {
					t.Error("talos version not loaded correctly")
				}
			},
			wantErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tempDir := t.TempDir()
			configPath := filepath.Join(tempDir, "config.yaml")

			if err := os.WriteFile(configPath, []byte(tt.configYAML), 0644); err != nil {
				t.Fatalf("setup failed: %v", err)
			}

			config, err := LoadCloudConfig(configPath)
			if tt.wantErr {
				if err == nil {
					t.Error("expected error, got nil")
				}
				return
			}

			if err != nil {
				t.Errorf("unexpected error: %v", err)
				return
			}

			if config == nil {
				t.Fatal("config is nil")
			}

			if tt.verify != nil {
				tt.verify(t, config)
			}
		})
	}
}
|
||||
|
||||
// Test: LoadCloudConfig error cases
|
||||
func TestLoadCloudConfig_Errors(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setupFunc func(t *testing.T) string
|
||||
errContains string
|
||||
}{
|
||||
{
|
||||
name: "non-existent file",
|
||||
setupFunc: func(t *testing.T) string {
|
||||
return filepath.Join(t.TempDir(), "nonexistent.yaml")
|
||||
},
|
||||
errContains: "reading config file",
|
||||
},
|
||||
{
|
||||
name: "invalid yaml",
|
||||
setupFunc: func(t *testing.T) string {
|
||||
tempDir := t.TempDir()
|
||||
configPath := filepath.Join(tempDir, "config.yaml")
|
||||
content := `invalid: yaml: [[[`
|
||||
if err := os.WriteFile(configPath, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("setup failed: %v", err)
|
||||
}
|
||||
return configPath
|
||||
},
|
||||
errContains: "parsing config file",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
configPath := tt.setupFunc(t)
|
||||
_, err := LoadCloudConfig(configPath)
|
||||
|
||||
if err == nil {
|
||||
t.Error("expected error, got nil")
|
||||
} else if !strings.Contains(err.Error(), tt.errContains) {
|
||||
t.Errorf("error %q does not contain %q", err.Error(), tt.errContains)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test: SaveCloudConfig saves instance configuration
|
||||
// TestSaveCloudConfig verifies that SaveCloudConfig writes an InstanceConfig
// to disk (creating intermediate directories) and that the written file can
// be loaded back with LoadCloudConfig.
func TestSaveCloudConfig(t *testing.T) {
	tests := []struct {
		name   string
		config *InstanceConfig
		verify func(t *testing.T, configPath string)
	}{
		{
			name: "saves instance configuration",
			// The literal below must mirror InstanceConfig.Cloud's anonymous
			// struct declaration exactly, field for field.
			config: &InstanceConfig{
				Cloud: struct {
					Router struct {
						IP string `yaml:"ip" json:"ip"`
					} `yaml:"router" json:"router"`
					DNS struct {
						IP               string `yaml:"ip" json:"ip"`
						ExternalResolver string `yaml:"externalResolver" json:"externalResolver"`
					} `yaml:"dns" json:"dns"`
					DHCPRange string `yaml:"dhcpRange" json:"dhcpRange"`
					Dnsmasq   struct {
						Interface string `yaml:"interface" json:"interface"`
					} `yaml:"dnsmasq" json:"dnsmasq"`
					BaseDomain     string `yaml:"baseDomain" json:"baseDomain"`
					Domain         string `yaml:"domain" json:"domain"`
					InternalDomain string `yaml:"internalDomain" json:"internalDomain"`
					NFS            struct {
						MediaPath       string `yaml:"mediaPath" json:"mediaPath"`
						Host            string `yaml:"host" json:"host"`
						StorageCapacity string `yaml:"storageCapacity" json:"storageCapacity"`
					} `yaml:"nfs" json:"nfs"`
					DockerRegistryHost string `yaml:"dockerRegistryHost" json:"dockerRegistryHost"`
					Backup             struct {
						Root string `yaml:"root" json:"root"`
					} `yaml:"backup" json:"backup"`
				}{
					BaseDomain: "example.com",
					Domain:     "home",
				},
			},
			verify: func(t *testing.T, configPath string) {
				content, err := os.ReadFile(configPath)
				if err != nil {
					t.Fatalf("failed to read saved config: %v", err)
				}
				contentStr := string(content)
				if !strings.Contains(contentStr, "example.com") {
					t.Error("saved config missing base domain")
				}
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tempDir := t.TempDir()
			// Nested path: confirms SaveCloudConfig creates parent directories.
			configPath := filepath.Join(tempDir, "subdir", "config.yaml")

			err := SaveCloudConfig(tt.config, configPath)
			if err != nil {
				t.Errorf("SaveCloudConfig failed: %v", err)
				return
			}

			// Verify file exists
			if _, err := os.Stat(configPath); err != nil {
				t.Errorf("config file not created: %v", err)
				return
			}

			// Verify content can be loaded back
			loadedConfig, err := LoadCloudConfig(configPath)
			if err != nil {
				t.Errorf("failed to reload saved config: %v", err)
			} else if loadedConfig == nil {
				t.Error("loaded config is nil")
			}

			if tt.verify != nil {
				tt.verify(t, configPath)
			}
		})
	}
}
|
||||
|
||||
// Test: Round-trip save and load preserves data
|
||||
// TestGlobalConfig_RoundTrip confirms a GlobalConfig written by
// SaveGlobalConfig can be reloaded by LoadGlobalConfig with every field
// value preserved.
func TestGlobalConfig_RoundTrip(t *testing.T) {
	tempDir := t.TempDir()
	configPath := filepath.Join(tempDir, "config.yaml")

	// Create config with all fields populated. The anonymous struct literals
	// must mirror GlobalConfig's declaration exactly.
	original := &GlobalConfig{
		Wildcloud: struct {
			Repository      string   `yaml:"repository" json:"repository"`
			CurrentPhase    string   `yaml:"currentPhase" json:"currentPhase"`
			CompletedPhases []string `yaml:"completedPhases" json:"completedPhases"`
		}{
			Repository:      "https://github.com/example/repo",
			CurrentPhase:    "setup",
			CompletedPhases: []string{"phase1", "phase2"},
		},
		Server: struct {
			Port int    `yaml:"port" json:"port"`
			Host string `yaml:"host" json:"host"`
		}{
			Port: 8080,
			Host: "localhost",
		},
		Operator: struct {
			Email string `yaml:"email" json:"email"`
		}{
			Email: "admin@example.com",
		},
	}

	// Save config
	if err := SaveGlobalConfig(original, configPath); err != nil {
		t.Fatalf("SaveGlobalConfig failed: %v", err)
	}

	// Load config
	loaded, err := LoadGlobalConfig(configPath)
	if err != nil {
		t.Fatalf("LoadGlobalConfig failed: %v", err)
	}

	// Verify all fields match
	if loaded.Wildcloud.Repository != original.Wildcloud.Repository {
		t.Errorf("repository mismatch: got %q, want %q", loaded.Wildcloud.Repository, original.Wildcloud.Repository)
	}
	if loaded.Server.Port != original.Server.Port {
		t.Errorf("port mismatch: got %d, want %d", loaded.Server.Port, original.Server.Port)
	}
	if loaded.Operator.Email != original.Operator.Email {
		t.Errorf("email mismatch: got %q, want %q", loaded.Operator.Email, original.Operator.Email)
	}
}
|
||||
|
||||
// Test: Round-trip save and load for instance config
|
||||
func TestInstanceConfig_RoundTrip(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
configPath := filepath.Join(tempDir, "config.yaml")
|
||||
|
||||
// Create instance config
|
||||
original := &InstanceConfig{}
|
||||
original.Cloud.BaseDomain = "example.com"
|
||||
original.Cloud.Domain = "home"
|
||||
original.Cluster.Name = "my-cluster"
|
||||
|
||||
// Save config
|
||||
if err := SaveCloudConfig(original, configPath); err != nil {
|
||||
t.Fatalf("SaveCloudConfig failed: %v", err)
|
||||
}
|
||||
|
||||
// Load config
|
||||
loaded, err := LoadCloudConfig(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("LoadCloudConfig failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify fields match
|
||||
if loaded.Cloud.BaseDomain != original.Cloud.BaseDomain {
|
||||
t.Errorf("base domain mismatch: got %q, want %q", loaded.Cloud.BaseDomain, original.Cloud.BaseDomain)
|
||||
}
|
||||
if loaded.Cluster.Name != original.Cluster.Name {
|
||||
t.Errorf("cluster name mismatch: got %q, want %q", loaded.Cluster.Name, original.Cluster.Name)
|
||||
}
|
||||
}
|
||||
905
internal/config/manager_test.go
Normal file
905
internal/config/manager_test.go
Normal file
@@ -0,0 +1,905 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/wild-cloud/wild-central/daemon/internal/storage"
|
||||
)
|
||||
|
||||
// Test: NewManager creates manager successfully
|
||||
func TestNewManager(t *testing.T) {
|
||||
m := NewManager()
|
||||
if m == nil {
|
||||
t.Fatal("NewManager returned nil")
|
||||
}
|
||||
if m.yq == nil {
|
||||
t.Error("Manager.yq is nil")
|
||||
}
|
||||
}
|
||||
|
||||
// Test: EnsureInstanceConfig creates config file with proper structure
|
||||
// TestEnsureInstanceConfig verifies that EnsureInstanceConfig creates a
// default config.yaml when missing, leaves an existing valid config alone,
// and rejects an existing config that is not valid YAML.
func TestEnsureInstanceConfig(t *testing.T) {
	tests := []struct {
		name        string
		setupFunc   func(t *testing.T, instancePath string) // optional pre-population of the instance dir
		wantErr     bool
		errContains string
	}{
		{
			name:      "creates config when not exists",
			setupFunc: nil,
			wantErr:   false,
		},
		{
			name: "returns nil when config exists",
			setupFunc: func(t *testing.T, instancePath string) {
				configPath := filepath.Join(instancePath, "config.yaml")
				// Minimal but structurally complete instance config.
				// NOTE(review): nesting reconstructed — confirm it matches the
				// default template EnsureInstanceConfig writes.
				content := `baseDomain: "test.local"
domain: "test"
internalDomain: "internal.test"
dhcpRange: ""
backup:
  root: ""
nfs:
  host: ""
  mediaPath: ""
cluster:
  name: ""
  loadBalancerIp: ""
  ipAddressPool: ""
  hostnamePrefix: ""
  certManager:
    cloudflare:
      domain: ""
      zoneID: ""
  externalDns:
    ownerId: ""
  nodes:
    talos:
      version: ""
      schematicId: ""
    control:
      vip: ""
    activeNodes: []
`
				if err := storage.WriteFile(configPath, []byte(content), 0644); err != nil {
					t.Fatalf("setup failed: %v", err)
				}
			},
			wantErr: false,
		},
		{
			name: "returns error when config is invalid yaml",
			setupFunc: func(t *testing.T, instancePath string) {
				configPath := filepath.Join(instancePath, "config.yaml")
				content := `invalid: yaml: content: [[[`
				if err := storage.WriteFile(configPath, []byte(content), 0644); err != nil {
					t.Fatalf("setup failed: %v", err)
				}
			},
			wantErr:     true,
			errContains: "invalid config file",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			instancePath := t.TempDir()
			m := NewManager()

			if tt.setupFunc != nil {
				tt.setupFunc(t, instancePath)
			}

			err := m.EnsureInstanceConfig(instancePath)
			if tt.wantErr {
				if err == nil {
					t.Error("expected error, got nil")
				} else if tt.errContains != "" && !strings.Contains(err.Error(), tt.errContains) {
					t.Errorf("error %q does not contain %q", err.Error(), tt.errContains)
				}
				return
			}

			if err != nil {
				t.Errorf("unexpected error: %v", err)
				return
			}

			// Verify config file exists
			configPath := filepath.Join(instancePath, "config.yaml")
			if !storage.FileExists(configPath) {
				t.Error("config file not created")
			}

			// Verify config is valid YAML
			if err := m.ValidateConfig(configPath); err != nil {
				t.Errorf("config validation failed: %v", err)
			}

			// Verify config has expected structure
			content, err := storage.ReadFile(configPath)
			if err != nil {
				t.Fatalf("failed to read config: %v", err)
			}
			contentStr := string(content)
			requiredFields := []string{"baseDomain:", "domain:", "cluster:", "backup:", "nfs:"}
			for _, field := range requiredFields {
				if !strings.Contains(contentStr, field) {
					t.Errorf("config missing required field: %s", field)
				}
			}
		})
	}
}
|
||||
|
||||
// Test: GetConfigValue retrieves values correctly
|
||||
// TestGetConfigValue verifies Manager.GetConfigValue resolves simple keys,
// dotted nested paths, array indices, empty strings, and missing keys
// (which yield the string "null").
func TestGetConfigValue(t *testing.T) {
	tests := []struct {
		name        string
		configYAML  string
		key         string // dotted path expression passed to GetConfigValue
		want        string
		wantErr     bool
		errContains string
	}{
		{
			name: "get simple string value",
			configYAML: `baseDomain: "example.com"
domain: "test"
`,
			key:     "baseDomain",
			want:    "example.com",
			wantErr: false,
		},
		{
			name: "get nested value with dot notation",
			configYAML: `cluster:
  name: "my-cluster"
  nodes:
    talos:
      version: "v1.8.0"
`,
			key:     "cluster.nodes.talos.version",
			want:    "v1.8.0",
			wantErr: false,
		},
		{
			name: "get empty string value",
			configYAML: `baseDomain: ""
`,
			key:     "baseDomain",
			want:    "",
			wantErr: false,
		},
		{
			name: "get non-existent key returns null",
			configYAML: `baseDomain: "example.com"
`,
			key:     "nonexistent",
			want:    "null",
			wantErr: false,
		},
		{
			name: "get from array",
			configYAML: `cluster:
  nodes:
    activeNodes:
      - "node1"
      - "node2"
`,
			key:     "cluster.nodes.activeNodes.[0]",
			want:    "node1",
			wantErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tempDir := t.TempDir()
			configPath := filepath.Join(tempDir, "config.yaml")

			if err := storage.WriteFile(configPath, []byte(tt.configYAML), 0644); err != nil {
				t.Fatalf("setup failed: %v", err)
			}

			m := NewManager()
			got, err := m.GetConfigValue(configPath, tt.key)

			if tt.wantErr {
				if err == nil {
					t.Error("expected error, got nil")
				} else if tt.errContains != "" && !strings.Contains(err.Error(), tt.errContains) {
					t.Errorf("error %q does not contain %q", err.Error(), tt.errContains)
				}
				return
			}

			if err != nil {
				t.Errorf("unexpected error: %v", err)
				return
			}

			if got != tt.want {
				t.Errorf("got %q, want %q", got, tt.want)
			}
		})
	}
}
|
||||
|
||||
// Test: GetConfigValue error cases
|
||||
func TestGetConfigValue_Errors(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setupFunc func(t *testing.T) string
|
||||
key string
|
||||
errContains string
|
||||
}{
|
||||
{
|
||||
name: "non-existent file",
|
||||
setupFunc: func(t *testing.T) string {
|
||||
return filepath.Join(t.TempDir(), "nonexistent.yaml")
|
||||
},
|
||||
key: "baseDomain",
|
||||
errContains: "config file not found",
|
||||
},
|
||||
{
|
||||
name: "malformed yaml",
|
||||
setupFunc: func(t *testing.T) string {
|
||||
tempDir := t.TempDir()
|
||||
configPath := filepath.Join(tempDir, "config.yaml")
|
||||
content := `invalid: yaml: [[[`
|
||||
if err := storage.WriteFile(configPath, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("setup failed: %v", err)
|
||||
}
|
||||
return configPath
|
||||
},
|
||||
key: "baseDomain",
|
||||
errContains: "getting config value",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
configPath := tt.setupFunc(t)
|
||||
m := NewManager()
|
||||
|
||||
_, err := m.GetConfigValue(configPath, tt.key)
|
||||
if err == nil {
|
||||
t.Error("expected error, got nil")
|
||||
} else if !strings.Contains(err.Error(), tt.errContains) {
|
||||
t.Errorf("error %q does not contain %q", err.Error(), tt.errContains)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test: SetConfigValue sets values correctly
|
||||
// TestSetConfigValue verifies Manager.SetConfigValue writes simple, nested,
// pre-existing, newly-created, and specially-quoted values, and that the
// file remains valid YAML afterwards.
func TestSetConfigValue(t *testing.T) {
	tests := []struct {
		name        string
		initialYAML string
		key         string
		value       string
		verifyFunc  func(t *testing.T, configPath string)
	}{
		{
			name: "set simple value",
			initialYAML: `baseDomain: ""
domain: ""
`,
			key:   "baseDomain",
			value: "example.com",
			verifyFunc: func(t *testing.T, configPath string) {
				m := NewManager()
				got, err := m.GetConfigValue(configPath, "baseDomain")
				if err != nil {
					t.Fatalf("verify failed: %v", err)
				}
				if got != "example.com" {
					t.Errorf("got %q, want %q", got, "example.com")
				}
			},
		},
		{
			name: "set nested value",
			initialYAML: `cluster:
  name: ""
  nodes:
    talos:
      version: ""
`,
			key:   "cluster.nodes.talos.version",
			value: "v1.8.0",
			verifyFunc: func(t *testing.T, configPath string) {
				m := NewManager()
				got, err := m.GetConfigValue(configPath, "cluster.nodes.talos.version")
				if err != nil {
					t.Fatalf("verify failed: %v", err)
				}
				if got != "v1.8.0" {
					t.Errorf("got %q, want %q", got, "v1.8.0")
				}
			},
		},
		{
			name: "update existing value",
			initialYAML: `baseDomain: "old.com"
`,
			key:   "baseDomain",
			value: "new.com",
			verifyFunc: func(t *testing.T, configPath string) {
				m := NewManager()
				got, err := m.GetConfigValue(configPath, "baseDomain")
				if err != nil {
					t.Fatalf("verify failed: %v", err)
				}
				if got != "new.com" {
					t.Errorf("got %q, want %q", got, "new.com")
				}
			},
		},
		{
			name: "create new nested path",
			initialYAML: `cluster: {}
`,
			key:   "cluster.newField",
			value: "newValue",
			verifyFunc: func(t *testing.T, configPath string) {
				m := NewManager()
				got, err := m.GetConfigValue(configPath, "cluster.newField")
				if err != nil {
					t.Fatalf("verify failed: %v", err)
				}
				if got != "newValue" {
					t.Errorf("got %q, want %q", got, "newValue")
				}
			},
		},
		{
			name: "set value with special characters",
			initialYAML: `baseDomain: ""
`,
			key:   "baseDomain",
			value: `special"quotes'and\backslashes`,
			verifyFunc: func(t *testing.T, configPath string) {
				m := NewManager()
				got, err := m.GetConfigValue(configPath, "baseDomain")
				if err != nil {
					t.Fatalf("verify failed: %v", err)
				}
				if got != `special"quotes'and\backslashes` {
					t.Errorf("got %q, want %q", got, `special"quotes'and\backslashes`)
				}
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tempDir := t.TempDir()
			configPath := filepath.Join(tempDir, "config.yaml")

			if err := storage.WriteFile(configPath, []byte(tt.initialYAML), 0644); err != nil {
				t.Fatalf("setup failed: %v", err)
			}

			m := NewManager()
			if err := m.SetConfigValue(configPath, tt.key, tt.value); err != nil {
				t.Errorf("SetConfigValue failed: %v", err)
				return
			}

			// Verify the value was set correctly
			tt.verifyFunc(t, configPath)

			// Verify config is still valid YAML
			if err := m.ValidateConfig(configPath); err != nil {
				t.Errorf("config validation failed after set: %v", err)
			}
		})
	}
}
|
||||
|
||||
// Test: SetConfigValue error cases
|
||||
func TestSetConfigValue_Errors(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setupFunc func(t *testing.T) string
|
||||
key string
|
||||
value string
|
||||
errContains string
|
||||
}{
|
||||
{
|
||||
name: "non-existent file",
|
||||
setupFunc: func(t *testing.T) string {
|
||||
return filepath.Join(t.TempDir(), "nonexistent.yaml")
|
||||
},
|
||||
key: "baseDomain",
|
||||
value: "example.com",
|
||||
errContains: "config file not found",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
configPath := tt.setupFunc(t)
|
||||
m := NewManager()
|
||||
|
||||
err := m.SetConfigValue(configPath, tt.key, tt.value)
|
||||
if err == nil {
|
||||
t.Error("expected error, got nil")
|
||||
} else if !strings.Contains(err.Error(), tt.errContains) {
|
||||
t.Errorf("error %q does not contain %q", err.Error(), tt.errContains)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test: SetConfigValue with concurrent access
|
||||
func TestSetConfigValue_ConcurrentAccess(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
configPath := filepath.Join(tempDir, "config.yaml")
|
||||
|
||||
initialYAML := `counter: "0"
|
||||
`
|
||||
if err := storage.WriteFile(configPath, []byte(initialYAML), 0644); err != nil {
|
||||
t.Fatalf("setup failed: %v", err)
|
||||
}
|
||||
|
||||
m := NewManager()
|
||||
const numGoroutines = 10
|
||||
|
||||
var wg sync.WaitGroup
|
||||
errors := make(chan error, numGoroutines)
|
||||
|
||||
// Launch multiple goroutines trying to write different values
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func(val int) {
|
||||
defer wg.Done()
|
||||
key := "counter"
|
||||
value := string(rune('0' + val))
|
||||
if err := m.SetConfigValue(configPath, key, value); err != nil {
|
||||
errors <- err
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(errors)
|
||||
|
||||
// Check if any errors occurred
|
||||
for err := range errors {
|
||||
t.Errorf("concurrent write error: %v", err)
|
||||
}
|
||||
|
||||
// Verify config is still valid after concurrent access
|
||||
if err := m.ValidateConfig(configPath); err != nil {
|
||||
t.Errorf("config validation failed after concurrent writes: %v", err)
|
||||
}
|
||||
|
||||
// Verify we can read the value (should be one of the written values)
|
||||
value, err := m.GetConfigValue(configPath, "counter")
|
||||
if err != nil {
|
||||
t.Errorf("failed to read value after concurrent writes: %v", err)
|
||||
}
|
||||
if value == "" || value == "null" {
|
||||
t.Error("counter value is empty after concurrent writes")
|
||||
}
|
||||
}
|
||||
|
||||
// Test: EnsureConfigValue sets value only when not set
|
||||
func TestEnsureConfigValue(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
initialYAML string
|
||||
key string
|
||||
value string
|
||||
expectSet bool
|
||||
}{
|
||||
{
|
||||
name: "sets value when empty string",
|
||||
initialYAML: `baseDomain: ""
|
||||
`,
|
||||
key: "baseDomain",
|
||||
value: "example.com",
|
||||
expectSet: true,
|
||||
},
|
||||
{
|
||||
name: "sets value when null",
|
||||
initialYAML: `baseDomain: null
|
||||
`,
|
||||
key: "baseDomain",
|
||||
value: "example.com",
|
||||
expectSet: true,
|
||||
},
|
||||
{
|
||||
name: "does not set value when already set",
|
||||
initialYAML: `baseDomain: "existing.com"
|
||||
`,
|
||||
key: "baseDomain",
|
||||
value: "new.com",
|
||||
expectSet: false,
|
||||
},
|
||||
{
|
||||
name: "sets value when key does not exist",
|
||||
initialYAML: `domain: "test"
|
||||
`,
|
||||
key: "baseDomain",
|
||||
value: "example.com",
|
||||
expectSet: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
configPath := filepath.Join(tempDir, "config.yaml")
|
||||
|
||||
if err := storage.WriteFile(configPath, []byte(tt.initialYAML), 0644); err != nil {
|
||||
t.Fatalf("setup failed: %v", err)
|
||||
}
|
||||
|
||||
m := NewManager()
|
||||
|
||||
// Get initial value
|
||||
initialVal, _ := m.GetConfigValue(configPath, tt.key)
|
||||
|
||||
// Call EnsureConfigValue
|
||||
if err := m.EnsureConfigValue(configPath, tt.key, tt.value); err != nil {
|
||||
t.Errorf("EnsureConfigValue failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Get final value
|
||||
finalVal, err := m.GetConfigValue(configPath, tt.key)
|
||||
if err != nil {
|
||||
t.Fatalf("GetConfigValue failed: %v", err)
|
||||
}
|
||||
|
||||
if tt.expectSet {
|
||||
if finalVal != tt.value {
|
||||
t.Errorf("expected value to be set to %q, got %q", tt.value, finalVal)
|
||||
}
|
||||
} else {
|
||||
if finalVal != initialVal {
|
||||
t.Errorf("expected value to remain %q, got %q", initialVal, finalVal)
|
||||
}
|
||||
}
|
||||
|
||||
// Call EnsureConfigValue again - should be idempotent
|
||||
if err := m.EnsureConfigValue(configPath, tt.key, "different.com"); err != nil {
|
||||
t.Errorf("second EnsureConfigValue failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Value should not change on second call
|
||||
secondVal, err := m.GetConfigValue(configPath, tt.key)
|
||||
if err != nil {
|
||||
t.Fatalf("GetConfigValue failed: %v", err)
|
||||
}
|
||||
if secondVal != finalVal {
|
||||
t.Errorf("value changed on second ensure: %q -> %q", finalVal, secondVal)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test: ValidateConfig validates YAML correctly
|
||||
func TestValidateConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
configYAML string
|
||||
wantErr bool
|
||||
errContains string
|
||||
}{
|
||||
{
|
||||
name: "valid yaml",
|
||||
configYAML: `baseDomain: "example.com"
|
||||
domain: "test"
|
||||
cluster:
|
||||
name: "my-cluster"
|
||||
`,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid yaml - bad indentation",
|
||||
configYAML: `baseDomain: "example.com"\n domain: "test"`,
|
||||
wantErr: true,
|
||||
errContains: "yaml validation failed",
|
||||
},
|
||||
{
|
||||
name: "invalid yaml - unclosed bracket",
|
||||
configYAML: `cluster: { name: "test"`,
|
||||
wantErr: true,
|
||||
errContains: "yaml validation failed",
|
||||
},
|
||||
{
|
||||
name: "empty file",
|
||||
configYAML: "",
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
configPath := filepath.Join(tempDir, "config.yaml")
|
||||
|
||||
if err := storage.WriteFile(configPath, []byte(tt.configYAML), 0644); err != nil {
|
||||
t.Fatalf("setup failed: %v", err)
|
||||
}
|
||||
|
||||
m := NewManager()
|
||||
err := m.ValidateConfig(configPath)
|
||||
|
||||
if tt.wantErr {
|
||||
if err == nil {
|
||||
t.Error("expected error, got nil")
|
||||
} else if tt.errContains != "" && !strings.Contains(err.Error(), tt.errContains) {
|
||||
t.Errorf("error %q does not contain %q", err.Error(), tt.errContains)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test: ValidateConfig error cases
|
||||
func TestValidateConfig_Errors(t *testing.T) {
|
||||
t.Run("non-existent file", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
configPath := filepath.Join(tempDir, "nonexistent.yaml")
|
||||
|
||||
m := NewManager()
|
||||
err := m.ValidateConfig(configPath)
|
||||
|
||||
if err == nil {
|
||||
t.Error("expected error, got nil")
|
||||
} else if !strings.Contains(err.Error(), "config file not found") {
|
||||
t.Errorf("error %q does not contain 'config file not found'", err.Error())
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Test: CopyConfig copies configuration correctly
|
||||
// TestCopyConfig verifies Manager.CopyConfig duplicates a config file
// byte-for-byte, creating the destination directory when needed and
// overwriting any pre-existing destination file.
func TestCopyConfig(t *testing.T) {
	tests := []struct {
		name        string
		srcYAML     string
		setupDst    func(t *testing.T, dstPath string) // optional pre-existing destination
		wantErr     bool
		errContains string
	}{
		{
			name: "copies config successfully",
			srcYAML: `baseDomain: "example.com"
domain: "test"
cluster:
  name: "my-cluster"
`,
			setupDst: nil,
			wantErr:  false,
		},
		{
			name:     "creates destination directory",
			srcYAML:  `baseDomain: "example.com"`,
			setupDst: nil,
			wantErr:  false,
		},
		{
			name: "overwrites existing destination",
			srcYAML: `baseDomain: "new.com"
`,
			setupDst: func(t *testing.T, dstPath string) {
				oldContent := `baseDomain: "old.com"`
				if err := storage.WriteFile(dstPath, []byte(oldContent), 0644); err != nil {
					t.Fatalf("setup failed: %v", err)
				}
			},
			wantErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tempDir := t.TempDir()
			srcPath := filepath.Join(tempDir, "source.yaml")
			// Nested path: exercises destination-directory creation.
			dstPath := filepath.Join(tempDir, "subdir", "dest.yaml")

			// Create source file
			if err := storage.WriteFile(srcPath, []byte(tt.srcYAML), 0644); err != nil {
				t.Fatalf("setup failed: %v", err)
			}

			// Setup destination if needed
			if tt.setupDst != nil {
				if err := storage.EnsureDir(filepath.Dir(dstPath), 0755); err != nil {
					t.Fatalf("setup failed: %v", err)
				}
				tt.setupDst(t, dstPath)
			}

			m := NewManager()
			err := m.CopyConfig(srcPath, dstPath)

			if tt.wantErr {
				if err == nil {
					t.Error("expected error, got nil")
				} else if tt.errContains != "" && !strings.Contains(err.Error(), tt.errContains) {
					t.Errorf("error %q does not contain %q", err.Error(), tt.errContains)
				}
				return
			}

			if err != nil {
				t.Errorf("unexpected error: %v", err)
				return
			}

			// Verify destination file exists
			if !storage.FileExists(dstPath) {
				t.Error("destination file not created")
			}

			// Verify content matches source
			srcContent, err := storage.ReadFile(srcPath)
			if err != nil {
				t.Fatalf("failed to read source: %v", err)
			}
			dstContent, err := storage.ReadFile(dstPath)
			if err != nil {
				t.Fatalf("failed to read destination: %v", err)
			}

			if string(srcContent) != string(dstContent) {
				t.Error("destination content does not match source")
			}

			// Verify destination is valid YAML
			if err := m.ValidateConfig(dstPath); err != nil {
				t.Errorf("destination config validation failed: %v", err)
			}
		})
	}
}
|
||||
|
||||
// Test: CopyConfig error cases
|
||||
func TestCopyConfig_Errors(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setupFunc func(t *testing.T, tempDir string) (srcPath, dstPath string)
|
||||
errContains string
|
||||
}{
|
||||
{
|
||||
name: "source file does not exist",
|
||||
setupFunc: func(t *testing.T, tempDir string) (string, string) {
|
||||
return filepath.Join(tempDir, "nonexistent.yaml"),
|
||||
filepath.Join(tempDir, "dest.yaml")
|
||||
},
|
||||
errContains: "source config file not found",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
srcPath, dstPath := tt.setupFunc(t, tempDir)
|
||||
|
||||
m := NewManager()
|
||||
err := m.CopyConfig(srcPath, dstPath)
|
||||
|
||||
if err == nil {
|
||||
t.Error("expected error, got nil")
|
||||
} else if !strings.Contains(err.Error(), tt.errContains) {
|
||||
t.Errorf("error %q does not contain %q", err.Error(), tt.errContains)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test: File permissions are preserved
|
||||
func TestEnsureInstanceConfig_FilePermissions(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
m := NewManager()
|
||||
|
||||
if err := m.EnsureInstanceConfig(tempDir); err != nil {
|
||||
t.Fatalf("EnsureInstanceConfig failed: %v", err)
|
||||
}
|
||||
|
||||
configPath := filepath.Join(tempDir, "config.yaml")
|
||||
info, err := os.Stat(configPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to stat config file: %v", err)
|
||||
}
|
||||
|
||||
// Verify file has 0644 permissions
|
||||
if info.Mode().Perm() != 0644 {
|
||||
t.Errorf("expected permissions 0644, got %v", info.Mode().Perm())
|
||||
}
|
||||
}
|
||||
|
||||
// Test: Idempotent config creation.
// TestEnsureInstanceConfig_Idempotent verifies that a second call to
// EnsureInstanceConfig leaves an existing config.yaml byte-for-byte
// unchanged — the call must create the file only when it is missing.
func TestEnsureInstanceConfig_Idempotent(t *testing.T) {
	tempDir := t.TempDir()
	m := NewManager()

	// First call creates config
	if err := m.EnsureInstanceConfig(tempDir); err != nil {
		t.Fatalf("first EnsureInstanceConfig failed: %v", err)
	}

	configPath := filepath.Join(tempDir, "config.yaml")
	firstContent, err := storage.ReadFile(configPath)
	if err != nil {
		t.Fatalf("failed to read config: %v", err)
	}

	// Second call should not modify config
	if err := m.EnsureInstanceConfig(tempDir); err != nil {
		t.Fatalf("second EnsureInstanceConfig failed: %v", err)
	}

	secondContent, err := storage.ReadFile(configPath)
	if err != nil {
		t.Fatalf("failed to read config: %v", err)
	}

	// Byte-level comparison: any difference means the second call
	// rewrote the file, which would clobber user-edited configs.
	if string(firstContent) != string(secondContent) {
		t.Error("config content changed on second call")
	}
}
|
||||
|
||||
// Test: Config structure contains all required fields.
// TestEnsureInstanceConfig_RequiredFields checks that the generated
// config.yaml includes every expected top-level and nested key.
// The check is a substring scan for "key:" tokens, not a YAML parse,
// so it validates presence only — not nesting depth or value types.
func TestEnsureInstanceConfig_RequiredFields(t *testing.T) {
	tempDir := t.TempDir()
	m := NewManager()

	if err := m.EnsureInstanceConfig(tempDir); err != nil {
		t.Fatalf("EnsureInstanceConfig failed: %v", err)
	}

	configPath := filepath.Join(tempDir, "config.yaml")
	content, err := storage.ReadFile(configPath)
	if err != nil {
		t.Fatalf("failed to read config: %v", err)
	}

	contentStr := string(content)
	// Keys the default instance config template must emit (camelCase per
	// the project's JSON/YAML naming convention).
	requiredFields := []string{
		"baseDomain:",
		"domain:",
		"internalDomain:",
		"dhcpRange:",
		"backup:",
		"nfs:",
		"cluster:",
		"loadBalancerIp:",
		"ipAddressPool:",
		"hostnamePrefix:",
		"certManager:",
		"externalDns:",
		"nodes:",
		"talos:",
		"version:",
		"schematicId:",
		"control:",
		"vip:",
		"activeNodes:",
	}

	for _, field := range requiredFields {
		if !strings.Contains(contentStr, field) {
			t.Errorf("config missing required field: %s", field)
		}
	}
}
|
||||
@@ -3,6 +3,7 @@ package discovery
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
@@ -28,19 +29,17 @@ func NewManager(dataDir string, instanceName string) *Manager {
|
||||
|
||||
return &Manager{
|
||||
dataDir: dataDir,
|
||||
nodeMgr: node.NewManager(dataDir),
|
||||
nodeMgr: node.NewManager(dataDir, instanceName),
|
||||
talosctl: tools.NewTalosconfigWithConfig(talosconfigPath),
|
||||
}
|
||||
}
|
||||
|
||||
// DiscoveredNode represents a discovered node on the network
|
||||
// DiscoveredNode represents a discovered node on the network (maintenance mode only)
|
||||
type DiscoveredNode struct {
|
||||
IP string `json:"ip"`
|
||||
Hostname string `json:"hostname,omitempty"`
|
||||
MaintenanceMode bool `json:"maintenance_mode"`
|
||||
Version string `json:"version,omitempty"`
|
||||
Interface string `json:"interface,omitempty"`
|
||||
Disks []string `json:"disks,omitempty"`
|
||||
IP string `json:"ip"`
|
||||
Hostname string `json:"hostname,omitempty"`
|
||||
MaintenanceMode bool `json:"maintenance_mode"`
|
||||
Version string `json:"version,omitempty"`
|
||||
}
|
||||
|
||||
// DiscoveryStatus represents the current state of discovery
|
||||
@@ -130,17 +129,42 @@ func (m *Manager) runDiscovery(instanceName string, ipList []string) {
|
||||
_ = m.writeDiscoveryStatus(instanceName, status)
|
||||
}()
|
||||
|
||||
// Discover nodes by probing each IP
|
||||
discoveredNodes := []DiscoveredNode{}
|
||||
// Discover nodes by probing each IP in parallel
|
||||
var wg sync.WaitGroup
|
||||
resultsChan := make(chan DiscoveredNode, len(ipList))
|
||||
|
||||
// Limit concurrent scans to avoid overwhelming the network
|
||||
semaphore := make(chan struct{}, 50)
|
||||
|
||||
for _, ip := range ipList {
|
||||
node, err := m.probeNode(ip)
|
||||
if err != nil {
|
||||
// Node not reachable or not a Talos node
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(ip string) {
|
||||
defer wg.Done()
|
||||
|
||||
discoveredNodes = append(discoveredNodes, *node)
|
||||
// Acquire semaphore
|
||||
semaphore <- struct{}{}
|
||||
defer func() { <-semaphore }()
|
||||
|
||||
node, err := m.probeNode(ip)
|
||||
if err != nil {
|
||||
// Node not reachable or not a Talos node
|
||||
return
|
||||
}
|
||||
|
||||
resultsChan <- *node
|
||||
}(ip)
|
||||
}
|
||||
|
||||
// Close results channel when all goroutines complete
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(resultsChan)
|
||||
}()
|
||||
|
||||
// Collect results and update status incrementally
|
||||
discoveredNodes := []DiscoveredNode{}
|
||||
for node := range resultsChan {
|
||||
discoveredNodes = append(discoveredNodes, node)
|
||||
|
||||
// Update status incrementally
|
||||
m.discoveryMu.Lock()
|
||||
@@ -151,37 +175,20 @@ func (m *Manager) runDiscovery(instanceName string, ipList []string) {
|
||||
}
|
||||
}
|
||||
|
||||
// probeNode attempts to detect if a node is running Talos
|
||||
// probeNode attempts to detect if a node is running Talos in maintenance mode
|
||||
func (m *Manager) probeNode(ip string) (*DiscoveredNode, error) {
|
||||
// Attempt to get version (quick connectivity test)
|
||||
version, err := m.talosctl.GetVersion(ip, false)
|
||||
// Try insecure connection first (maintenance mode)
|
||||
version, err := m.talosctl.GetVersion(ip, true)
|
||||
if err != nil {
|
||||
// Not in maintenance mode or not reachable
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Node is reachable, get hardware info
|
||||
hwInfo, err := m.nodeMgr.DetectHardware(ip)
|
||||
if err != nil {
|
||||
// Still count it as discovered even if we can't get full hardware
|
||||
return &DiscoveredNode{
|
||||
IP: ip,
|
||||
MaintenanceMode: false,
|
||||
Version: version,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Extract just the disk paths for discovery output
|
||||
diskPaths := make([]string, len(hwInfo.Disks))
|
||||
for i, disk := range hwInfo.Disks {
|
||||
diskPaths[i] = disk.Path
|
||||
}
|
||||
|
||||
// If insecure connection works, node is in maintenance mode
|
||||
return &DiscoveredNode{
|
||||
IP: ip,
|
||||
MaintenanceMode: hwInfo.MaintenanceMode,
|
||||
MaintenanceMode: true,
|
||||
Version: version,
|
||||
Interface: hwInfo.Interface,
|
||||
Disks: diskPaths,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -245,3 +252,132 @@ func (m *Manager) writeDiscoveryStatus(instanceName string, status *DiscoverySta
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CancelDiscovery cancels an in-progress discovery operation for the
// given instance by marking its persisted status as inactive and
// recording a cancellation message in the status's Error field.
//
// NOTE(review): this only flips the recorded status on disk; no channel
// or context is visible here to stop probe goroutines that may still be
// running — confirm whether runDiscovery observes this flag.
// NOTE(review): GetDiscoveryStatus is called while discoveryMu is held;
// verify it does not also acquire discoveryMu (would self-deadlock).
//
// Returns an error if the status cannot be read or written, or if no
// discovery is currently active.
func (m *Manager) CancelDiscovery(instanceName string) error {
	m.discoveryMu.Lock()
	defer m.discoveryMu.Unlock()

	// Get current status
	status, err := m.GetDiscoveryStatus(instanceName)
	if err != nil {
		return err
	}

	// Nothing to cancel if discovery is not running.
	if !status.Active {
		return fmt.Errorf("no discovery in progress")
	}

	// Mark discovery as cancelled
	status.Active = false
	status.Error = "Discovery cancelled by user"

	// Persist the updated status so clients polling it see the change.
	if err := m.writeDiscoveryStatus(instanceName, status); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// GetLocalNetworks discovers local network interfaces and returns their
// CIDR addresses. Loopback interfaces, interfaces that are down, and
// link-local (169.254.0.0/16) addresses are skipped.
// Only IPv4 networks are returned.
func GetLocalNetworks() ([]string, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return nil, fmt.Errorf("failed to get network interfaces: %w", err)
	}

	var cidrs []string
	for _, nic := range ifaces {
		// Ignore loopback interfaces and interfaces that are not up.
		if nic.Flags&net.FlagLoopback != 0 {
			continue
		}
		if nic.Flags&net.FlagUp == 0 {
			continue
		}

		addrs, addrErr := nic.Addrs()
		if addrErr != nil {
			continue
		}

		for _, a := range addrs {
			ipNet, isIPNet := a.(*net.IPNet)
			if !isIPNet {
				continue
			}
			// IPv4 only; drop link-local addresses.
			if ipNet.IP.To4() == nil || ipNet.IP.IsLinkLocalUnicast() {
				continue
			}
			cidrs = append(cidrs, ipNet.String())
		}
	}

	return cidrs, nil
}
|
||||
|
||||
// ExpandSubnet expands a CIDR notation subnet into individual IP addresses
|
||||
// Example: "192.168.8.0/24" → ["192.168.8.1", "192.168.8.2", ..., "192.168.8.254"]
|
||||
// Also handles single IPs (without CIDR notation)
|
||||
func ExpandSubnet(subnet string) ([]string, error) {
|
||||
// Check if it's a CIDR notation
|
||||
ip, ipnet, err := net.ParseCIDR(subnet)
|
||||
if err != nil {
|
||||
// Not a CIDR, might be single IP
|
||||
if net.ParseIP(subnet) != nil {
|
||||
return []string{subnet}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("invalid IP or CIDR: %s", subnet)
|
||||
}
|
||||
|
||||
// Special case: /32 (single host) - just return the IP
|
||||
ones, _ := ipnet.Mask.Size()
|
||||
if ones == 32 {
|
||||
return []string{ip.String()}, nil
|
||||
}
|
||||
|
||||
var ips []string
|
||||
|
||||
// Iterate through all IPs in the subnet
|
||||
for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); incIP(ip) {
|
||||
// Skip network address (first IP)
|
||||
if ip.Equal(ipnet.IP) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip broadcast address (last IP)
|
||||
if isLastIP(ip, ipnet) {
|
||||
continue
|
||||
}
|
||||
|
||||
ips = append(ips, ip.String())
|
||||
}
|
||||
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
// incIP increments an IP address
|
||||
func incIP(ip net.IP) {
|
||||
for j := len(ip) - 1; j >= 0; j-- {
|
||||
ip[j]++
|
||||
if ip[j] > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// isLastIP checks if an IP is the last IP in a subnet (broadcast address)
|
||||
func isLastIP(ip net.IP, ipnet *net.IPNet) bool {
|
||||
lastIP := make(net.IP, len(ip))
|
||||
for i := range ip {
|
||||
lastIP[i] = ip[i] | ^ipnet.Mask[i]
|
||||
}
|
||||
return ip.Equal(lastIP)
|
||||
}
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/wild-cloud/wild-central/daemon/internal/config"
|
||||
"github.com/wild-cloud/wild-central/daemon/internal/setup"
|
||||
@@ -20,11 +22,22 @@ type Manager struct {
|
||||
}
|
||||
|
||||
// NewManager creates a new node manager
|
||||
func NewManager(dataDir string) *Manager {
|
||||
func NewManager(dataDir string, instanceName string) *Manager {
|
||||
var talosctl *tools.Talosctl
|
||||
|
||||
// If instanceName is provided, use instance-specific talosconfig
|
||||
// Otherwise, create basic talosctl (will use --insecure mode)
|
||||
if instanceName != "" {
|
||||
talosconfigPath := tools.GetTalosconfigPath(dataDir, instanceName)
|
||||
talosctl = tools.NewTalosconfigWithConfig(talosconfigPath)
|
||||
} else {
|
||||
talosctl = tools.NewTalosctl()
|
||||
}
|
||||
|
||||
return &Manager{
|
||||
dataDir: dataDir,
|
||||
configMgr: config.NewManager(),
|
||||
talosctl: tools.NewTalosctl(),
|
||||
talosctl: talosctl,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -243,23 +256,53 @@ func (m *Manager) Add(instanceName string, node *Node) error {
|
||||
}
|
||||
|
||||
// Delete removes a node from config.yaml
|
||||
func (m *Manager) Delete(instanceName, nodeIdentifier string) error {
|
||||
// If skipReset is false, the node will be reset before deletion (with 30s timeout)
|
||||
func (m *Manager) Delete(instanceName, nodeIdentifier string, skipReset bool) error {
|
||||
// Get node to find hostname
|
||||
node, err := m.Get(instanceName, nodeIdentifier)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Reset node first unless skipReset is true
|
||||
if !skipReset {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Use goroutine to respect context timeout
|
||||
done := make(chan error, 1)
|
||||
go func() {
|
||||
done <- m.Reset(instanceName, nodeIdentifier)
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-done:
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to reset node before deletion (use skip_reset=true to force delete): %w", err)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("node reset timed out after 30 seconds (use skip_reset=true to force delete)")
|
||||
}
|
||||
}
|
||||
|
||||
// Delete node from config.yaml
|
||||
return m.deleteFromConfig(instanceName, node.Hostname)
|
||||
}
|
||||
|
||||
// deleteFromConfig removes a node entry from config.yaml
|
||||
func (m *Manager) deleteFromConfig(instanceName, hostname string) error {
|
||||
instancePath := m.GetInstancePath(instanceName)
|
||||
configPath := filepath.Join(instancePath, "config.yaml")
|
||||
|
||||
// Delete node from config.yaml
|
||||
// Path: cluster.nodes.active.{hostname}
|
||||
nodePath := fmt.Sprintf("cluster.nodes.active.%s", node.Hostname)
|
||||
// Path: .cluster.nodes.active["hostname"]
|
||||
// Use bracket notation to safely handle hostnames with special characters
|
||||
nodePath := fmt.Sprintf(".cluster.nodes.active[\"%s\"]", hostname)
|
||||
|
||||
yq := tools.NewYQ()
|
||||
// Use yq to delete the node
|
||||
_, err = yq.Exec("eval", "-i", fmt.Sprintf("del(%s)", nodePath), configPath)
|
||||
delExpr := fmt.Sprintf("del(%s)", nodePath)
|
||||
_, err := yq.Exec("eval", "-i", delExpr, configPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete node: %w", err)
|
||||
}
|
||||
@@ -268,10 +311,20 @@ func (m *Manager) Delete(instanceName, nodeIdentifier string) error {
|
||||
}
|
||||
|
||||
// DetectHardware queries node hardware information via talosctl
|
||||
// Automatically detects maintenance mode by trying insecure first, then secure
|
||||
func (m *Manager) DetectHardware(nodeIP string) (*HardwareInfo, error) {
|
||||
// Query node with insecure flag (maintenance mode)
|
||||
insecure := true
|
||||
// Try insecure first (maintenance mode)
|
||||
hwInfo, err := m.detectHardwareWithMode(nodeIP, true)
|
||||
if err == nil {
|
||||
return hwInfo, nil
|
||||
}
|
||||
|
||||
// Fall back to secure (configured node)
|
||||
return m.detectHardwareWithMode(nodeIP, false)
|
||||
}
|
||||
|
||||
// detectHardwareWithMode queries node hardware with specified connection mode
|
||||
func (m *Manager) detectHardwareWithMode(nodeIP string, insecure bool) (*HardwareInfo, error) {
|
||||
// Try to get default interface (with default route)
|
||||
iface, err := m.talosctl.GetDefaultInterface(nodeIP, insecure)
|
||||
if err != nil {
|
||||
@@ -299,7 +352,7 @@ func (m *Manager) DetectHardware(nodeIP string) (*HardwareInfo, error) {
|
||||
Interface: iface,
|
||||
Disks: disks,
|
||||
SelectedDisk: selectedDisk,
|
||||
MaintenanceMode: true,
|
||||
MaintenanceMode: insecure, // If we used insecure, it's in maintenance mode
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -380,9 +433,9 @@ func (m *Manager) Apply(instanceName, nodeIdentifier string, opts ApplyOptions)
|
||||
// Determine which IP to use and whether node is in maintenance mode
|
||||
//
|
||||
// Three scenarios:
|
||||
// 1. Production node (currentIP empty/same, maintenance=false): use targetIP, no --insecure
|
||||
// 1. Production node (already applied, maintenance=false): use targetIP, no --insecure
|
||||
// 2. IP changing (currentIP != targetIP): use currentIP, --insecure (always maintenance)
|
||||
// 3. Maintenance at target (maintenance=true, no IP change): use targetIP, --insecure
|
||||
// 3. Fresh/maintenance node (never applied OR maintenance=true): use targetIP, --insecure
|
||||
var deployIP string
|
||||
var maintenanceMode bool
|
||||
|
||||
@@ -390,12 +443,13 @@ func (m *Manager) Apply(instanceName, nodeIdentifier string, opts ApplyOptions)
|
||||
// Scenario 2: IP is changing - node is at currentIP, moving to targetIP
|
||||
deployIP = node.CurrentIP
|
||||
maintenanceMode = true
|
||||
} else if node.Maintenance {
|
||||
// Scenario 3: Explicit maintenance mode, no IP change
|
||||
} else if node.Maintenance || !node.Applied {
|
||||
// Scenario 3: Explicit maintenance mode OR never been applied (fresh node)
|
||||
// Fresh nodes need --insecure because they have self-signed certificates
|
||||
deployIP = node.TargetIP
|
||||
maintenanceMode = true
|
||||
} else {
|
||||
// Scenario 1: Production node at target IP
|
||||
// Scenario 1: Production node at target IP (already applied, not in maintenance)
|
||||
deployIP = node.TargetIP
|
||||
maintenanceMode = false
|
||||
}
|
||||
@@ -562,17 +616,21 @@ func (m *Manager) updateNodeStatus(instanceName string, node *Node) error {
|
||||
}
|
||||
|
||||
// Update configured flag
|
||||
configuredValue := "false"
|
||||
if node.Configured {
|
||||
if err := yq.Set(configPath, basePath+".configured", "true"); err != nil {
|
||||
return err
|
||||
}
|
||||
configuredValue = "true"
|
||||
}
|
||||
if err := yq.Set(configPath, basePath+".configured", configuredValue); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update applied flag
|
||||
appliedValue := "false"
|
||||
if node.Applied {
|
||||
if err := yq.Set(configPath, basePath+".applied", "true"); err != nil {
|
||||
return err
|
||||
}
|
||||
appliedValue = "true"
|
||||
}
|
||||
if err := yq.Set(configPath, basePath+".applied", appliedValue); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -652,3 +710,49 @@ func (m *Manager) FetchTemplates(instanceName string) error {
|
||||
destDir := filepath.Join(instancePath, "setup", "cluster-nodes", "patch.templates")
|
||||
return m.extractEmbeddedTemplates(destDir)
|
||||
}
|
||||
|
||||
// Reset resets a node to maintenance mode.
//
// It issues a non-graceful `talosctl reset --graceful=false --reboot`
// against the node (preferring its current IP, falling back to the
// configured target IP), then clears the node's status flags and removes
// its entry from the instance's config.yaml.
//
// "connection refused"/"Unavailable" output from talosctl is tolerated:
// it is the expected symptom of the node rebooting after a successful
// reset, so cleanup proceeds in that case.
//
// NOTE(review): updateNodeStatus writes flags for an entry that
// deleteFromConfig removes immediately afterwards — confirm the interim
// status write is actually needed.
func (m *Manager) Reset(instanceName, nodeIdentifier string) error {
	// Get node
	node, err := m.Get(instanceName, nodeIdentifier)
	if err != nil {
		return fmt.Errorf("node not found: %w", err)
	}

	// Determine IP to reset: prefer the live/current IP; fall back to
	// the configured target IP when no current IP is recorded.
	resetIP := node.CurrentIP
	if resetIP == "" {
		resetIP = node.TargetIP
	}

	// Execute reset command with graceful=false and reboot flags
	talosconfigPath := tools.GetTalosconfigPath(m.dataDir, instanceName)
	cmd := exec.Command("talosctl", "-n", resetIP, "--talosconfig", talosconfigPath, "reset", "--graceful=false", "--reboot")
	output, err := cmd.CombinedOutput()
	if err != nil {
		// Check if error is due to node rebooting (expected after reset command)
		outputStr := string(output)
		if strings.Contains(outputStr, "connection refused") || strings.Contains(outputStr, "Unavailable") {
			// This is expected - node is rebooting after successful reset
			// Continue with config cleanup
		} else {
			// Real error - return it
			return fmt.Errorf("failed to reset node: %w\nOutput: %s", err, outputStr)
		}
	}

	// Update node status to maintenance mode, then remove from config
	node.Maintenance = true
	node.Configured = false
	node.Applied = false
	if err := m.updateNodeStatus(instanceName, node); err != nil {
		return fmt.Errorf("failed to update node status: %w", err)
	}

	// Remove node from config.yaml after successful reset
	if err := m.deleteFromConfig(instanceName, node.Hostname); err != nil {
		return fmt.Errorf("failed to remove node from config: %w", err)
	}

	return nil
}
|
||||
|
||||
@@ -11,6 +11,9 @@ import (
|
||||
"github.com/wild-cloud/wild-central/daemon/internal/tools"
|
||||
)
|
||||
|
||||
// Bootstrap step constants
|
||||
const totalBootstrapSteps = 7
|
||||
|
||||
// Manager handles async operation tracking
|
||||
type Manager struct {
|
||||
dataDir string
|
||||
@@ -23,18 +26,33 @@ func NewManager(dataDir string) *Manager {
|
||||
}
|
||||
}
|
||||
|
||||
// BootstrapProgress tracks detailed bootstrap progress
|
||||
type BootstrapProgress struct {
|
||||
CurrentStep int `json:"current_step"` // 0-6
|
||||
StepName string `json:"step_name"`
|
||||
Attempt int `json:"attempt"`
|
||||
MaxAttempts int `json:"max_attempts"`
|
||||
StepDescription string `json:"step_description"`
|
||||
}
|
||||
|
||||
// OperationDetails contains operation-specific details
|
||||
type OperationDetails struct {
|
||||
BootstrapProgress *BootstrapProgress `json:"bootstrap,omitempty"`
|
||||
}
|
||||
|
||||
// Operation represents a long-running operation
|
||||
type Operation struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"` // discover, setup, download, bootstrap
|
||||
Target string `json:"target"`
|
||||
Instance string `json:"instance"`
|
||||
Status string `json:"status"` // pending, running, completed, failed, cancelled
|
||||
Message string `json:"message,omitempty"`
|
||||
Progress int `json:"progress"` // 0-100
|
||||
LogFile string `json:"logFile,omitempty"` // Path to output log file
|
||||
StartedAt time.Time `json:"started_at"`
|
||||
EndedAt time.Time `json:"ended_at,omitempty"`
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"` // discover, setup, download, bootstrap
|
||||
Target string `json:"target"`
|
||||
Instance string `json:"instance"`
|
||||
Status string `json:"status"` // pending, running, completed, failed, cancelled
|
||||
Message string `json:"message,omitempty"`
|
||||
Progress int `json:"progress"` // 0-100
|
||||
Details *OperationDetails `json:"details,omitempty"` // Operation-specific details
|
||||
LogFile string `json:"logFile,omitempty"` // Path to output log file
|
||||
StartedAt time.Time `json:"started_at"`
|
||||
EndedAt time.Time `json:"ended_at,omitempty"`
|
||||
}
|
||||
|
||||
// GetOperationsDir returns the operations directory for an instance
|
||||
@@ -79,20 +97,6 @@ func (m *Manager) Start(instanceName, opType, target string) (string, error) {
|
||||
return opID, nil
|
||||
}
|
||||
|
||||
// Get returns operation status
|
||||
func (m *Manager) Get(opID string) (*Operation, error) {
|
||||
// Operation ID contains instance name, but we need to find it
|
||||
// For now, we'll scan all instances (not ideal but simple)
|
||||
// Better approach: encode instance in operation ID or maintain index
|
||||
|
||||
// Simplified: assume operation ID format is op_{type}_{target}_{timestamp}
|
||||
// We need to know which instance to look in
|
||||
// For now, return error if we can't find it
|
||||
|
||||
// This needs improvement in actual implementation
|
||||
return nil, fmt.Errorf("operation lookup not implemented - need instance context")
|
||||
}
|
||||
|
||||
// GetByInstance returns an operation for a specific instance
|
||||
func (m *Manager) GetByInstance(instanceName, opID string) (*Operation, error) {
|
||||
opsDir := m.GetOperationsDir(instanceName)
|
||||
@@ -238,6 +242,31 @@ func (m *Manager) Cleanup(instanceName string, olderThan time.Duration) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateBootstrapProgress updates bootstrap-specific progress details for
// operation opID on the given instance and persists the operation to disk.
//
// step is zero-based (0 .. totalBootstrapSteps-1). Progress is scaled so
// the final step maps to 100; Message renders the step one-based for
// human readers, alongside the retry attempt counter.
func (m *Manager) UpdateBootstrapProgress(instanceName, opID string, step int, stepName string, attempt, maxAttempts int, stepDescription string) error {
	op, err := m.GetByInstance(instanceName, opID)
	if err != nil {
		return err
	}

	// Lazily create the details container on the first progress update.
	if op.Details == nil {
		op.Details = &OperationDetails{}
	}

	op.Details.BootstrapProgress = &BootstrapProgress{
		CurrentStep:     step,
		StepName:        stepName,
		Attempt:         attempt,
		MaxAttempts:     maxAttempts,
		StepDescription: stepDescription,
	}

	// Map step 0..totalBootstrapSteps-1 linearly onto 0..100.
	op.Progress = (step * 100) / (totalBootstrapSteps - 1)
	op.Message = fmt.Sprintf("Step %d/%d: %s (attempt %d/%d)", step+1, totalBootstrapSteps, stepName, attempt, maxAttempts)

	return m.writeOperation(op)
}
|
||||
|
||||
// writeOperation writes operation to disk
|
||||
func (m *Manager) writeOperation(op *Operation) error {
|
||||
opsDir := m.GetOperationsDir(op.Instance)
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,107 +1,481 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestFileExists(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(tmpDir string) string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "existing file returns true",
|
||||
setup: func(tmpDir string) string {
|
||||
path := filepath.Join(tmpDir, "test.txt")
|
||||
if err := os.WriteFile(path, []byte("test"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return path
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "non-existent file returns false",
|
||||
setup: func(tmpDir string) string {
|
||||
return filepath.Join(tmpDir, "nonexistent.txt")
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "directory path returns true",
|
||||
setup: func(tmpDir string) string {
|
||||
path := filepath.Join(tmpDir, "testdir")
|
||||
if err := os.Mkdir(path, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return path
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "empty path returns false",
|
||||
setup: func(tmpDir string) string {
|
||||
return ""
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
path := tt.setup(tmpDir)
|
||||
got := FileExists(path)
|
||||
if got != tt.expected {
|
||||
t.Errorf("FileExists(%q) = %v, want %v", path, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureDir(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
testDir := filepath.Join(tmpDir, "test", "nested", "dir")
|
||||
|
||||
err := EnsureDir(testDir, 0755)
|
||||
if err != nil {
|
||||
t.Fatalf("EnsureDir failed: %v", err)
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(tmpDir string) (string, os.FileMode)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "creates new directory",
|
||||
setup: func(tmpDir string) (string, os.FileMode) {
|
||||
return filepath.Join(tmpDir, "newdir"), 0755
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "idempotent - doesn't error if exists",
|
||||
setup: func(tmpDir string) (string, os.FileMode) {
|
||||
path := filepath.Join(tmpDir, "existingdir")
|
||||
if err := os.Mkdir(path, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return path, 0755
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "creates nested directories",
|
||||
setup: func(tmpDir string) (string, os.FileMode) {
|
||||
return filepath.Join(tmpDir, "a", "b", "c", "d"), 0755
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
// Verify directory exists
|
||||
info, err := os.Stat(testDir)
|
||||
if err != nil {
|
||||
t.Fatalf("Directory not created: %v", err)
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
path, perm := tt.setup(tmpDir)
|
||||
|
||||
err := EnsureDir(path, perm)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("EnsureDir() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if !tt.wantErr {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
t.Errorf("Directory not created: %v", err)
|
||||
return
|
||||
}
|
||||
if !info.IsDir() {
|
||||
t.Error("Path is not a directory")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
if !info.IsDir() {
|
||||
t.Fatalf("Path is not a directory")
|
||||
}
|
||||
|
||||
func TestReadFile(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(tmpDir string) string
|
||||
wantData []byte
|
||||
wantErr bool
|
||||
errCheck func(error) bool
|
||||
}{
|
||||
{
|
||||
name: "read existing file",
|
||||
setup: func(tmpDir string) string {
|
||||
path := filepath.Join(tmpDir, "test.txt")
|
||||
if err := os.WriteFile(path, []byte("test content"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return path
|
||||
},
|
||||
wantData: []byte("test content"),
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "non-existent file",
|
||||
setup: func(tmpDir string) string {
|
||||
return filepath.Join(tmpDir, "nonexistent.txt")
|
||||
},
|
||||
wantErr: true,
|
||||
errCheck: func(err error) bool {
|
||||
return errors.Is(err, fs.ErrNotExist)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty file",
|
||||
setup: func(tmpDir string) string {
|
||||
path := filepath.Join(tmpDir, "empty.txt")
|
||||
if err := os.WriteFile(path, []byte{}, 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return path
|
||||
},
|
||||
wantData: []byte{},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
// Calling again should be idempotent
|
||||
err = EnsureDir(testDir, 0755)
|
||||
if err != nil {
|
||||
t.Fatalf("EnsureDir not idempotent: %v", err)
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
path := tt.setup(tmpDir)
|
||||
|
||||
got, err := ReadFile(path)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ReadFile() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if tt.wantErr && tt.errCheck != nil && !tt.errCheck(err) {
|
||||
t.Errorf("ReadFile() error type mismatch: %v", err)
|
||||
}
|
||||
|
||||
if !tt.wantErr && string(got) != string(tt.wantData) {
|
||||
t.Errorf("ReadFile() = %q, want %q", got, tt.wantData)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
testFile := filepath.Join(tmpDir, "test.txt")
|
||||
testData := []byte("test content")
|
||||
|
||||
// Write file
|
||||
err := WriteFile(testFile, testData, 0644)
|
||||
if err != nil {
|
||||
t.Fatalf("WriteFile failed: %v", err)
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(tmpDir string) (string, []byte, os.FileMode)
|
||||
validate func(t *testing.T, path string, data []byte, perm os.FileMode)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "write new file",
|
||||
setup: func(tmpDir string) (string, []byte, os.FileMode) {
|
||||
return filepath.Join(tmpDir, "new.txt"), []byte("new content"), 0644
|
||||
},
|
||||
validate: func(t *testing.T, path string, data []byte, perm os.FileMode) {
|
||||
got, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to read written file: %v", err)
|
||||
}
|
||||
if string(got) != string(data) {
|
||||
t.Errorf("Content = %q, want %q", got, data)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "overwrite existing file",
|
||||
setup: func(tmpDir string) (string, []byte, os.FileMode) {
|
||||
path := filepath.Join(tmpDir, "existing.txt")
|
||||
if err := os.WriteFile(path, []byte("old content"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return path, []byte("new content"), 0644
|
||||
},
|
||||
validate: func(t *testing.T, path string, data []byte, perm os.FileMode) {
|
||||
got, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to read overwritten file: %v", err)
|
||||
}
|
||||
if string(got) != string(data) {
|
||||
t.Errorf("Content = %q, want %q", got, data)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "correct permissions applied",
|
||||
setup: func(tmpDir string) (string, []byte, os.FileMode) {
|
||||
return filepath.Join(tmpDir, "perms.txt"), []byte("test"), 0600
|
||||
},
|
||||
validate: func(t *testing.T, path string, data []byte, perm os.FileMode) {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to stat file: %v", err)
|
||||
return
|
||||
}
|
||||
if info.Mode().Perm() != perm {
|
||||
t.Errorf("Permissions = %o, want %o", info.Mode().Perm(), perm)
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Read file back
|
||||
data, err := os.ReadFile(testFile)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadFile failed: %v", err)
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
path, data, perm := tt.setup(tmpDir)
|
||||
|
||||
if string(data) != string(testData) {
|
||||
t.Fatalf("Data mismatch: got %q, want %q", string(data), string(testData))
|
||||
}
|
||||
}
|
||||
err := WriteFile(path, data, perm)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("WriteFile() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
func TestFileExists(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
testFile := filepath.Join(tmpDir, "test.txt")
|
||||
|
||||
// File should not exist initially
|
||||
if FileExists(testFile) {
|
||||
t.Fatalf("File should not exist")
|
||||
}
|
||||
|
||||
// Create file
|
||||
err := WriteFile(testFile, []byte("test"), 0644)
|
||||
if err != nil {
|
||||
t.Fatalf("WriteFile failed: %v", err)
|
||||
}
|
||||
|
||||
// File should exist now
|
||||
if !FileExists(testFile) {
|
||||
t.Fatalf("File should exist")
|
||||
if !tt.wantErr && tt.validate != nil {
|
||||
tt.validate(t, path, data, perm)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithLock(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
lockFile := filepath.Join(tmpDir, "test.lock")
|
||||
counter := 0
|
||||
t.Run("acquires and releases lock", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
lockPath := filepath.Join(tmpDir, "test.lock")
|
||||
executed := false
|
||||
|
||||
// Execute with lock
|
||||
err := WithLock(lockFile, func() error {
|
||||
counter++
|
||||
return nil
|
||||
err := WithLock(lockPath, func() error {
|
||||
executed = true
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("WithLock() error = %v", err)
|
||||
}
|
||||
if !executed {
|
||||
t.Error("Function was not executed")
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("WithLock failed: %v", err)
|
||||
}
|
||||
|
||||
if counter != 1 {
|
||||
t.Fatalf("Function not executed: counter=%d", counter)
|
||||
}
|
||||
t.Run("releases lock after executing", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
lockPath := filepath.Join(tmpDir, "test.lock")
|
||||
|
||||
// Should be idempotent - can acquire lock multiple times sequentially
|
||||
err = WithLock(lockFile, func() error {
|
||||
counter++
|
||||
return nil
|
||||
err := WithLock(lockPath, func() error {
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("First lock failed: %v", err)
|
||||
}
|
||||
|
||||
err = WithLock(lockPath, func() error {
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("Second lock failed (lock not released): %v", err)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("WithLock failed on second call: %v", err)
|
||||
|
||||
t.Run("concurrent access blocked", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
lockPath := filepath.Join(tmpDir, "concurrent.lock")
|
||||
|
||||
var counter atomic.Int32
|
||||
var wg sync.WaitGroup
|
||||
goroutines := 10
|
||||
|
||||
for i := 0; i < goroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
err := WithLock(lockPath, func() error {
|
||||
current := counter.Load()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
counter.Store(current + 1)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("WithLock() error = %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if counter.Load() != int32(goroutines) {
|
||||
t.Errorf("Counter = %d, want %d (concurrent access not properly blocked)", counter.Load(), goroutines)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("lock released on error", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
lockPath := filepath.Join(tmpDir, "error.lock")
|
||||
testErr := errors.New("test error")
|
||||
|
||||
err := WithLock(lockPath, func() error {
|
||||
return testErr
|
||||
})
|
||||
if err != testErr {
|
||||
t.Errorf("Expected error %v, got %v", testErr, err)
|
||||
}
|
||||
|
||||
err = WithLock(lockPath, func() error {
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("Lock not released after error: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("lock released on panic", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
lockPath := filepath.Join(tmpDir, "panic.lock")
|
||||
|
||||
func() {
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Error("Expected panic")
|
||||
}
|
||||
}()
|
||||
_ = WithLock(lockPath, func() error {
|
||||
panic("test panic")
|
||||
})
|
||||
}()
|
||||
|
||||
err := WithLock(lockPath, func() error {
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("Lock not released after panic: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestLockManual(t *testing.T) {
|
||||
t.Run("manual acquire and release", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
lockPath := filepath.Join(tmpDir, "manual.lock")
|
||||
|
||||
lock, err := AcquireLock(lockPath)
|
||||
if err != nil {
|
||||
t.Fatalf("AcquireLock() error = %v", err)
|
||||
}
|
||||
|
||||
err = lock.Release()
|
||||
if err != nil {
|
||||
t.Errorf("Release() error = %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("double release is safe", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
lockPath := filepath.Join(tmpDir, "double.lock")
|
||||
|
||||
lock, err := AcquireLock(lockPath)
|
||||
if err != nil {
|
||||
t.Fatalf("AcquireLock() error = %v", err)
|
||||
}
|
||||
|
||||
err = lock.Release()
|
||||
if err != nil {
|
||||
t.Errorf("First Release() error = %v", err)
|
||||
}
|
||||
|
||||
err = lock.Release()
|
||||
if err != nil {
|
||||
t.Errorf("Second Release() error = %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestEnsureFilePermissions(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(tmpDir string) string
|
||||
perm os.FileMode
|
||||
wantErr bool
|
||||
errCheck func(error) bool
|
||||
}{
|
||||
{
|
||||
name: "sets permissions on existing file",
|
||||
setup: func(tmpDir string) string {
|
||||
path := filepath.Join(tmpDir, "test.txt")
|
||||
if err := os.WriteFile(path, []byte("test"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return path
|
||||
},
|
||||
perm: 0600,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "non-existent file returns error",
|
||||
setup: func(tmpDir string) string {
|
||||
return filepath.Join(tmpDir, "nonexistent.txt")
|
||||
},
|
||||
perm: 0644,
|
||||
wantErr: true,
|
||||
errCheck: func(err error) bool {
|
||||
return errors.Is(err, fs.ErrNotExist)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if counter != 2 {
|
||||
t.Fatalf("Function not executed on second call: counter=%d", counter)
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
path := tt.setup(tmpDir)
|
||||
|
||||
err := EnsureFilePermissions(path, tt.perm)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("EnsureFilePermissions() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if tt.wantErr && tt.errCheck != nil && !tt.errCheck(err) {
|
||||
t.Errorf("EnsureFilePermissions() error type mismatch: %v", err)
|
||||
}
|
||||
|
||||
if !tt.wantErr {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to stat file: %v", err)
|
||||
return
|
||||
}
|
||||
if info.Mode().Perm() != tt.perm {
|
||||
t.Errorf("Permissions = %o, want %o", info.Mode().Perm(), tt.perm)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,15 +26,15 @@ func NewKubectl(kubeconfigPath string) *Kubectl {
|
||||
|
||||
// PodInfo represents pod information from kubectl
|
||||
type PodInfo struct {
|
||||
Name string `json:"name"`
|
||||
Status string `json:"status"`
|
||||
Ready string `json:"ready"`
|
||||
Restarts int `json:"restarts"`
|
||||
Age string `json:"age"`
|
||||
Node string `json:"node,omitempty"`
|
||||
IP string `json:"ip,omitempty"`
|
||||
Containers []ContainerInfo `json:"containers,omitempty"`
|
||||
Conditions []PodCondition `json:"conditions,omitempty"`
|
||||
Name string `json:"name"`
|
||||
Status string `json:"status"`
|
||||
Ready string `json:"ready"`
|
||||
Restarts int `json:"restarts"`
|
||||
Age string `json:"age"`
|
||||
Node string `json:"node,omitempty"`
|
||||
IP string `json:"ip,omitempty"`
|
||||
Containers []ContainerInfo `json:"containers,omitempty"`
|
||||
Conditions []PodCondition `json:"conditions,omitempty"`
|
||||
}
|
||||
|
||||
// ContainerInfo represents detailed container information
|
||||
@@ -195,7 +195,7 @@ func (k *Kubectl) GetPods(namespace string, detailed bool) ([]PodInfo, error) {
|
||||
Ready bool `json:"ready"`
|
||||
RestartCount int `json:"restartCount"`
|
||||
State struct {
|
||||
Running *struct{ StartedAt time.Time } `json:"running,omitempty"`
|
||||
Running *struct{ StartedAt time.Time } `json:"running,omitempty"`
|
||||
Waiting *struct{ Reason, Message string } `json:"waiting,omitempty"`
|
||||
Terminated *struct {
|
||||
Reason string
|
||||
|
||||
750
internal/tools/kubectl_test.go
Normal file
750
internal/tools/kubectl_test.go
Normal file
@@ -0,0 +1,750 @@
|
||||
package tools
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestNewKubectl(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
kubeconfigPath string
|
||||
}{
|
||||
{
|
||||
name: "creates Kubectl with kubeconfig path",
|
||||
kubeconfigPath: "/path/to/kubeconfig",
|
||||
},
|
||||
{
|
||||
name: "creates Kubectl with empty path",
|
||||
kubeconfigPath: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
k := NewKubectl(tt.kubeconfigPath)
|
||||
if k == nil {
|
||||
t.Fatal("NewKubectl() returned nil")
|
||||
}
|
||||
if k.kubeconfigPath != tt.kubeconfigPath {
|
||||
t.Errorf("kubeconfigPath = %q, want %q", k.kubeconfigPath, tt.kubeconfigPath)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestKubectlDeploymentExists(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
depName string
|
||||
namespace string
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "check deployment exists",
|
||||
depName: "test-deployment",
|
||||
namespace: "default",
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires kubectl and running cluster")
|
||||
}
|
||||
|
||||
k := NewKubectl("")
|
||||
exists := k.DeploymentExists(tt.depName, tt.namespace)
|
||||
_ = exists // Result depends on actual cluster state
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestKubectlGetPods(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
namespace string
|
||||
detailed bool
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "get pods basic",
|
||||
namespace: "default",
|
||||
detailed: false,
|
||||
skipTest: true,
|
||||
},
|
||||
{
|
||||
name: "get pods detailed",
|
||||
namespace: "kube-system",
|
||||
detailed: true,
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires kubectl and running cluster")
|
||||
}
|
||||
|
||||
k := NewKubectl("")
|
||||
pods, err := k.GetPods(tt.namespace, tt.detailed)
|
||||
|
||||
if err == nil {
|
||||
if pods == nil {
|
||||
t.Error("GetPods() returned nil slice without error")
|
||||
}
|
||||
// Verify pod structure
|
||||
for i, pod := range pods {
|
||||
if pod.Name == "" {
|
||||
t.Errorf("pod[%d].Name is empty", i)
|
||||
}
|
||||
if pod.Status == "" {
|
||||
t.Errorf("pod[%d].Status is empty", i)
|
||||
}
|
||||
if pod.Ready == "" {
|
||||
t.Errorf("pod[%d].Ready is empty", i)
|
||||
}
|
||||
if pod.Age == "" {
|
||||
t.Errorf("pod[%d].Age is empty", i)
|
||||
}
|
||||
if tt.detailed && pod.Containers == nil {
|
||||
t.Errorf("pod[%d].Containers is nil in detailed mode", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestKubectlGetFirstPodName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
namespace string
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "get first pod name",
|
||||
namespace: "kube-system",
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires kubectl and running cluster")
|
||||
}
|
||||
|
||||
k := NewKubectl("")
|
||||
podName, err := k.GetFirstPodName(tt.namespace)
|
||||
|
||||
if err == nil {
|
||||
if podName == "" {
|
||||
t.Error("GetFirstPodName() returned empty string without error")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestKubectlGetPodContainers(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
namespace string
|
||||
podName string
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "get pod containers",
|
||||
namespace: "kube-system",
|
||||
podName: "coredns-123",
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires kubectl and running cluster")
|
||||
}
|
||||
|
||||
k := NewKubectl("")
|
||||
containers, err := k.GetPodContainers(tt.namespace, tt.podName)
|
||||
|
||||
if err == nil {
|
||||
if containers == nil {
|
||||
t.Error("GetPodContainers() returned nil slice without error")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestKubectlGetDeployment(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
depName string
|
||||
namespace string
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "get deployment info",
|
||||
depName: "test-deployment",
|
||||
namespace: "default",
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires kubectl and running cluster")
|
||||
}
|
||||
|
||||
k := NewKubectl("")
|
||||
depInfo, err := k.GetDeployment(tt.depName, tt.namespace)
|
||||
|
||||
if err == nil {
|
||||
if depInfo == nil {
|
||||
t.Error("GetDeployment() returned nil without error")
|
||||
}
|
||||
// Desired should be non-negative
|
||||
if depInfo.Desired < 0 {
|
||||
t.Errorf("Desired = %d, should be non-negative", depInfo.Desired)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestKubectlGetReplicas(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
namespace string
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "get replicas for namespace",
|
||||
namespace: "default",
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires kubectl and running cluster")
|
||||
}
|
||||
|
||||
k := NewKubectl("")
|
||||
replicaInfo, err := k.GetReplicas(tt.namespace)
|
||||
|
||||
if err == nil {
|
||||
if replicaInfo == nil {
|
||||
t.Error("GetReplicas() returned nil without error")
|
||||
}
|
||||
// All values should be non-negative
|
||||
if replicaInfo.Desired < 0 {
|
||||
t.Error("Desired < 0")
|
||||
}
|
||||
if replicaInfo.Current < 0 {
|
||||
t.Error("Current < 0")
|
||||
}
|
||||
if replicaInfo.Ready < 0 {
|
||||
t.Error("Ready < 0")
|
||||
}
|
||||
if replicaInfo.Available < 0 {
|
||||
t.Error("Available < 0")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestKubectlGetResources(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
namespace string
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "get resources for namespace",
|
||||
namespace: "default",
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires kubectl and running cluster")
|
||||
}
|
||||
|
||||
k := NewKubectl("")
|
||||
usage, err := k.GetResources(tt.namespace)
|
||||
|
||||
if err == nil {
|
||||
if usage == nil {
|
||||
t.Error("GetResources() returned nil without error")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestKubectlGetRecentEvents(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
namespace string
|
||||
limit int
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "get recent events",
|
||||
namespace: "default",
|
||||
limit: 10,
|
||||
skipTest: true,
|
||||
},
|
||||
{
|
||||
name: "get all events with zero limit",
|
||||
namespace: "default",
|
||||
limit: 0,
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires kubectl and running cluster")
|
||||
}
|
||||
|
||||
k := NewKubectl("")
|
||||
events, err := k.GetRecentEvents(tt.namespace, tt.limit)
|
||||
|
||||
if err == nil {
|
||||
if events == nil {
|
||||
t.Error("GetRecentEvents() returned nil slice without error")
|
||||
}
|
||||
if tt.limit > 0 && len(events) > tt.limit {
|
||||
t.Errorf("len(events) = %d, want <= %d", len(events), tt.limit)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestKubectlGetLogs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
namespace string
|
||||
podName string
|
||||
opts LogOptions
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "get logs with tail",
|
||||
namespace: "kube-system",
|
||||
podName: "coredns-123",
|
||||
opts: LogOptions{Tail: 100},
|
||||
skipTest: true,
|
||||
},
|
||||
{
|
||||
name: "get logs with container",
|
||||
namespace: "kube-system",
|
||||
podName: "coredns-123",
|
||||
opts: LogOptions{Container: "coredns", Tail: 50},
|
||||
skipTest: true,
|
||||
},
|
||||
{
|
||||
name: "get previous logs",
|
||||
namespace: "default",
|
||||
podName: "test-pod",
|
||||
opts: LogOptions{Previous: true, Tail: 100},
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires kubectl and running cluster")
|
||||
}
|
||||
|
||||
k := NewKubectl("")
|
||||
logs, err := k.GetLogs(tt.namespace, tt.podName, tt.opts)
|
||||
|
||||
if err == nil {
|
||||
if logs == nil {
|
||||
t.Error("GetLogs() returned nil slice without error")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestKubectlStreamLogs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
namespace string
|
||||
podName string
|
||||
opts LogOptions
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "stream logs",
|
||||
namespace: "default",
|
||||
podName: "test-pod",
|
||||
opts: LogOptions{Tail: 10},
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires kubectl and running cluster")
|
||||
}
|
||||
|
||||
k := NewKubectl("")
|
||||
cmd, err := k.StreamLogs(tt.namespace, tt.podName, tt.opts)
|
||||
|
||||
if err == nil {
|
||||
if cmd == nil {
|
||||
t.Error("StreamLogs() returned nil command without error")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatAge(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
duration time.Duration
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "seconds",
|
||||
duration: 45 * time.Second,
|
||||
want: "45s",
|
||||
},
|
||||
{
|
||||
name: "minutes",
|
||||
duration: 5 * time.Minute,
|
||||
want: "5m",
|
||||
},
|
||||
{
|
||||
name: "hours",
|
||||
duration: 3 * time.Hour,
|
||||
want: "3h",
|
||||
},
|
||||
{
|
||||
name: "days",
|
||||
duration: 48 * time.Hour,
|
||||
want: "2d",
|
||||
},
|
||||
{
|
||||
name: "less than minute",
|
||||
duration: 30 * time.Second,
|
||||
want: "30s",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := formatAge(tt.duration)
|
||||
if got != tt.want {
|
||||
t.Errorf("formatAge(%v) = %q, want %q", tt.duration, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseResourceQuantity(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
quantity string
|
||||
want int64
|
||||
}{
|
||||
{
|
||||
name: "millicores",
|
||||
quantity: "500m",
|
||||
want: 500,
|
||||
},
|
||||
{
|
||||
name: "cores as plain number",
|
||||
quantity: "2",
|
||||
want: 2,
|
||||
},
|
||||
{
|
||||
name: "Ki suffix",
|
||||
quantity: "100Ki",
|
||||
want: 100 * 1024,
|
||||
},
|
||||
{
|
||||
name: "Mi suffix",
|
||||
quantity: "512Mi",
|
||||
want: 512 * 1024 * 1024,
|
||||
},
|
||||
{
|
||||
name: "Gi suffix",
|
||||
quantity: "2Gi",
|
||||
want: 2 * 1024 * 1024 * 1024,
|
||||
},
|
||||
{
|
||||
name: "K suffix",
|
||||
quantity: "100K",
|
||||
want: 100 * 1000,
|
||||
},
|
||||
{
|
||||
name: "M suffix",
|
||||
quantity: "500M",
|
||||
want: 500 * 1000 * 1000,
|
||||
},
|
||||
{
|
||||
name: "G suffix",
|
||||
quantity: "1G",
|
||||
want: 1 * 1000 * 1000 * 1000,
|
||||
},
|
||||
{
|
||||
name: "empty string",
|
||||
quantity: "",
|
||||
want: 0,
|
||||
},
|
||||
{
|
||||
name: "whitespace",
|
||||
quantity: " ",
|
||||
want: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := parseResourceQuantity(tt.quantity)
|
||||
if got != tt.want {
|
||||
t.Errorf("parseResourceQuantity(%q) = %d, want %d", tt.quantity, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatCPU(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
millicores int64
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "zero",
|
||||
millicores: 0,
|
||||
want: "0",
|
||||
},
|
||||
{
|
||||
name: "millicores",
|
||||
millicores: 500,
|
||||
want: "500m",
|
||||
},
|
||||
{
|
||||
name: "one core",
|
||||
millicores: 1000,
|
||||
want: "1.0",
|
||||
},
|
||||
{
|
||||
name: "two and half cores",
|
||||
millicores: 2500,
|
||||
want: "2.5",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := formatCPU(tt.millicores)
|
||||
if got != tt.want {
|
||||
t.Errorf("formatCPU(%d) = %q, want %q", tt.millicores, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatMemory(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
bytes int64
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "zero",
|
||||
bytes: 0,
|
||||
want: "0",
|
||||
},
|
||||
{
|
||||
name: "bytes",
|
||||
bytes: 512,
|
||||
want: "512B",
|
||||
},
|
||||
{
|
||||
name: "kibibytes",
|
||||
bytes: 1024,
|
||||
want: "1.0Ki",
|
||||
},
|
||||
{
|
||||
name: "mebibytes",
|
||||
bytes: 1024 * 1024,
|
||||
want: "1.0Mi",
|
||||
},
|
||||
{
|
||||
name: "gibibytes",
|
||||
bytes: 2 * 1024 * 1024 * 1024,
|
||||
want: "2.0Gi",
|
||||
},
|
||||
{
|
||||
name: "tebibytes",
|
||||
bytes: 1024 * 1024 * 1024 * 1024,
|
||||
want: "1.0Ti",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := formatMemory(tt.bytes)
|
||||
if got != tt.want {
|
||||
t.Errorf("formatMemory(%d) = %q, want %q", tt.bytes, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodInfoStruct(t *testing.T) {
|
||||
t.Run("PodInfo has required fields", func(t *testing.T) {
|
||||
pod := PodInfo{
|
||||
Name: "test-pod",
|
||||
Status: "Running",
|
||||
Ready: "1/1",
|
||||
Restarts: 0,
|
||||
Age: "5m",
|
||||
Node: "node-1",
|
||||
IP: "10.0.0.1",
|
||||
}
|
||||
|
||||
if pod.Name != "test-pod" {
|
||||
t.Errorf("Name = %q, want %q", pod.Name, "test-pod")
|
||||
}
|
||||
if pod.Status != "Running" {
|
||||
t.Errorf("Status = %q, want %q", pod.Status, "Running")
|
||||
}
|
||||
if pod.Ready != "1/1" {
|
||||
t.Errorf("Ready = %q, want %q", pod.Ready, "1/1")
|
||||
}
|
||||
if pod.Restarts != 0 {
|
||||
t.Errorf("Restarts = %d, want %d", pod.Restarts, 0)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestContainerInfoStruct(t *testing.T) {
|
||||
t.Run("ContainerInfo has required fields", func(t *testing.T) {
|
||||
container := ContainerInfo{
|
||||
Name: "test-container",
|
||||
Image: "nginx:latest",
|
||||
Ready: true,
|
||||
RestartCount: 0,
|
||||
State: ContainerState{
|
||||
Status: "running",
|
||||
Since: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
if container.Name != "test-container" {
|
||||
t.Errorf("Name = %q, want %q", container.Name, "test-container")
|
||||
}
|
||||
if !container.Ready {
|
||||
t.Error("Ready should be true")
|
||||
}
|
||||
if container.State.Status != "running" {
|
||||
t.Errorf("State.Status = %q, want %q", container.State.Status, "running")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestDeploymentInfoStruct(t *testing.T) {
|
||||
t.Run("DeploymentInfo has required fields", func(t *testing.T) {
|
||||
dep := DeploymentInfo{
|
||||
Desired: 3,
|
||||
Current: 3,
|
||||
Ready: 3,
|
||||
Available: 3,
|
||||
}
|
||||
|
||||
if dep.Desired != 3 {
|
||||
t.Errorf("Desired = %d, want %d", dep.Desired, 3)
|
||||
}
|
||||
if dep.Current != 3 {
|
||||
t.Errorf("Current = %d, want %d", dep.Current, 3)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestResourceMetricStruct(t *testing.T) {
|
||||
t.Run("ResourceMetric has required fields", func(t *testing.T) {
|
||||
metric := ResourceMetric{
|
||||
Used: "1.5",
|
||||
Requested: "2.0",
|
||||
Limit: "4.0",
|
||||
Percentage: 37.5,
|
||||
}
|
||||
|
||||
if metric.Used != "1.5" {
|
||||
t.Errorf("Used = %q, want %q", metric.Used, "1.5")
|
||||
}
|
||||
if metric.Percentage != 37.5 {
|
||||
t.Errorf("Percentage = %f, want %f", metric.Percentage, 37.5)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestLogOptionsStruct(t *testing.T) {
|
||||
t.Run("LogOptions has all option fields", func(t *testing.T) {
|
||||
opts := LogOptions{
|
||||
Container: "nginx",
|
||||
Tail: 100,
|
||||
Previous: true,
|
||||
Since: "5m",
|
||||
SinceSeconds: 300,
|
||||
}
|
||||
|
||||
if opts.Container != "nginx" {
|
||||
t.Errorf("Container = %q, want %q", opts.Container, "nginx")
|
||||
}
|
||||
if opts.Tail != 100 {
|
||||
t.Errorf("Tail = %d, want %d", opts.Tail, 100)
|
||||
}
|
||||
if !opts.Previous {
|
||||
t.Error("Previous should be true")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestKubernetesEventStruct(t *testing.T) {
|
||||
t.Run("KubernetesEvent has required fields", func(t *testing.T) {
|
||||
now := time.Now()
|
||||
event := KubernetesEvent{
|
||||
Type: "Warning",
|
||||
Reason: "BackOff",
|
||||
Message: "Back-off restarting failed container",
|
||||
Count: 5,
|
||||
FirstSeen: now.Add(-5 * time.Minute),
|
||||
LastSeen: now,
|
||||
Object: "Pod/test-pod",
|
||||
}
|
||||
|
||||
if event.Type != "Warning" {
|
||||
t.Errorf("Type = %q, want %q", event.Type, "Warning")
|
||||
}
|
||||
if event.Count != 5 {
|
||||
t.Errorf("Count = %d, want %d", event.Count, 5)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1,10 +1,12 @@
|
||||
package tools
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Talosctl provides a thin wrapper around the talosctl command-line tool
|
||||
@@ -92,8 +94,11 @@ func (t *Talosctl) GetDisks(nodeIP string, insecure bool) ([]DiskInfo, error) {
|
||||
args = append(args, "--insecure")
|
||||
}
|
||||
|
||||
// Build args with talosconfig if available
|
||||
finalArgs := t.buildArgs(args)
|
||||
|
||||
// Use jq to slurp the NDJSON into an array (like v.PoC does with jq -s)
|
||||
talosCmd := exec.Command("talosctl", args...)
|
||||
talosCmd := exec.Command("talosctl", finalArgs...)
|
||||
jqCmd := exec.Command("jq", "-s", ".")
|
||||
|
||||
// Pipe talosctl output to jq
|
||||
@@ -171,8 +176,11 @@ func (t *Talosctl) getResourceJSON(resourceType, nodeIP string, insecure bool) (
|
||||
args = append(args, "--insecure")
|
||||
}
|
||||
|
||||
// Build args with talosconfig if available
|
||||
finalArgs := t.buildArgs(args)
|
||||
|
||||
// Use jq to slurp the NDJSON into an array
|
||||
talosCmd := exec.Command("talosctl", args...)
|
||||
talosCmd := exec.Command("talosctl", finalArgs...)
|
||||
jqCmd := exec.Command("jq", "-s", ".")
|
||||
|
||||
// Pipe talosctl output to jq
|
||||
@@ -280,20 +288,45 @@ func (t *Talosctl) GetPhysicalInterface(nodeIP string, insecure bool) (string, e
|
||||
|
||||
// GetVersion gets Talos version from a node
|
||||
func (t *Talosctl) GetVersion(nodeIP string, insecure bool) (string, error) {
|
||||
args := t.buildArgs([]string{
|
||||
"version",
|
||||
"--nodes", nodeIP,
|
||||
"--short",
|
||||
})
|
||||
var args []string
|
||||
|
||||
// When using insecure mode (for maintenance mode nodes), don't use talosconfig
|
||||
// Insecure mode is for unconfigured nodes that don't have authentication set up
|
||||
if insecure {
|
||||
args = append(args, "--insecure")
|
||||
args = []string{
|
||||
"version",
|
||||
"--nodes", nodeIP,
|
||||
"--short",
|
||||
"--insecure",
|
||||
}
|
||||
} else {
|
||||
// For configured nodes, use talosconfig if available
|
||||
args = t.buildArgs([]string{
|
||||
"version",
|
||||
"--nodes", nodeIP,
|
||||
"--short",
|
||||
})
|
||||
}
|
||||
|
||||
cmd := exec.Command("talosctl", args...)
|
||||
// Use context with timeout to prevent hanging on unreachable nodes
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, "talosctl", args...)
|
||||
output, err := cmd.CombinedOutput()
|
||||
outputStr := string(output)
|
||||
|
||||
// Special case: In maintenance mode, talosctl version returns an error
|
||||
// "API is not implemented in maintenance mode" but this means the node IS reachable
|
||||
// and IS in maintenance mode, so we treat this as a success
|
||||
if err != nil && strings.Contains(outputStr, "API is not implemented in maintenance mode") {
|
||||
// Extract client version from output as the node version
|
||||
// Since we can't get server version in maintenance mode
|
||||
return "maintenance", nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("talosctl version failed: %w\nOutput: %s", err, string(output))
|
||||
return "", fmt.Errorf("talosctl version failed: %w\nOutput: %s", err, outputStr)
|
||||
}
|
||||
|
||||
// Parse output to extract server version
|
||||
|
||||
558
internal/tools/talosctl_test.go
Normal file
558
internal/tools/talosctl_test.go
Normal file
@@ -0,0 +1,558 @@
|
||||
package tools
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewTalosctl(t *testing.T) {
|
||||
t.Run("creates Talosctl instance without config", func(t *testing.T) {
|
||||
tc := NewTalosctl()
|
||||
if tc == nil {
|
||||
t.Fatal("NewTalosctl() returned nil")
|
||||
}
|
||||
if tc.talosconfigPath != "" {
|
||||
t.Error("talosconfigPath should be empty for NewTalosctl()")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("creates Talosctl instance with config", func(t *testing.T) {
|
||||
configPath := "/path/to/talosconfig"
|
||||
tc := NewTalosconfigWithConfig(configPath)
|
||||
if tc == nil {
|
||||
t.Fatal("NewTalosconfigWithConfig() returned nil")
|
||||
}
|
||||
if tc.talosconfigPath != configPath {
|
||||
t.Errorf("talosconfigPath = %q, want %q", tc.talosconfigPath, configPath)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestTalosconfigBuildArgs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
talosconfigPath string
|
||||
baseArgs []string
|
||||
wantPrefix []string
|
||||
}{
|
||||
{
|
||||
name: "no talosconfig adds no prefix",
|
||||
talosconfigPath: "",
|
||||
baseArgs: []string{"version", "--short"},
|
||||
wantPrefix: nil,
|
||||
},
|
||||
{
|
||||
name: "with talosconfig adds prefix",
|
||||
talosconfigPath: "/path/to/talosconfig",
|
||||
baseArgs: []string{"version", "--short"},
|
||||
wantPrefix: []string{"--talosconfig", "/path/to/talosconfig"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tc := &Talosctl{talosconfigPath: tt.talosconfigPath}
|
||||
got := tc.buildArgs(tt.baseArgs)
|
||||
|
||||
if tt.wantPrefix == nil {
|
||||
// Should return baseArgs unchanged
|
||||
if len(got) != len(tt.baseArgs) {
|
||||
t.Errorf("buildArgs() length = %d, want %d", len(got), len(tt.baseArgs))
|
||||
}
|
||||
for i, arg := range tt.baseArgs {
|
||||
if i >= len(got) || got[i] != arg {
|
||||
t.Errorf("buildArgs()[%d] = %q, want %q", i, got[i], arg)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Should have prefix + baseArgs
|
||||
expectedLen := len(tt.wantPrefix) + len(tt.baseArgs)
|
||||
if len(got) != expectedLen {
|
||||
t.Errorf("buildArgs() length = %d, want %d", len(got), expectedLen)
|
||||
}
|
||||
// Check prefix
|
||||
for i, arg := range tt.wantPrefix {
|
||||
if i >= len(got) || got[i] != arg {
|
||||
t.Errorf("buildArgs() prefix[%d] = %q, want %q", i, got[i], arg)
|
||||
}
|
||||
}
|
||||
// Check baseArgs follow prefix
|
||||
for i, arg := range tt.baseArgs {
|
||||
idx := len(tt.wantPrefix) + i
|
||||
if idx >= len(got) || got[idx] != arg {
|
||||
t.Errorf("buildArgs()[%d] = %q, want %q", idx, got[idx], arg)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTalosconfigGenConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
clusterName string
|
||||
endpoint string
|
||||
outputDir string
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "gen config with valid params",
|
||||
clusterName: "test-cluster",
|
||||
endpoint: "https://192.168.1.100:6443",
|
||||
outputDir: "testdata",
|
||||
skipTest: true, // Skip actual execution
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires talosctl binary")
|
||||
}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
tc := NewTalosctl()
|
||||
err := tc.GenConfig(tt.clusterName, tt.endpoint, tmpDir)
|
||||
|
||||
// This will fail without talosctl, but tests the method signature
|
||||
if err == nil {
|
||||
// If it somehow succeeds, verify files were created
|
||||
expectedFiles := []string{
|
||||
"controlplane.yaml",
|
||||
"worker.yaml",
|
||||
"talosconfig",
|
||||
}
|
||||
for _, file := range expectedFiles {
|
||||
path := filepath.Join(tmpDir, file)
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
t.Errorf("Expected file not created: %s", file)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTalosconfigApplyConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeIP string
|
||||
configFile string
|
||||
insecure bool
|
||||
talosconfigPath string
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "apply config with all params",
|
||||
nodeIP: "192.168.1.100",
|
||||
configFile: "/path/to/config.yaml",
|
||||
insecure: true,
|
||||
skipTest: true,
|
||||
},
|
||||
{
|
||||
name: "apply config with talosconfig",
|
||||
nodeIP: "192.168.1.100",
|
||||
configFile: "/path/to/config.yaml",
|
||||
insecure: false,
|
||||
talosconfigPath: "/path/to/talosconfig",
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires talosctl binary")
|
||||
}
|
||||
|
||||
tc := NewTalosctl()
|
||||
err := tc.ApplyConfig(tt.nodeIP, tt.configFile, tt.insecure, tt.talosconfigPath)
|
||||
|
||||
// Will fail without talosctl, but tests method signature
|
||||
_ = err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTalosconfigGetDisks(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeIP string
|
||||
insecure bool
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "get disks in insecure mode",
|
||||
nodeIP: "192.168.1.100",
|
||||
insecure: true,
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires talosctl binary and running node")
|
||||
}
|
||||
|
||||
tc := NewTalosctl()
|
||||
disks, err := tc.GetDisks(tt.nodeIP, tt.insecure)
|
||||
|
||||
if err == nil {
|
||||
// If successful, verify return type
|
||||
if disks == nil {
|
||||
t.Error("GetDisks() returned nil slice without error")
|
||||
}
|
||||
// Each disk should have path and size
|
||||
for i, disk := range disks {
|
||||
if disk.Path == "" {
|
||||
t.Errorf("disk[%d].Path is empty", i)
|
||||
}
|
||||
if disk.Size <= 0 {
|
||||
t.Errorf("disk[%d].Size = %d, want > 0", i, disk.Size)
|
||||
}
|
||||
// Size should be > 10GB per filtering
|
||||
if disk.Size <= 10000000000 {
|
||||
t.Errorf("disk[%d].Size = %d, should be filtered (> 10GB)", i, disk.Size)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTalosconfigGetLinks(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeIP string
|
||||
insecure bool
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "get links in insecure mode",
|
||||
nodeIP: "192.168.1.100",
|
||||
insecure: true,
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires talosctl binary and running node")
|
||||
}
|
||||
|
||||
tc := NewTalosctl()
|
||||
links, err := tc.GetLinks(tt.nodeIP, tt.insecure)
|
||||
|
||||
if err == nil {
|
||||
if links == nil {
|
||||
t.Error("GetLinks() returned nil slice without error")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTalosconfigGetRoutes(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeIP string
|
||||
insecure bool
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "get routes in insecure mode",
|
||||
nodeIP: "192.168.1.100",
|
||||
insecure: true,
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires talosctl binary and running node")
|
||||
}
|
||||
|
||||
tc := NewTalosctl()
|
||||
routes, err := tc.GetRoutes(tt.nodeIP, tt.insecure)
|
||||
|
||||
if err == nil {
|
||||
if routes == nil {
|
||||
t.Error("GetRoutes() returned nil slice without error")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTalosconfigGetDefaultInterface(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeIP string
|
||||
insecure bool
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "get default interface",
|
||||
nodeIP: "192.168.1.100",
|
||||
insecure: true,
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires talosctl binary and running node")
|
||||
}
|
||||
|
||||
tc := NewTalosctl()
|
||||
iface, err := tc.GetDefaultInterface(tt.nodeIP, tt.insecure)
|
||||
|
||||
if err == nil {
|
||||
if iface == "" {
|
||||
t.Error("GetDefaultInterface() returned empty string without error")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTalosconfigGetPhysicalInterface(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeIP string
|
||||
insecure bool
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "get physical interface",
|
||||
nodeIP: "192.168.1.100",
|
||||
insecure: true,
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires talosctl binary and running node")
|
||||
}
|
||||
|
||||
tc := NewTalosctl()
|
||||
iface, err := tc.GetPhysicalInterface(tt.nodeIP, tt.insecure)
|
||||
|
||||
if err == nil {
|
||||
if iface == "" {
|
||||
t.Error("GetPhysicalInterface() returned empty string without error")
|
||||
}
|
||||
// Should not be loopback
|
||||
if iface == "lo" {
|
||||
t.Error("GetPhysicalInterface() returned loopback interface")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTalosconfigGetVersion(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeIP string
|
||||
insecure bool
|
||||
want string // Expected for maintenance mode or version string
|
||||
skipTest bool
|
||||
}{
|
||||
{
|
||||
name: "get version in insecure mode",
|
||||
nodeIP: "192.168.1.100",
|
||||
insecure: true,
|
||||
skipTest: true,
|
||||
},
|
||||
{
|
||||
name: "get version in secure mode",
|
||||
nodeIP: "192.168.1.100",
|
||||
insecure: false,
|
||||
skipTest: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.skipTest {
|
||||
t.Skip("Skipping test that requires talosctl binary and running node")
|
||||
}
|
||||
|
||||
tc := NewTalosctl()
|
||||
version, err := tc.GetVersion(tt.nodeIP, tt.insecure)
|
||||
|
||||
if err == nil {
|
||||
if version == "" {
|
||||
t.Error("GetVersion() returned empty string without error")
|
||||
}
|
||||
// Version should be either "maintenance" or start with "v"
|
||||
if version != "maintenance" && version[0] != 'v' {
|
||||
t.Errorf("GetVersion() = %q, expected 'maintenance' or version starting with 'v'", version)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTalosconfigValidate(t *testing.T) {
|
||||
t.Run("validate checks for talosctl", func(t *testing.T) {
|
||||
tc := NewTalosctl()
|
||||
err := tc.Validate()
|
||||
|
||||
// This will pass if talosctl is installed, fail otherwise
|
||||
// We can't guarantee talosctl is installed in all test environments
|
||||
_ = err
|
||||
})
|
||||
}
|
||||
|
||||
func TestDiskInfoStruct(t *testing.T) {
|
||||
t.Run("DiskInfo has required fields", func(t *testing.T) {
|
||||
disk := DiskInfo{
|
||||
Path: "/dev/sda",
|
||||
Size: 1000000000000, // 1TB
|
||||
}
|
||||
|
||||
if disk.Path != "/dev/sda" {
|
||||
t.Errorf("Path = %q, want %q", disk.Path, "/dev/sda")
|
||||
}
|
||||
if disk.Size != 1000000000000 {
|
||||
t.Errorf("Size = %d, want %d", disk.Size, 1000000000000)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestTalosconfigResourceJSONParsing(t *testing.T) {
|
||||
// This test verifies the logic of getResourceJSON without actually calling talosctl
|
||||
t.Run("getResourceJSON uses correct command structure", func(t *testing.T) {
|
||||
tc := &Talosctl{talosconfigPath: "/path/to/talosconfig"}
|
||||
|
||||
// We can't easily test the actual command execution without mocking,
|
||||
// but we can verify buildArgs works correctly
|
||||
baseArgs := []string{"get", "disks", "--nodes", "192.168.1.100", "-o", "json"}
|
||||
finalArgs := tc.buildArgs(baseArgs)
|
||||
|
||||
// Should have talosconfig prepended
|
||||
if len(finalArgs) < 2 || finalArgs[0] != "--talosconfig" {
|
||||
t.Error("buildArgs() should prepend --talosconfig")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestTalosconfigInterfaceFiltering(t *testing.T) {
|
||||
// Test the logic for filtering physical interfaces
|
||||
tests := []struct {
|
||||
name string
|
||||
interfaceName string
|
||||
linkType string
|
||||
operState string
|
||||
shouldAccept bool
|
||||
}{
|
||||
{
|
||||
name: "eth0 up and ethernet",
|
||||
interfaceName: "eth0",
|
||||
linkType: "ether",
|
||||
operState: "up",
|
||||
shouldAccept: true,
|
||||
},
|
||||
{
|
||||
name: "eno1 up and ethernet",
|
||||
interfaceName: "eno1",
|
||||
linkType: "ether",
|
||||
operState: "up",
|
||||
shouldAccept: true,
|
||||
},
|
||||
{
|
||||
name: "loopback should be filtered",
|
||||
interfaceName: "lo",
|
||||
linkType: "loopback",
|
||||
operState: "up",
|
||||
shouldAccept: false,
|
||||
},
|
||||
{
|
||||
name: "cni interface should be filtered",
|
||||
interfaceName: "cni0",
|
||||
linkType: "ether",
|
||||
operState: "up",
|
||||
shouldAccept: false,
|
||||
},
|
||||
{
|
||||
name: "flannel interface should be filtered",
|
||||
interfaceName: "flannel.1",
|
||||
linkType: "ether",
|
||||
operState: "up",
|
||||
shouldAccept: false,
|
||||
},
|
||||
{
|
||||
name: "docker interface should be filtered",
|
||||
interfaceName: "docker0",
|
||||
linkType: "ether",
|
||||
operState: "up",
|
||||
shouldAccept: false,
|
||||
},
|
||||
{
|
||||
name: "bridge interface should be filtered",
|
||||
interfaceName: "br-1234",
|
||||
linkType: "ether",
|
||||
operState: "up",
|
||||
shouldAccept: false,
|
||||
},
|
||||
{
|
||||
name: "veth interface should be filtered",
|
||||
interfaceName: "veth123",
|
||||
linkType: "ether",
|
||||
operState: "up",
|
||||
shouldAccept: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// This simulates the filtering logic in GetPhysicalInterface
|
||||
id := tt.interfaceName
|
||||
linkType := tt.linkType
|
||||
operState := tt.operState
|
||||
|
||||
shouldAccept := (linkType == "ether" && operState == "up" &&
|
||||
id != "lo" &&
|
||||
(id[:3] == "eth" || id[:2] == "en") &&
|
||||
!containsAny(id, []string{"cni", "flannel", "docker", "br-", "veth"}))
|
||||
|
||||
if shouldAccept != tt.shouldAccept {
|
||||
t.Errorf("Interface %q filtering = %v, want %v", id, shouldAccept, tt.shouldAccept)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// containsAny reports whether s matches any entry in substrs. An entry
// ending in '-' (e.g. "br-") is treated as a required prefix of s; any
// other non-empty entry matches anywhere inside s. Empty entries never
// match. Helper for the interface filtering test.
func containsAny(s string, substrs []string) bool {
	for _, p := range substrs {
		if p == "" {
			continue
		}
		if p[len(p)-1] == '-' {
			// Prefix match for things like "br-".
			if len(s) >= len(p) && s[:len(p)] == p {
				return true
			}
			continue
		}
		// Substring match: slide a window of len(p) across s.
		for i := 0; i+len(p) <= len(s); i++ {
			if s[i:i+len(p)] == p {
				return true
			}
		}
	}
	return false
}
|
||||
469
internal/tools/yq_test.go
Normal file
469
internal/tools/yq_test.go
Normal file
@@ -0,0 +1,469 @@
|
||||
package tools
|
||||
|
||||
import (
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"testing"
)
|
||||
|
||||
func TestNewYQ(t *testing.T) {
|
||||
t.Run("creates YQ instance with default path", func(t *testing.T) {
|
||||
yq := NewYQ()
|
||||
if yq == nil {
|
||||
t.Fatal("NewYQ() returned nil")
|
||||
}
|
||||
if yq.yqPath == "" {
|
||||
t.Error("yqPath should not be empty")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestYQGet(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(tmpDir string) (string, string)
|
||||
expression string
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "get simple value",
|
||||
setup: func(tmpDir string) (string, string) {
|
||||
yamlContent := `name: test
|
||||
version: "1.0"
|
||||
`
|
||||
filePath := filepath.Join(tmpDir, "test.yaml")
|
||||
if err := os.WriteFile(filePath, []byte(yamlContent), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return filePath, ".name"
|
||||
},
|
||||
want: "test",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "get nested value",
|
||||
setup: func(tmpDir string) (string, string) {
|
||||
yamlContent := `person:
|
||||
name: John
|
||||
age: 30
|
||||
`
|
||||
filePath := filepath.Join(tmpDir, "nested.yaml")
|
||||
if err := os.WriteFile(filePath, []byte(yamlContent), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return filePath, ".person.name"
|
||||
},
|
||||
want: "John",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "non-existent file returns error",
|
||||
setup: func(tmpDir string) (string, string) {
|
||||
return filepath.Join(tmpDir, "nonexistent.yaml"), ".name"
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Skip if yq is not available
|
||||
if _, err := os.Stat("/usr/bin/yq"); os.IsNotExist(err) {
|
||||
t.Skip("yq not installed, skipping test")
|
||||
}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
filePath, expression := tt.setup(tmpDir)
|
||||
|
||||
yq := NewYQ()
|
||||
got, err := yq.Get(filePath, expression)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("Get() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if !tt.wantErr && got != tt.want {
|
||||
t.Errorf("Get() = %q, want %q", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestYQSet(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(tmpDir string) string
|
||||
expression string
|
||||
value string
|
||||
verify func(t *testing.T, filePath string)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "set simple value",
|
||||
setup: func(tmpDir string) string {
|
||||
yamlContent := `name: old`
|
||||
filePath := filepath.Join(tmpDir, "test.yaml")
|
||||
if err := os.WriteFile(filePath, []byte(yamlContent), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return filePath
|
||||
},
|
||||
expression: ".name",
|
||||
value: "new",
|
||||
verify: func(t *testing.T, filePath string) {
|
||||
content, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !strings.Contains(string(content), "new") {
|
||||
t.Errorf("File does not contain expected value 'new': %s", content)
|
||||
}
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "set value with special characters",
|
||||
setup: func(tmpDir string) string {
|
||||
yamlContent := `message: hello`
|
||||
filePath := filepath.Join(tmpDir, "special.yaml")
|
||||
if err := os.WriteFile(filePath, []byte(yamlContent), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return filePath
|
||||
},
|
||||
expression: ".message",
|
||||
value: `hello "world"`,
|
||||
verify: func(t *testing.T, filePath string) {
|
||||
content, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Should contain escaped quotes
|
||||
if !strings.Contains(string(content), "hello") {
|
||||
t.Errorf("File does not contain expected value: %s", content)
|
||||
}
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "expression without leading dot gets dot prepended",
|
||||
setup: func(tmpDir string) string {
|
||||
yamlContent := `key: value`
|
||||
filePath := filepath.Join(tmpDir, "nodot.yaml")
|
||||
if err := os.WriteFile(filePath, []byte(yamlContent), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return filePath
|
||||
},
|
||||
expression: "key",
|
||||
value: "newvalue",
|
||||
verify: func(t *testing.T, filePath string) {
|
||||
content, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !strings.Contains(string(content), "newvalue") {
|
||||
t.Errorf("File does not contain expected value: %s", content)
|
||||
}
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Skip if yq is not available
|
||||
if _, err := os.Stat("/usr/bin/yq"); os.IsNotExist(err) {
|
||||
t.Skip("yq not installed, skipping test")
|
||||
}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
filePath := tt.setup(tmpDir)
|
||||
|
||||
yq := NewYQ()
|
||||
err := yq.Set(filePath, tt.expression, tt.value)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("Set() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if !tt.wantErr && tt.verify != nil {
|
||||
tt.verify(t, filePath)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestYQDelete(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(tmpDir string) string
|
||||
expression string
|
||||
verify func(t *testing.T, filePath string)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "delete simple key",
|
||||
setup: func(tmpDir string) string {
|
||||
yamlContent := `name: test
|
||||
version: "1.0"
|
||||
`
|
||||
filePath := filepath.Join(tmpDir, "delete.yaml")
|
||||
if err := os.WriteFile(filePath, []byte(yamlContent), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return filePath
|
||||
},
|
||||
expression: ".name",
|
||||
verify: func(t *testing.T, filePath string) {
|
||||
content, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if strings.Contains(string(content), "name:") {
|
||||
t.Errorf("Key 'name' was not deleted: %s", content)
|
||||
}
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Skip if yq is not available
|
||||
if _, err := os.Stat("/usr/bin/yq"); os.IsNotExist(err) {
|
||||
t.Skip("yq not installed, skipping test")
|
||||
}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
filePath := tt.setup(tmpDir)
|
||||
|
||||
yq := NewYQ()
|
||||
err := yq.Delete(filePath, tt.expression)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("Delete() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if !tt.wantErr && tt.verify != nil {
|
||||
tt.verify(t, filePath)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestYQValidate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(tmpDir string) string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid YAML",
|
||||
setup: func(tmpDir string) string {
|
||||
yamlContent := `name: test
|
||||
version: "1.0"
|
||||
nested:
|
||||
key: value
|
||||
`
|
||||
filePath := filepath.Join(tmpDir, "valid.yaml")
|
||||
if err := os.WriteFile(filePath, []byte(yamlContent), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return filePath
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid YAML",
|
||||
setup: func(tmpDir string) string {
|
||||
invalidYaml := `name: test
|
||||
invalid indentation
|
||||
version: "1.0"
|
||||
`
|
||||
filePath := filepath.Join(tmpDir, "invalid.yaml")
|
||||
if err := os.WriteFile(filePath, []byte(invalidYaml), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return filePath
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "non-existent file",
|
||||
setup: func(tmpDir string) string {
|
||||
return filepath.Join(tmpDir, "nonexistent.yaml")
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Skip if yq is not available
|
||||
if _, err := os.Stat("/usr/bin/yq"); os.IsNotExist(err) {
|
||||
t.Skip("yq not installed, skipping test")
|
||||
}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
filePath := tt.setup(tmpDir)
|
||||
|
||||
yq := NewYQ()
|
||||
err := yq.Validate(filePath)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestYQExec(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(tmpDir string) (string, []string)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "exec with valid args",
|
||||
setup: func(tmpDir string) (string, []string) {
|
||||
yamlContent := `name: test`
|
||||
filePath := filepath.Join(tmpDir, "exec.yaml")
|
||||
if err := os.WriteFile(filePath, []byte(yamlContent), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return filePath, []string{"eval", ".name", filePath}
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Skip if yq is not available
|
||||
if _, err := os.Stat("/usr/bin/yq"); os.IsNotExist(err) {
|
||||
t.Skip("yq not installed, skipping test")
|
||||
}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
_, args := tt.setup(tmpDir)
|
||||
|
||||
yq := NewYQ()
|
||||
output, err := yq.Exec(args...)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("Exec() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if !tt.wantErr && len(output) == 0 {
|
||||
t.Error("Exec() returned empty output")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanYQOutput(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "removes trailing newline",
|
||||
input: "value\n",
|
||||
want: "value",
|
||||
},
|
||||
{
|
||||
name: "converts null to empty string",
|
||||
input: "null",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "removes whitespace",
|
||||
input: " value \n",
|
||||
want: "value",
|
||||
},
|
||||
{
|
||||
name: "handles empty string",
|
||||
input: "",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "handles multiple newlines",
|
||||
input: "value\n\n",
|
||||
want: "value",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := CleanYQOutput(tt.input)
|
||||
if got != tt.want {
|
||||
t.Errorf("CleanYQOutput(%q) = %q, want %q", tt.input, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestYQMerge(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(tmpDir string) (string, string, string)
|
||||
verify func(t *testing.T, outputPath string)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "merge two files",
|
||||
setup: func(tmpDir string) (string, string, string) {
|
||||
file1 := filepath.Join(tmpDir, "file1.yaml")
|
||||
file2 := filepath.Join(tmpDir, "file2.yaml")
|
||||
output := filepath.Join(tmpDir, "output.yaml")
|
||||
|
||||
if err := os.WriteFile(file1, []byte("key1: value1\n"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(file2, []byte("key2: value2\n"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return file1, file2, output
|
||||
},
|
||||
verify: func(t *testing.T, outputPath string) {
|
||||
if _, err := os.Stat(outputPath); os.IsNotExist(err) {
|
||||
t.Error("Output file was not created")
|
||||
}
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Skip if yq is not available
|
||||
if _, err := os.Stat("/usr/bin/yq"); os.IsNotExist(err) {
|
||||
t.Skip("yq not installed, skipping test")
|
||||
}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
file1, file2, output := tt.setup(tmpDir)
|
||||
|
||||
yq := NewYQ()
|
||||
err := yq.Merge(file1, file2, output)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("Merge() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if !tt.wantErr && tt.verify != nil {
|
||||
tt.verify(t, output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user