Add node details to cluster status.
@@ -39,15 +39,24 @@ type ClusterConfig struct {
 	Version string `json:"version"`
 }
 
+// NodeStatus represents the health status of a single node
+type NodeStatus struct {
+	Hostname        string `json:"hostname"`
+	Ready           bool   `json:"ready"`
+	KubernetesReady bool   `json:"kubernetes_ready"`
+	Role            string `json:"role"` // "control-plane" or "worker"
+}
+
 // ClusterStatus represents cluster health and status
 type ClusterStatus struct {
 	Status            string                `json:"status"` // ready, pending, error
 	Nodes             int                   `json:"nodes"`
 	ControlPlaneNodes int                   `json:"control_plane_nodes"`
 	WorkerNodes       int                   `json:"worker_nodes"`
 	KubernetesVersion string                `json:"kubernetes_version"`
 	TalosVersion      string                `json:"talos_version"`
 	Services          map[string]string     `json:"services"`
+	NodeStatuses      map[string]NodeStatus `json:"node_statuses,omitempty"`
 }
 
 // GetTalosDir returns the talos directory for an instance
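As a side note (not part of the commit, with invented values): the quickest way to see the wire format the new field produces is to marshal a hand-built ClusterStatus. The types below are local copies of the ones above, trimmed to the fields relevant here.

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed local copies of the types added in the hunk above.
type NodeStatus struct {
	Hostname        string `json:"hostname"`
	Ready           bool   `json:"ready"`
	KubernetesReady bool   `json:"kubernetes_ready"`
	Role            string `json:"role"`
}

type ClusterStatus struct {
	Status       string                `json:"status"`
	Nodes        int                   `json:"nodes"`
	NodeStatuses map[string]NodeStatus `json:"node_statuses,omitempty"`
}

func main() {
	s := ClusterStatus{
		Status: "ready",
		Nodes:  2,
		NodeStatuses: map[string]NodeStatus{
			"cp-1": {Hostname: "cp-1", Ready: true, KubernetesReady: true, Role: "control-plane"},
			"w-1":  {Hostname: "w-1", Ready: true, KubernetesReady: false, Role: "worker"},
		},
	}
	out, err := json.MarshalIndent(s, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// Because of omitempty, node_statuses is omitted entirely when the map
	// is nil, so consumers of the old JSON shape are unaffected.
}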
@@ -495,6 +504,7 @@ func (m *Manager) GetStatus(instanceName string) (*ClusterStatus, error) {
 	var nodesResult struct {
 		Items []struct {
 			Metadata struct {
+				Name   string            `json:"name"`
 				Labels map[string]string `json:"labels"`
 			} `json:"metadata"`
 			Status struct {
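The anonymous nodesResult struct mirrors the JSON that a Kubernetes nodes list (as from `kubectl get nodes -o json`) emits, keeping only the fields GetStatus needs. The Conditions shape in the sketch below is an assumption reconstructed from how the next hunk reads cond.Type and cond.Status; the sample payload is invented, and extra fields in a real payload are simply ignored by encoding/json.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Invented, heavily trimmed nodes-list payload.
	data := []byte(`{
	  "items": [{
	    "metadata": {
	      "name": "cp-1",
	      "labels": {"node-role.kubernetes.io/control-plane": ""}
	    },
	    "status": {"conditions": [{"type": "Ready", "status": "True"}]}
	  }]
	}`)

	var nodesResult struct {
		Items []struct {
			Metadata struct {
				Name   string            `json:"name"`
				Labels map[string]string `json:"labels"`
			} `json:"metadata"`
			Status struct {
				Conditions []struct {
					Type   string `json:"type"`
					Status string `json:"status"`
				} `json:"conditions"`
			} `json:"status"`
		} `json:"items"`
	}
	if err := json.Unmarshal(data, &nodesResult); err != nil {
		panic(err)
	}
	fmt.Println(nodesResult.Items[0].Metadata.Name) // cp-1
}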
@@ -539,20 +549,38 @@ func (m *Manager) GetStatus(instanceName string) (*ClusterStatus, error) {
 		}
 	}
 
-	// Count control plane and worker nodes
+	// Count control plane and worker nodes, and populate per-node status
+	status.NodeStatuses = make(map[string]NodeStatus)
+
 	for _, node := range nodesResult.Items {
+		hostname := node.Metadata.Name // K8s node name is hostname
+
+		role := "worker"
 		if _, isControl := node.Metadata.Labels["node-role.kubernetes.io/control-plane"]; isControl {
+			role = "control-plane"
 			status.ControlPlaneNodes++
 		} else {
			status.WorkerNodes++
 		}
 
 		// Check if node is ready
+		nodeReady := false
 		for _, cond := range node.Status.Conditions {
-			if cond.Type == "Ready" && cond.Status != "True" {
-				status.Status = "degraded"
+			if cond.Type == "Ready" {
+				nodeReady = (cond.Status == "True")
+				if !nodeReady {
+					status.Status = "degraded"
+				}
+				break
 			}
 		}
+
+		status.NodeStatuses[hostname] = NodeStatus{
+			Hostname:        hostname,
+			Ready:           true, // In K8s means it's reachable
+			KubernetesReady: nodeReady,
+			Role:            role,
+		}
 	}
 
 	// Check basic service status
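Beyond the degraded check the old code performed, the loop now records each node's readiness and breaks once the Ready condition is found. A hypothetical consumer of the new map, not part of this commit, might render a per-node summary; since Go randomizes map iteration order, sorting the hostnames first keeps the output stable:

package main

import (
	"fmt"
	"sort"
)

// Local copy of the NodeStatus type added in the first hunk.
type NodeStatus struct {
	Hostname        string
	Ready           bool
	KubernetesReady bool
	Role            string
}

func main() {
	// Invented data standing in for (*Manager).GetStatus output.
	nodes := map[string]NodeStatus{
		"w-1":  {Hostname: "w-1", Ready: true, KubernetesReady: false, Role: "worker"},
		"cp-1": {Hostname: "cp-1", Ready: true, KubernetesReady: true, Role: "control-plane"},
	}

	names := make([]string, 0, len(nodes))
	for name := range nodes {
		names = append(names, name)
	}
	sort.Strings(names)

	for _, name := range names {
		ns := nodes[name]
		state := "NotReady"
		if ns.KubernetesReady {
			state = "Ready"
		}
		fmt.Printf("%-8s %-14s %s\n", name, ns.Role, state)
	}
}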