Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions config/rbac/role.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ rules:
- apiGroups:
- ""
resources:
- nodes
- pods
verbs:
- get
Expand Down
77 changes: 50 additions & 27 deletions internal/controller/logger_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ type LoggerReconciler struct {
// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch
// +kubebuilder:rbac:groups=apps,resources=replicasets,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
Expand Down Expand Up @@ -84,7 +85,13 @@ func rsLine(rs appsv1.ReplicaSet) string {
fmt.Sprintf("%d", rs.Status.Replicas)
}

func (r *LoggerReconciler) loggerRequestsForPod(
func nodeLine(node corev1.Node) string {
return "node/" + node.Name + " " +
node.Namespace + " " +
fmt.Sprintf("%v", node.Status.Capacity)
Comment on lines +88 to +91
Copy link

Copilot AI Mar 12, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

node.Status.Capacity is a map; formatting it with %v results in non-deterministic key ordering in Go, making logs noisy and hard to diff. Consider logging a stable subset (cpu/memory) or formatting the ResourceList deterministically.

Suggested change
func nodeLine(node corev1.Node) string {
return "node/" + node.Name + " " +
node.Namespace + " " +
fmt.Sprintf("%v", node.Status.Capacity)
func formatResourceList(rl corev1.ResourceList) string {
if rl == nil {
return "{}"
}
keys := make([]string, 0, len(rl))
for k := range rl {
keys = append(keys, string(k))
}
slices.Sort(keys)
var b strings.Builder
b.WriteString("{")
for i, k := range keys {
if i > 0 {
b.WriteString(" ")
}
b.WriteString(k)
b.WriteString(":")
qty := rl[corev1.ResourceName(k)]
b.WriteString(qty.String())
}
b.WriteString("}")
return b.String()
}
func nodeLine(node corev1.Node) string {
return "node/" + node.Name + " " +
node.Namespace + " " +
formatResourceList(node.Status.Capacity)

Copilot uses AI. Check for mistakes.
}

Comment on lines +90 to +93
Copy link

Copilot AI Mar 12, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nodeLine includes node.Namespace, but Nodes are cluster-scoped so this will always be empty and leads to confusing output (extra spacing / misleading “namespace” field). Consider removing the namespace from the node output or replacing it with a meaningful field (e.g., node role/labels) if you need a second column.

Suggested change
node.Namespace + " " +
fmt.Sprintf("%v", node.Status.Capacity)
}
fmt.Sprintf("%v", node.Status.Capacity)
}

Copilot uses AI. Check for mistakes.
func (r *LoggerReconciler) enqueueAllLoggers(
ctx context.Context,
obj client.Object,
) []ctrl.Request {
Expand Down Expand Up @@ -112,28 +119,6 @@ func printKubectl(line string) {
fmt.Println(line)
}

// loggerRequestsForDeployment maps a watched Deployment event to reconcile
// requests for every Logger currently in the cluster, so each Logger can
// re-evaluate its output when any Deployment changes. On a failed List it
// enqueues nothing.
func (r *LoggerReconciler) loggerRequestsForDeployment(
	ctx context.Context,
	obj client.Object,
) []ctrl.Request {

	var loggers loggerv1.LoggerList
	if err := r.List(ctx, &loggers); err != nil {
		// Listing failed; skip enqueueing rather than acting on stale data.
		return nil
	}

	requests := make([]ctrl.Request, 0, len(loggers.Items))
	for i := range loggers.Items {
		item := &loggers.Items[i]
		requests = append(requests, ctrl.Request{
			NamespacedName: client.ObjectKey{
				Name:      item.Name,
				Namespace: item.Namespace,
			},
		})
	}
	return requests
}

func isSystemNamespace(ns string) bool {
switch ns {
case "kube-system", "kube-public", "kube-node-lease":
Expand Down Expand Up @@ -171,6 +156,10 @@ func shouldLogReplicaSets(logger *loggerv1.Logger) bool {
return slices.Contains(logger.Spec.Resources, "replicasets")
}

// shouldLogNodes reports whether the Logger spec's resource list requests
// node logging (i.e. contains the "nodes" entry).
func shouldLogNodes(logger *loggerv1.Logger) bool {
	for _, resource := range logger.Spec.Resources {
		if resource == "nodes" {
			return true
		}
	}
	return false
}
Comment on lines +159 to +161
Copy link

Copilot AI Mar 12, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There are controller tests covering namespace vs cluster scope for pods/deployments/replicasets, but no tests covering the new nodes resource. Adding a test case for Resources: ["nodes"] (and especially for Namespace scope) would help catch issues like accidentally applying namespace list options to node listing.

Copilot uses AI. Check for mistakes.

func (r *LoggerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
var observed int32 = 0
l := logf.FromContext(ctx)
Expand Down Expand Up @@ -308,6 +297,38 @@ func (r *LoggerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr

}

if shouldLogNodes(&logger) {
var nodelist corev1.NodeList
if err := r.List(ctx, &nodelist, opts...); err != nil {
Copy link

Copilot AI Mar 12, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

opts := listOptionsFromScope(&logger) may include client.InNamespace(...) for namespace-scoped loggers. Passing that into r.List for corev1.NodeList is not semantically valid (Nodes are cluster-scoped) and can lead to incorrect behavior or errors. Consider listing nodes without namespace list options (or skipping node logging when scope type is Namespace).

Suggested change
if err := r.List(ctx, &nodelist, opts...); err != nil {
if err := r.List(ctx, &nodelist); err != nil {

Copilot uses AI. Check for mistakes.
return ctrl.Result{}, err
}

l.V(1).Info("nodes observed", "count", len(nodelist.Items))
printKubectl("NODES")
for _, node := range nodelist.Items {
observed++

// ---------- kubectl-style ----------
printKubectl(nodeLine(node))

// ---------- deep logs ----------
l.V(1).Info(
"node details",
"name", node.Name,
"kubeletVersion", node.Status.NodeInfo.KubeletVersion,
"osImage", node.Status.NodeInfo.OSImage,
"architecture", node.Status.NodeInfo.Architecture,
"containerRuntime", node.Status.NodeInfo.ContainerRuntimeVersion,
"allocatableCPU", node.Status.Allocatable.Cpu().String(),
"allocatableMemory", node.Status.Allocatable.Memory().String(),
"capacity", node.Status.Capacity,
"conditions", node.Status.Conditions,
"addresses", node.Status.Addresses,
)
}

}

now := metav1.Now()

if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
Expand Down Expand Up @@ -345,11 +366,13 @@ func (r *LoggerReconciler) SetupWithManager(mgr ctrl.Manager) error {
).
Watches(
&corev1.Pod{},
handler.EnqueueRequestsFromMapFunc(r.loggerRequestsForPod),
handler.EnqueueRequestsFromMapFunc(r.enqueueAllLoggers),
).Watches(
&appsv1.Deployment{},
handler.EnqueueRequestsFromMapFunc(r.loggerRequestsForDeployment),
).
Named("logger").
handler.EnqueueRequestsFromMapFunc(r.enqueueAllLoggers),
).Watches(
&corev1.Node{},
handler.EnqueueRequestsFromMapFunc(r.enqueueAllLoggers),
).Named("logger").
Comment on lines +373 to +376
Copy link

Copilot AI Mar 12, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Adding a watch on corev1.Node{} will enqueue all Logger reconciles on every Node update. Node status changes (heartbeats/conditions) can be very frequent, which may cause reconcile/log storms. Consider removing the Node watch and relying on the existing periodic RequeueAfter, or add predicates to filter Node events down to meaningful changes.

Copilot uses AI. Check for mistakes.
Complete(r)
}
Loading