Skip to content

Commit

Permalink
Improve key value pairs consistency in logging (II)
Browse files Browse the repository at this point in the history
Signed-off-by: Stefan Büringer buringerst@vmware.com
  • Loading branch information
sbueringer committed Aug 17, 2022
1 parent 09744f5 commit bbe7c2f
Show file tree
Hide file tree
Showing 35 changed files with 96 additions and 124 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,10 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
if configOwner == nil {
return ctrl.Result{}, nil
}
log = log.WithValues(configOwner.LowerCamelCaseKind(), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()), "resourceVersion", configOwner.GetResourceVersion())
log = log.WithValues(configOwner.GetKind(), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()), "resourceVersion", configOwner.GetResourceVersion())

log = log.WithValues("Cluster", klog.KRef(configOwner.GetNamespace(), configOwner.ClusterName()))
ctx = ctrl.LoggerInto(ctx, log)

// Lookup the cluster the config owner is associated with
cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName())
Expand All @@ -191,8 +194,6 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
return ctrl.Result{}, err
}

ctx = ctrl.LoggerInto(ctx, log.WithValues("cluster", klog.KObj(cluster)))

if annotations.IsPaused(cluster, config) {
log.Info("Reconciliation is paused for this object")
return ctrl.Result{}, nil
Expand Down Expand Up @@ -1001,7 +1002,7 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope
if !apierrors.IsAlreadyExists(err) {
return errors.Wrapf(err, "failed to create bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name)
}
log.Info("bootstrap data secret for KubeadmConfig already exists, updating", "secret", klog.KObj(secret))
log.Info("bootstrap data secret for KubeadmConfig already exists, updating", "Secret", klog.KObj(secret))
if err := r.Client.Update(ctx, secret); err != nil {
return errors.Wrapf(err, "failed to update bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ func NewControlPlaneInitMutex(client client.Client) *ControlPlaneInitMutex {
func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) bool {
sema := newSemaphore()
cmName := configMapName(cluster.Name)
log := ctrl.LoggerFrom(ctx, "configMap", klog.KRef(cluster.Namespace, cmName))
log := ctrl.LoggerFrom(ctx, "ConfigMap", klog.KRef(cluster.Namespace, cmName))
err := c.client.Get(ctx, client.ObjectKey{
Namespace: cluster.Namespace,
Name: cmName,
Expand Down Expand Up @@ -113,7 +113,7 @@ func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Clu
func (c *ControlPlaneInitMutex) Unlock(ctx context.Context, cluster *clusterv1.Cluster) bool {
sema := newSemaphore()
cmName := configMapName(cluster.Name)
log := ctrl.LoggerFrom(ctx, "configMap", klog.KRef(cluster.Namespace, cmName))
log := ctrl.LoggerFrom(ctx, "ConfigMap", klog.KRef(cluster.Namespace, cmName))
err := c.client.Get(ctx, client.ObjectKey{
Namespace: cluster.Namespace,
Name: cmName,
Expand Down
7 changes: 0 additions & 7 deletions bootstrap/util/configowner.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@ import (
"sigs.k8s.io/cluster-api/controllers/external"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/util"
)

// ConfigOwner provides a data interface for different config owner types.
Expand Down Expand Up @@ -124,12 +123,6 @@ func (co ConfigOwner) KubernetesVersion() string {
return version
}

// LowerCamelCaseKind mirrors how controller runtime formats the object's kind when used as a logging key
// for the object being reconciled.
func (co ConfigOwner) LowerCamelCaseKind() string {
return util.LowerCamelCaseKind(co.Unstructured)
}

// GetConfigOwner returns the Unstructured object owning the current resource.
func GetConfigOwner(ctx context.Context, c client.Client, obj metav1.Object) (*ConfigOwner, error) {
allowedGKs := []schema.GroupKind{
Expand Down
3 changes: 2 additions & 1 deletion cmd/clusterctl/client/cluster/mover.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ import (
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
Expand Down Expand Up @@ -540,7 +541,7 @@ func setClusterPause(proxy Proxy, clusters []*node, value bool, dryRun bool) err
setClusterPauseBackoff := newWriteBackoff()
for i := range clusters {
cluster := clusters[i]
log.V(5).Info("Set Cluster.Spec.Paused", "paused", value, "cluster", cluster.identity.Name, "namespace", cluster.identity.Namespace)
log.V(5).Info("Set Cluster.Spec.Paused", "paused", value, "Cluster", klog.KRef(cluster.identity.Namespace, cluster.identity.Name))

// Nb. The operation is wrapped in a retry loop to make setClusterPause more resilient to unexpected conditions.
if err := retryWithExponentialBackoff(setClusterPauseBackoff, func() error {
Expand Down
6 changes: 3 additions & 3 deletions controllers/remote/cluster_cache_tracker.go
Original file line number Diff line number Diff line change
Expand Up @@ -345,7 +345,7 @@ func (t *ClusterCacheTracker) deleteAccessor(_ context.Context, cluster client.O
return
}

log := t.log.WithValues("cluster", klog.KRef(cluster.Namespace, cluster.Name))
log := t.log.WithValues("Cluster", klog.KRef(cluster.Namespace, cluster.Name))
log.V(2).Info("Deleting clusterAccessor")
log.V(4).Info("Stopping cache")
a.cache.Stop()
Expand Down Expand Up @@ -396,7 +396,7 @@ func (t *ClusterCacheTracker) Watch(ctx context.Context, input WatchInput) error
}

if a.watches.Has(input.Name) {
t.log.V(6).Info("Watch already exists", "namespace", klog.KRef(input.Cluster.Namespace, ""), "cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name), "name", input.Name)
t.log.V(6).Info("Watch already exists", "Cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name), "name", input.Name)
return nil
}

Expand Down Expand Up @@ -501,7 +501,7 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health
// NB. we are ignoring ErrWaitTimeout because this error happens when the channel is closed, which in this case
// happens when the cache is explicitly stopped.
if err != nil && err != wait.ErrWaitTimeout {
t.log.Error(err, "Error health checking cluster", "cluster", klog.KRef(in.cluster.Namespace, in.cluster.Name))
t.log.Error(err, "Error health checking cluster", "Cluster", klog.KRef(in.cluster.Namespace, in.cluster.Name))
t.deleteAccessor(ctx, in.cluster)
}
}
4 changes: 2 additions & 2 deletions controlplane/kubeadm/internal/controllers/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
log.Info("Cluster Controller has not yet set OwnerRef")
return ctrl.Result{}, nil
}
log = log.WithValues("cluster", klog.KObj(cluster))
log = log.WithValues("Cluster", klog.KObj(cluster))
ctx = ctrl.LoggerInto(ctx, log)

if annotations.IsPaused(cluster, kcp) {
Expand Down Expand Up @@ -463,7 +463,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, clu
var errs []error
for i := range machinesToDelete {
m := machinesToDelete[i]
logger := log.WithValues("machine", klog.KObj(m))
logger := log.WithValues("Machine", klog.KObj(m))
if err := r.Client.Delete(ctx, machinesToDelete[i]); err != nil && !apierrors.IsNotFound(err) {
logger.Error(err, "Failed to cleanup owned machine")
errs = append(errs, err)
Expand Down
2 changes: 1 addition & 1 deletion controlplane/kubeadm/internal/controllers/helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context,

func (r *KubeadmControlPlaneReconciler) adoptKubeconfigSecret(ctx context.Context, cluster *clusterv1.Cluster, configSecret *corev1.Secret, controllerOwnerRef metav1.OwnerReference) error {
log := ctrl.LoggerFrom(ctx)
log.Info("Adopting KubeConfig secret created by v1alpha2 controllers", "secret", klog.KObj(configSecret))
log.Info("Adopting KubeConfig secret created by v1alpha2 controllers", "Secret", klog.KObj(configSecret))

patch, err := patch.NewHelper(configSecret, r.Client)
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion controlplane/kubeadm/internal/controllers/scale.go
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
return ctrl.Result{}, err
}

logger = logger.WithValues("machine", klog.KObj(machineToDelete))
logger = logger.WithValues("Machine", klog.KObj(machineToDelete))
if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) {
logger.Error(err, "Failed to delete control plane machine")
r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedScaleDown",
Expand Down
3 changes: 1 addition & 2 deletions controlplane/kubeadm/internal/controllers/status.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ import (
"context"

"github.com/pkg/errors"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
Expand All @@ -34,7 +33,7 @@ import (
// updateStatus is called after every reconciliation loop in a defer statement to always make sure we have the
// resource status subresources up-to-date.
func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) error {
log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster))
log := ctrl.LoggerFrom(ctx)

selector := collections.ControlPlaneSelectorForCluster(cluster.Name)
// Copy label selector to its status counterpart in string format.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ type MailgunClusterReconciler struct {
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=mailgunclusters,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=mailgunclusters/status,verbs=get;update;patch

func (r *MailgunClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
func (r *MailgunClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
_ = context.Background()
_ = r.Log.WithValues("mailguncluster", req.NamespacedName)

Expand Down Expand Up @@ -86,7 +86,7 @@ Reconcile is only passed a name, not an object, so let's retrieve ours.
Here's a naive example:

```
func (r *MailgunClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
func (r *MailgunClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
_ = r.Log.WithValues("mailguncluster", req.NamespacedName)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ func (r *ClusterResourceSetReconciler) Reconcile(ctx context.Context, req ctrl.R

clusters, err := r.getClustersByClusterResourceSetSelector(ctx, clusterResourceSet)
if err != nil {
log.Error(err, "Failed fetching clusters that matches ClusterResourceSet labels", "clusterResourceSet", klog.KObj(clusterResourceSet))
log.Error(err, "Failed fetching clusters that matches ClusterResourceSet labels", "ClusterResourceSet", klog.KObj(clusterResourceSet))
conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.ClusterMatchFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
return ctrl.Result{}, err
}
Expand All @@ -161,9 +161,9 @@ func (r *ClusterResourceSetReconciler) Reconcile(ctx context.Context, req ctrl.R

// reconcileDelete removes the deleted ClusterResourceSet from all the ClusterResourceSetBindings it is added to.
func (r *ClusterResourceSetReconciler) reconcileDelete(ctx context.Context, clusters []*clusterv1.Cluster, crs *addonsv1.ClusterResourceSet) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx)

for _, cluster := range clusters {
log := ctrl.LoggerFrom(ctx, "Cluster", klog.KObj(cluster))

clusterResourceSetBinding := &addonsv1.ClusterResourceSetBinding{}
clusterResourceSetBindingKey := client.ObjectKey{
Namespace: cluster.Namespace,
Expand Down Expand Up @@ -237,7 +237,8 @@ func (r *ClusterResourceSetReconciler) getClustersByClusterResourceSetSelector(c
// It applies resources best effort and continue on scenarios like: unsupported resource types, failure during creation, missing resources.
// TODO: If a resource already exists in the cluster but not applied by ClusterResourceSet, the resource will be updated ?
func (r *ClusterResourceSetReconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clusterv1.Cluster, clusterResourceSet *addonsv1.ClusterResourceSet) error {
log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
log := ctrl.LoggerFrom(ctx, "Cluster", klog.KObj(cluster))
ctx = ctrl.LoggerInto(ctx, log)

remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
if err != nil {
Expand Down
5 changes: 4 additions & 1 deletion exp/internal/controllers/machinepool_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -115,9 +115,12 @@ func (r *MachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, err
}

log = log.WithValues("Cluster", klog.KRef(mp.ObjectMeta.Namespace, mp.Spec.ClusterName))
ctx = ctrl.LoggerInto(ctx, log)

cluster, err := util.GetClusterByName(ctx, r.Client, mp.ObjectMeta.Namespace, mp.Spec.ClusterName)
if err != nil {
log.Error(err, "Failed to get Cluster for MachinePool.", "machinePool", klog.KObj(mp), "cluster", klog.KRef(mp.ObjectMeta.Namespace, mp.Spec.ClusterName))
log.Error(err, "Failed to get Cluster for MachinePool.", "MachinePool", klog.KObj(mp), "Cluster", klog.KRef(mp.ObjectMeta.Namespace, mp.Spec.ClusterName))
return ctrl.Result{}, errors.Wrapf(err, "failed to get cluster %q for machinepool %q in namespace %q",
mp.Spec.ClusterName, mp.Name, mp.Namespace)
}
Expand Down
11 changes: 1 addition & 10 deletions exp/internal/controllers/machinepool_controller_noderef.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ import (

"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"

Expand All @@ -48,7 +47,7 @@ type getNodeReferencesResult struct {
}

func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
log := ctrl.LoggerFrom(ctx)
// Check that the MachinePool hasn't been deleted or in the process.
if !mp.DeletionTimestamp.IsZero() {
return ctrl.Result{}, nil
Expand All @@ -60,14 +59,6 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *
return ctrl.Result{}, nil
}

// Check that Cluster isn't nil.
if cluster == nil {
log.V(2).Info("MachinePool doesn't have a linked cluster, won't assign NodeRef")
return ctrl.Result{}, nil
}

log = log.WithValues("cluster", klog.KObj(cluster))

// Check that the MachinePool has valid ProviderIDList.
if len(mp.Spec.ProviderIDList) == 0 && (mp.Spec.Replicas == nil || *mp.Spec.Replicas != 0) {
log.V(2).Info("MachinePool doesn't have any ProviderIDs yet")
Expand Down
5 changes: 2 additions & 3 deletions exp/internal/controllers/machinepool_controller_phases.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@ import (
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
Expand Down Expand Up @@ -167,7 +166,7 @@ func (r *MachinePoolReconciler) reconcileExternal(ctx context.Context, cluster *

// reconcileBootstrap reconciles the Spec.Bootstrap.ConfigRef object on a MachinePool.
func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1.Cluster, m *expv1.MachinePool) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster))
log := ctrl.LoggerFrom(ctx)

// Call generic external reconciler if we have an external reference.
var bootstrapConfig *unstructured.Unstructured
Expand Down Expand Up @@ -227,7 +226,7 @@ func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster

// reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a MachinePool.
func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster))
log := ctrl.LoggerFrom(ctx)

// Call generic external reconciler.
infraReconcileResult, err := r.reconcileExternal(ctx, cluster, mp, &mp.Spec.Template.Spec.InfrastructureRef)
Expand Down
4 changes: 2 additions & 2 deletions hack/observability/promtail/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@ config:
- json:
expressions:
controller:
cluster: join('/',[cluster.namespace,cluster.name])
machine: join('/',[machine.namespace,machine.name])
cluster: join('/',[Cluster.namespace,Cluster.name])
machine: join('/',[Machine.namespace,Machine.name])
- labels:
controller:
cluster:
Expand Down
3 changes: 0 additions & 3 deletions internal/controllers/cluster/cluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
Expand Down Expand Up @@ -114,8 +113,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
return ctrl.Result{}, err
}

ctrl.LoggerInto(ctx, log.WithValues("cluster", klog.KObj(cluster)))

// Return early if the object or Cluster is paused.
if annotations.IsPaused(cluster, cluster) {
log.Info("Reconciliation is paused for this object")
Expand Down
Loading

0 comments on commit bbe7c2f

Please sign in to comment.