From eb4eaebd8d8701745af8355b1eadd9c2390d6caa Mon Sep 17 00:00:00 2001 From: Jonathan Tong Date: Wed, 22 Feb 2023 16:47:48 -0500 Subject: [PATCH] Implement MachinePool Machines in CAPI, CAPD, and clusterctl --- api/v1beta1/common_types.go | 12 + api/v1beta1/machine_webhook.go | 52 +- cmd/clusterctl/client/tree/discovery.go | 48 +- config/webhook/manifests.yaml | 2 + exp/api/v1beta1/machinepool_types.go | 4 + .../machinepool_controller_phases.go | 279 ++++++++++ .../machinepool_controller_phases_test.go | 493 ++++++++++++++++++ exp/internal/controllers/suite_test.go | 6 +- .../controllers/machine/machine_controller.go | 8 + .../machine/machine_controller_noderef.go | 1 + .../docker/api/v1alpha3/conversion.go | 30 +- .../api/v1alpha3/zz_generated.conversion.go | 16 +- .../docker/api/v1alpha4/conversion.go | 30 +- .../api/v1alpha4/zz_generated.conversion.go | 16 +- .../docker/api/v1beta1/dockermachine_types.go | 7 + ...e.cluster.x-k8s.io_dockermachinepools.yaml | 50 ++ ...cture.cluster.x-k8s.io_dockermachines.yaml | 7 + ...uster.x-k8s.io_dockermachinetemplates.yaml | 8 + .../docker/config/webhook/manifests.yaml | 22 + .../docker/exp/api/v1alpha3/conversion.go | 33 +- .../api/v1alpha3/zz_generated.conversion.go | 9 +- .../docker/exp/api/v1alpha4/conversion.go | 33 +- .../api/v1alpha4/zz_generated.conversion.go | 9 +- .../api/v1beta1/dockermachinepool_types.go | 20 +- .../api/v1beta1/dockermachinepool_webhook.go | 27 +- .../exp/api/v1beta1/zz_generated.deepcopy.go | 1 + .../dockermachinepool_controller.go | 206 +++++++- .../docker/exp/internal/docker/nodepool.go | 104 ++-- .../controllers/dockermachine_controller.go | 149 +++--- 29 files changed, 1474 insertions(+), 208 deletions(-) diff --git a/api/v1beta1/common_types.go b/api/v1beta1/common_types.go index 9c201109e613..d9a7e6dab545 100644 --- a/api/v1beta1/common_types.go +++ b/api/v1beta1/common_types.go @@ -57,6 +57,18 @@ const ( // update that disallows a pre-existing Cluster to be populated with Topology information and Class. ClusterTopologyUnsafeUpdateClassNameAnnotation = "unsafe.topology.cluster.x-k8s.io/disable-update-class-name-check" + // MachinePoolOwnedLabel is the label set on Machines and InfraMachines linked to a MachinePool. + MachinePoolOwnedLabel = "machinepool.cluster.x-k8s.io/owned" + + // FallbackMachineLabel indicates that a Machine belongs to a MachinePool that does not support MachinePool Machines. + // As such, these Machines exist to create a consistent user experience and will not have an infrastructure reference. The user will + // also be prevented from deleting these Machines. + FallbackMachineLabel = "machinepool.cluster.x-k8s.io/fallback-machine" + + // AllowFallbackMachineDeletionAnnotation is used to bypass the prevention of deletion of fallback machines. The controller will + // apply this annotation before deletion. + AllowFallbackMachineDeletionAnnotation = "machinepool.cluster.x-k8s.io/allow-fallback-deletion" + // ProviderNameLabel is the label set on components in the provider manifest. // This label allows to easily identify all the components belonging to a provider; the clusterctl // tool uses this label for implementing provider's lifecycle operations. 
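Reviewer note: the three constants above work together with the Machine webhook changes below. A minimal sketch of the rule the webhook enforces, assuming the clusterv1 package alias used elsewhere in this patch (isDeletable is a hypothetical helper for illustration, not part of the patch):

package example

import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

// isDeletable mirrors the ValidateDelete rule added in machine_webhook.go:
// a Machine carrying FallbackMachineLabel may only be deleted once the
// MachinePool controller has applied AllowFallbackMachineDeletionAnnotation.
// Hypothetical helper, for illustration only.
func isDeletable(m *clusterv1.Machine) bool {
	if _, isFallback := m.Labels[clusterv1.FallbackMachineLabel]; !isFallback {
		// Machines outside the fallback flow are not guarded by this rule.
		return true
	}
	_, allowed := m.Annotations[clusterv1.AllowFallbackMachineDeletionAnnotation]
	return allowed
}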
diff --git a/api/v1beta1/machine_webhook.go b/api/v1beta1/machine_webhook.go index 4a21872eaa51..ec41871e32f8 100644 --- a/api/v1beta1/machine_webhook.go +++ b/api/v1beta1/machine_webhook.go @@ -39,8 +39,8 @@ func (m *Machine) SetupWebhookWithManager(mgr ctrl.Manager) error { Complete() } -// +kubebuilder:webhook:verbs=create;update,path=/validate-cluster-x-k8s-io-v1beta1-machine,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=cluster.x-k8s.io,resources=machines,versions=v1beta1,name=validation.machine.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 -// +kubebuilder:webhook:verbs=create;update,path=/mutate-cluster-x-k8s-io-v1beta1-machine,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=cluster.x-k8s.io,resources=machines,versions=v1beta1,name=default.machine.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:verbs=create;update;delete,path=/validate-cluster-x-k8s-io-v1beta1-machine,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=cluster.x-k8s.io,resources=machines,versions=v1beta1,name=validation.machine.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:verbs=create;update;delete,path=/mutate-cluster-x-k8s-io-v1beta1-machine,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=cluster.x-k8s.io,resources=machines,versions=v1beta1,name=default.machine.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 var _ webhook.Validator = &Machine{} var _ webhook.Defaulter = &Machine{} @@ -57,7 +57,10 @@ func (m *Machine) Default() { } if m.Spec.InfrastructureRef.Namespace == "" { - m.Spec.InfrastructureRef.Namespace = m.Namespace + // Don't autofill namespace for MachinePool Machines since the infraRef will be populated by the MachinePool controller. + if _, isMachinePoolMachine := m.Labels[MachinePoolOwnedLabel]; !isMachinePoolMachine { + m.Spec.InfrastructureRef.Namespace = m.Namespace + } } if m.Spec.Version != nil && !strings.HasPrefix(*m.Spec.Version, "v") { @@ -86,6 +89,12 @@ func (m *Machine) ValidateUpdate(old runtime.Object) error { // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. func (m *Machine) ValidateDelete() error { + if _, isFallbackMachine := m.Labels[FallbackMachineLabel]; isFallbackMachine { + if _, allowDelete := m.Annotations[AllowFallbackMachineDeletionAnnotation]; !allowDelete { + return apierrors.NewBadRequest("fallback MachinePool Machine cannot be deleted by user") + } + } + return nil } @@ -93,13 +102,16 @@ func (m *Machine) validate(old *Machine) error { var allErrs field.ErrorList specPath := field.NewPath("spec") if m.Spec.Bootstrap.ConfigRef == nil && m.Spec.Bootstrap.DataSecretName == nil { - allErrs = append( - allErrs, - field.Required( - specPath.Child("bootstrap", "data"), - "expected either spec.bootstrap.dataSecretName or spec.bootstrap.configRef to be populated", - ), - ) + // MachinePool Machines don't have a bootstrap configRef, so don't require it. The bootstrap config is instead owned by the MachinePool. 
+ if _, isMachinePoolMachine := m.Labels[MachinePoolOwnedLabel]; !isMachinePoolMachine { + allErrs = append( + allErrs, + field.Required( + specPath.Child("bootstrap", "data"), + "expected either spec.bootstrap.dataSecretName or spec.bootstrap.configRef to be populated", + ), + ) + } } if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.Namespace != m.Namespace { @@ -113,15 +125,17 @@ func (m *Machine) validate(old *Machine) error { ) } - if m.Spec.InfrastructureRef.Namespace != m.Namespace { - allErrs = append( - allErrs, - field.Invalid( - specPath.Child("infrastructureRef", "namespace"), - m.Spec.InfrastructureRef.Namespace, - "must match metadata.namespace", - ), - ) + if _, isMachinePoolMachine := m.Labels[MachinePoolOwnedLabel]; !isMachinePoolMachine { + if m.Spec.InfrastructureRef.Namespace != m.Namespace { + allErrs = append( + allErrs, + field.Invalid( + specPath.Child("infrastructureRef", "namespace"), + m.Spec.InfrastructureRef.Namespace, + "must match metadata.namespace", + ), + ) + } } if old != nil && old.Spec.ClusterName != m.Spec.ClusterName { diff --git a/cmd/clusterctl/client/tree/discovery.go b/cmd/clusterctl/client/tree/discovery.go index e16b250ac9bc..939ffb3a28fc 100644 --- a/cmd/clusterctl/client/tree/discovery.go +++ b/cmd/clusterctl/client/tree/discovery.go @@ -112,8 +112,10 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt tree.Add(m, machineInfra, ObjectMetaName("MachineInfrastructure"), NoEcho(true)) } - if machineBootstrap, err := external.Get(ctx, c, m.Spec.Bootstrap.ConfigRef, cluster.Namespace); err == nil { - tree.Add(m, machineBootstrap, ObjectMetaName("BootstrapConfig"), NoEcho(true)) + if m.Spec.Bootstrap.ConfigRef != nil { + if machineBootstrap, err := external.Get(ctx, c, m.Spec.Bootstrap.ConfigRef, cluster.Namespace); err == nil { + tree.Add(m, machineBootstrap, ObjectMetaName("BootstrapConfig"), NoEcho(true)) + } } } } @@ -141,25 +143,25 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt if err != nil { return nil, err } - - // Handles orphan machines. - if len(machineMap) < len(machinesList.Items) { - other := VirtualObject(cluster.Namespace, "OtherGroup", "Other") - tree.Add(workers, other) - - for i := range machinesList.Items { - m := &machinesList.Items[i] - if _, ok := machineMap[m.Name]; ok { - continue - } - addMachineFunc(other, m) - } - } } if len(machinePoolList.Items) > 0 { // Add MachinePool objects tree.Add(cluster, workers) - addMachinePoolsToObjectTree(ctx, c, cluster.Namespace, workers, machinePoolList, tree) + addMachinePoolsToObjectTree(ctx, c, cluster.Namespace, workers, machinePoolList, machinesList, tree, addMachineFunc) + } + + // Handles orphan machines. 
+	if len(machineMap) < len(machinesList.Items) {
+		other := VirtualObject(cluster.Namespace, "OtherGroup", "Other")
+		tree.Add(workers, other)
+
+		for i := range machinesList.Items {
+			m := &machinesList.Items[i]
+			if _, ok := machineMap[m.Name]; ok {
+				continue
+			}
+			addMachineFunc(other, m)
+		}
 	}
 
 	return tree, nil
@@ -263,10 +265,10 @@ func addMachineDeploymentToObjectTree(ctx context.Context, c client.Client, clus
 	return nil
 }
 
-func addMachinePoolsToObjectTree(ctx context.Context, c client.Client, namespace string, workers *unstructured.Unstructured, machinePoolList *expv1.MachinePoolList, tree *ObjectTree) {
+func addMachinePoolsToObjectTree(ctx context.Context, c client.Client, namespace string, workers *unstructured.Unstructured, machinePoolList *expv1.MachinePoolList, machinesList *clusterv1.MachineList, tree *ObjectTree, addMachineFunc func(parent client.Object, m *clusterv1.Machine)) {
 	for i := range machinePoolList.Items {
 		mp := &machinePoolList.Items[i]
-		_, visible := tree.Add(workers, mp)
+		_, visible := tree.Add(workers, mp, GroupingObject(true))
 
 		if visible {
 			if machinePoolBootstrap, err := external.Get(ctx, c, mp.Spec.Template.Spec.Bootstrap.ConfigRef, namespace); err == nil {
@@ -274,9 +276,15 @@ func addMachinePoolsToObjectTree(ctx context.Context, c client.Client, namespace
 			}
 
 			if machinePoolInfra, err := external.Get(ctx, c, &mp.Spec.Template.Spec.InfrastructureRef, namespace); err == nil {
-				tree.Add(mp, machinePoolInfra, ObjectMetaName("MachineInfrastructure"), NoEcho(true))
+				tree.Add(mp, machinePoolInfra, ObjectMetaName("MachinePoolInfrastructure"), NoEcho(true))
 			}
 		}
+
+		machines := selectMachinesControlledBy(machinesList, mp)
+
+		for _, m := range machines {
+			addMachineFunc(mp, m)
+		}
 	}
 }
 
diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml
index f38f2a1e673f..89cb36424192 100644
--- a/config/webhook/manifests.yaml
+++ b/config/webhook/manifests.yaml
@@ -24,6 +24,7 @@ webhooks:
     operations:
     - CREATE
     - UPDATE
+    - DELETE
     resources:
     - machines
   sideEffects: None
@@ -229,6 +230,7 @@ webhooks:
     operations:
     - CREATE
     - UPDATE
+    - DELETE
     resources:
     - machines
   sideEffects: None
diff --git a/exp/api/v1beta1/machinepool_types.go b/exp/api/v1beta1/machinepool_types.go
index 2634affe1db3..b28f61638a63 100644
--- a/exp/api/v1beta1/machinepool_types.go
+++ b/exp/api/v1beta1/machinepool_types.go
@@ -27,6 +27,10 @@ import (
 const (
 	// MachinePoolFinalizer is used to ensure deletion of dependencies (nodes, infra).
 	MachinePoolFinalizer = "machinepool.cluster.x-k8s.io"
+
+	// MachinePoolNameLabel is the label indicating the name of the MachinePool a Machine is controlled by.
+	// Note: The value of this label may be a hash if the MachinePool name is longer than 63 characters.
+	MachinePoolNameLabel = "cluster.x-k8s.io/pool-name"
 )
 
 // ANCHOR: MachinePoolSpec
diff --git a/exp/internal/controllers/machinepool_controller_phases.go b/exp/internal/controllers/machinepool_controller_phases.go
index cb60b4429569..023b1f2a87ad 100644
--- a/exp/internal/controllers/machinepool_controller_phases.go
+++ b/exp/internal/controllers/machinepool_controller_phases.go
@@ -25,9 +25,11 @@ import (
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/utils/pointer"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/source"
@@ -36,6 +38,7 @@ import (
 	"sigs.k8s.io/cluster-api/controllers/external"
 	capierrors "sigs.k8s.io/cluster-api/errors"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	capilabels "sigs.k8s.io/cluster-api/internal/labels"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/conditions"
@@ -284,6 +287,10 @@ func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, clu
 		conditions.WithFallbackValue(ready, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""),
 	)
 
+	if err := r.reconcileMachinePoolMachines(ctx, mp, infraConfig); err != nil {
+		return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile MachinePool machines for MachinePool `%s`", mp.Name)
+	}
+
 	if !mp.Status.InfrastructureReady {
 		log.Info("Infrastructure provider is not ready, requeuing")
 		return ctrl.Result{RequeueAfter: externalReadyWait}, nil
@@ -317,3 +324,275 @@ func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, clu
 
 	return ctrl.Result{}, nil
 }
+
+// reconcileMachinePoolMachines reconciles MachinePool Machines associated with a MachinePool.
+func (r *MachinePoolReconciler) reconcileMachinePoolMachines(ctx context.Context, mp *expv1.MachinePool, infraMachinePool *unstructured.Unstructured) error {
+	log := ctrl.LoggerFrom(ctx)
+
+	var noKind bool
+	var noSelector bool
+
+	var infraMachineKind string
+	if err := util.UnstructuredUnmarshalField(infraMachinePool, &infraMachineKind, "status", "infrastructureMachineKind"); err != nil {
+		if errors.Is(err, util.ErrUnstructuredFieldNotFound) {
+			noKind = true
+		} else {
+			return errors.Wrapf(err, "failed to retrieve infraMachineKind from infrastructure provider for MachinePool %q in namespace %q", mp.Name, mp.Namespace)
+		}
+	}
+
+	var infraMachineSelector metav1.LabelSelector
+	if err := util.UnstructuredUnmarshalField(infraMachinePool, &infraMachineSelector, "status", "infrastructureMachineSelector"); err != nil {
+		if errors.Is(err, util.ErrUnstructuredFieldNotFound) {
+			noSelector = true
+		} else {
+			return errors.Wrapf(err, "failed to retrieve infraMachineSelector from infrastructure provider for MachinePool %q in namespace %q", mp.Name, mp.Namespace)
+		}
+	}
+
+	// If both the kind and the selector are missing, the InfraMachinePool does not support MachinePool Machines
+	// and we fall back to placeholder Machines; if only one of the two is missing, return an error.
+	if noKind && noSelector {
+		// If proper MachinePool Machines aren't supported by the InfraMachinePool, just create Machines to match the replica count.
+		log.Info("No infraMachineKind or infraMachineSelector found, reconciling fallback machines")
+		return r.reconcileFallbackMachines(ctx, mp)
+	} else if noKind != noSelector {
+		log.Info("Only one of infraMachineKind or infraMachineSelector found, this is unexpected")
+		return errors.Errorf("only one of infraMachineKind or infraMachineSelector found, both must be present")
+	}
+
+	log.Info("Reconciling MachinePool Machines", "infrastructureMachineKind", infraMachineKind, "infrastructureMachineSelector", infraMachineSelector)
+	var infraMachineList unstructured.UnstructuredList
+
+	infraMachineList.SetAPIVersion(infraMachinePool.GetAPIVersion())
+	infraMachineList.SetKind(infraMachineKind)
+
+	if err := r.Client.List(ctx, &infraMachineList, client.InNamespace(mp.Namespace), client.MatchingLabels(infraMachineSelector.MatchLabels)); err != nil {
+		return errors.Wrapf(err, "failed to list infra machines for MachinePool %q in namespace %q", mp.Name, mp.Namespace)
+	}
+
+	if err := r.deleteDanglingMachines(ctx, mp); err != nil {
+		return errors.Wrapf(err, "failed to clean up orphan machines for MachinePool %q in namespace %q", mp.Name, mp.Namespace)
+	}
+
+	if err := r.createMachinesIfNotExists(ctx, mp, infraMachineList.Items); err != nil {
+		return errors.Wrapf(err, "failed to create machines for MachinePool %q in namespace %q", mp.Name, mp.Namespace)
+	}
+
+	return nil
+}
+
+// deleteDanglingMachines deletes all MachinePool Machines with an infraRef pointing to an infraMachine that no longer exists.
+// This allows us to clean up Machines when the MachinePool is scaled down.
+func (r *MachinePoolReconciler) deleteDanglingMachines(ctx context.Context, mp *expv1.MachinePool) error {
+	log := ctrl.LoggerFrom(ctx)
+
+	log.V(2).Info("Deleting orphaned machines", "machinePool", mp.Name, "namespace", mp.Namespace)
+	machineList := &clusterv1.MachineList{}
+	labels := map[string]string{
+		expv1.MachinePoolNameLabel:      mp.Name,
+		clusterv1.ClusterNameLabel:      mp.Spec.ClusterName,
+		clusterv1.MachinePoolOwnedLabel: "true",
+	}
+
+	if err := r.Client.List(ctx, machineList, client.InNamespace(mp.Namespace), client.MatchingLabels(labels)); err != nil {
+		return err
+	}
+
+	for i := range machineList.Items {
+		machine := &machineList.Items[i]
+		infraRef := machine.Spec.InfrastructureRef
+
+		if (infraRef == corev1.ObjectReference{}) {
+			log.V(2).Info("Machine has an empty infraRef, this is unexpected for non-fallback machines, will delete anyway", "machine", machine.Name, "namespace", machine.Namespace)
+			if err := r.Client.Delete(ctx, machine); err != nil {
+				return err
+			}
+			// There is no infraRef to resolve, so skip the lookup below.
+			continue
+		}
+
+		if _, err := external.Get(ctx, r.Client, &infraRef, mp.Namespace); err != nil {
+			if !apierrors.IsNotFound(err) {
+				return errors.Wrapf(err, "failed to get infraMachine %q for Machine %q in namespace %q", infraRef.Name, machine.Name, machine.Namespace)
+			}
+
+			log.V(2).Info("InfraMachine not found, deleting orphaned machine", "infraRef", infraRef, "machine", machine.Name, "namespace", machine.Namespace)
+			if err := r.Client.Delete(ctx, machine); err != nil {
+				return err
+			}
+		} else {
+			log.V(2).Info("Machine is not orphaned, nothing to do", "machine", machine.Name, "namespace", machine.Namespace)
+		}
+	}
+
+	return nil
+}
+
+// createMachinesIfNotExists creates a MachinePool Machine for each infraMachine that does not already have one, and sets the owner reference and infraRef.
+func (r *MachinePoolReconciler) createMachinesIfNotExists(ctx context.Context, mp *expv1.MachinePool, infraMachines []unstructured.Unstructured) error {
+	machineList := &clusterv1.MachineList{}
+	labels := map[string]string{
+		expv1.MachinePoolNameLabel:      mp.Name,
+		clusterv1.ClusterNameLabel:      mp.Spec.ClusterName,
+		clusterv1.MachinePoolOwnedLabel: "true",
+	}
+
+	if err := r.Client.List(ctx, machineList, client.InNamespace(mp.Namespace), client.MatchingLabels(labels)); err != nil {
+		return err
+	}
+
+	setOfInfraRefNames := map[string]struct{}{}
+	for _, machine := range machineList.Items {
+		infraRef := machine.Spec.InfrastructureRef
+		setOfInfraRefNames[infraRef.Name] = struct{}{}
+	}
+
+	for i := range infraMachines {
+		infraMachine := &infraMachines[i]
+		if _, infraRefExists := setOfInfraRefNames[infraMachine.GetName()]; infraRefExists {
+			continue
+		}
+
+		machine := getNewMachine(mp, infraMachine, false)
+		if err := r.Client.Create(ctx, machine); err != nil {
+			return errors.Wrapf(err, "failed to create new Machine for infraMachine %q in namespace %q", infraMachine.GetName(), infraMachine.GetNamespace())
+		}
+
+		// Initialize the patch helper before mutating the infraMachine so the owner reference is included in the patch.
+		patchHelper, err := patch.NewHelper(infraMachine, r.Client)
+		if err != nil {
+			return errors.Wrapf(err, "failed to create patch helper for infraMachine %q in namespace %q", infraMachine.GetName(), infraMachine.GetNamespace())
+		}
+
+		// Set the new Machine as the controller owner of the infraMachine. The infraMachine is created by the
+		// infraMachinePool, so the Machine controller does not establish this ownership itself.
+		infraMachine.SetOwnerReferences([]metav1.OwnerReference{
+			*metav1.NewControllerRef(machine, machine.GroupVersionKind()),
+		})
+
+		if err := patchHelper.Patch(ctx, infraMachine); err != nil {
+			return errors.Wrapf(err, "failed to patch infraMachine %q in namespace %q", infraMachine.GetName(), infraMachine.GetNamespace())
+		}
+	}
+	return nil
+}
+
+// reconcileFallbackMachines creates Machines for MachinePools that do not support MachinePool Machines.
+// These Machines contain only basic information and exist to give a consistent user experience across
+// MachinePools and MachineDeployments; they do not have an infraRef.
+func (r *MachinePoolReconciler) reconcileFallbackMachines(ctx context.Context, mp *expv1.MachinePool) error {
+	machineList := &clusterv1.MachineList{}
+	labels := map[string]string{
+		expv1.MachinePoolNameLabel:      mp.Name,
+		clusterv1.ClusterNameLabel:      mp.Spec.ClusterName,
+		clusterv1.MachinePoolOwnedLabel: "true",
+	}
+
+	if err := r.Client.List(ctx, machineList, client.InNamespace(mp.Namespace), client.MatchingLabels(labels)); err != nil {
+		return err
+	}
+
+	numMachines := len(machineList.Items)
+
+	if mp.Spec.Replicas == nil {
+		return errors.New("MachinePool.Spec.Replicas is nil, this is unexpected")
+	}
+
+	for numMachines > int(*mp.Spec.Replicas) {
+		machine := &machineList.Items[numMachines-1]
+
+		// Initialize the patch helper before mutating the Machine so the annotation is included in the patch.
+		patchHelper, err := patch.NewHelper(machine, r.Client)
+		if err != nil {
+			return err
+		}
+
+		if machine.Annotations == nil {
+			machine.Annotations = make(map[string]string)
+		}
+		machine.Annotations[clusterv1.AllowFallbackMachineDeletionAnnotation] = "true"
+
+		// Patch the Machine to persist the annotation; the validating webhook rejects the deletion without it.
+		if err := patchHelper.Patch(ctx, machine); err != nil {
+			return err
+		}
+
+		if err := r.Client.Delete(ctx, machine); err != nil {
+			return errors.Wrapf(err, "failed to delete Machine %q in namespace %q", machine.Name, machine.Namespace)
+		}
+		numMachines--
+	}
+
+	for numMachines < int(*mp.Spec.Replicas) {
+		machine := getNewMachine(mp, nil, true)
+		if err := r.Client.Create(ctx, machine); err != nil {
+			return errors.Wrap(err, "failed to create new Machine")
+		}
+		numMachines++
+	}
+
+	return nil
+}
+
+func setMachineLabel(machine *clusterv1.Machine, key string) {
+	if machine.Labels == nil {
+		machine.Labels = make(map[string]string)
+	}
+	machine.Labels[key] = "true"
+}
+
+// getNewMachine creates a new Machine object. The name of the newly created resource is
+// generated by the API server, since only the generateName field is set.
+func getNewMachine(mp *expv1.MachinePool, infraMachine *unstructured.Unstructured, isFallback bool) *clusterv1.Machine {
+	gv := clusterv1.GroupVersion
+
+	infraRef := corev1.ObjectReference{}
+	if infraMachine != nil {
+		infraRef.APIVersion = infraMachine.GetAPIVersion()
+		infraRef.Kind = infraMachine.GetKind()
+		infraRef.Name = infraMachine.GetName()
+		infraRef.Namespace = infraMachine.GetNamespace()
+	}
+
+	annotations := mp.Spec.Template.Annotations
+	if annotations == nil {
+		annotations = make(map[string]string)
+	}
+
+	machine := &clusterv1.Machine{
+		// Note: The DockerMachine controller expects the name of the DockerMachine resource to match the Docker process.
+		// In this case, the Docker process, DockerMachine, and Machine will all have the same name, but maybe we want to change that.
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: fmt.Sprintf("%s-", mp.Name),
+			// Note: by setting the ownerRef on creation we signal to the Machine controller that this is not a stand-alone Machine.
+			OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(mp, mp.GroupVersionKind())},
+			Namespace:       mp.Namespace,
+			Labels:          make(map[string]string),
+			Annotations:     annotations,
+		},
+		TypeMeta: metav1.TypeMeta{
+			Kind:       gv.WithKind("Machine").Kind,
+			APIVersion: gv.String(),
+		},
+		Spec: clusterv1.MachineSpec{
+			ClusterName:       mp.Spec.ClusterName,
+			InfrastructureRef: infraRef,
+		},
+	}
+
+	// Set the labels from mp.Spec.Template.Labels as labels for the new Machine.
+	// Note: We can't just set `mp.Spec.Template.Labels` directly and thus "share" the labels
+	// map between the Machine and mp.Spec.Template.Labels. This would mean that adding the
+	// MachinePoolNameLabel later on the Machine would also add the label to
+	// mp.Spec.Template.Labels and thus modify the labels of the MachinePool.
+	for k, v := range mp.Spec.Template.Labels {
+		machine.Labels[k] = v
+	}
+
+	// Enforce that the MachinePoolNameLabel is always set on the Machine so it can be traced back to its MachinePool.
+	// Note: the value may be hashed if the MachinePool name is longer than 63 characters.
+ machine.Labels[expv1.MachinePoolNameLabel] = capilabels.MustFormatValue(mp.Name) + machine.Labels[clusterv1.ClusterNameLabel] = mp.Spec.ClusterName + setMachineLabel(machine, clusterv1.MachinePoolOwnedLabel) + + if isFallback { + setMachineLabel(machine, clusterv1.FallbackMachineLabel) + } + + return machine +} diff --git a/exp/internal/controllers/machinepool_controller_phases_test.go b/exp/internal/controllers/machinepool_controller_phases_test.go index dee57c28ed9e..6004b61d45fc 100644 --- a/exp/internal/controllers/machinepool_controller_phases_test.go +++ b/exp/internal/controllers/machinepool_controller_phases_test.go @@ -33,8 +33,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/external" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/internal/test/builder" + "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/kubeconfig" ) @@ -1013,6 +1015,497 @@ func TestReconcileMachinePoolInfrastructure(t *testing.T) { } } +func TestReconcileMachinePoolMachines(t *testing.T) { + defaultCluster := clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: metav1.NamespaceDefault, + }, + } + + defaultMachinePool := expv1.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machinepool-test", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: defaultCluster.Name, + }, + }, + Spec: expv1.MachinePoolSpec{ + ClusterName: defaultCluster.Name, + Replicas: pointer.Int32(2), + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", + Kind: "BootstrapConfig", + Name: "bootstrap-config1", + }, + }, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + Kind: "InfrastructureConfig", + Name: "infra-config1", + }, + }, + }, + }, + } + + infraMachine1 := unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "InfrastructureMachine", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "infra-machine1", + "namespace": metav1.NamespaceDefault, + "labels": map[string]interface{}{ + clusterv1.ClusterNameLabel: defaultCluster.Name, + expv1.MachinePoolNameLabel: defaultMachinePool.Name, + clusterv1.MachinePoolOwnedLabel: "true", + }, + }, + }, + } + + infraMachine2 := unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "InfrastructureMachine", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "infra-machine2", + "namespace": metav1.NamespaceDefault, + "labels": map[string]interface{}{ + clusterv1.ClusterNameLabel: defaultCluster.Name, + expv1.MachinePoolNameLabel: defaultMachinePool.Name, + clusterv1.MachinePoolOwnedLabel: "true", + }, + }, + }, + } + + machine1 := clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine1", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: defaultCluster.Name, + expv1.MachinePoolNameLabel: "machinepool-test", + clusterv1.MachinePoolOwnedLabel: "true", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: clusterName, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + Kind: "InfrastructureMachine", + Name: "infra-machine1", + 
Namespace: metav1.NamespaceDefault, + }, + }, + } + + machine2 := clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine2", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: defaultCluster.Name, + expv1.MachinePoolNameLabel: "machinepool-test", + clusterv1.MachinePoolOwnedLabel: "true", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: clusterName, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + Kind: "InfrastructureMachine", + Name: "infra-machine2", + Namespace: metav1.NamespaceDefault, + }, + }, + } + + machine3 := clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine3", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: defaultCluster.Name, + expv1.MachinePoolNameLabel: "machinepool-test", + clusterv1.MachinePoolOwnedLabel: "true", + clusterv1.FallbackMachineLabel: "true", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: clusterName, + }, + } + + machine4 := clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine4", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: defaultCluster.Name, + expv1.MachinePoolNameLabel: "machinepool-test", + clusterv1.MachinePoolOwnedLabel: "true", + clusterv1.FallbackMachineLabel: "true", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: clusterName, + }, + } + + machine5 := clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine5", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: defaultCluster.Name, + expv1.MachinePoolNameLabel: "machinepool-test", + clusterv1.MachinePoolOwnedLabel: "true", + clusterv1.FallbackMachineLabel: "true", + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: clusterName, + }, + } + + testCases := []struct { + name string + bootstrapConfig map[string]interface{} + infraConfig map[string]interface{} + machines []clusterv1.Machine + infraMachines []unstructured.Unstructured + machinepool *expv1.MachinePool + expectError bool + expectFallback bool + }{ + { + name: "two infra machines, should create two machinepool machines", + infraConfig: map[string]interface{}{ + "kind": "InfrastructureConfig", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "infra-config1", + "namespace": metav1.NamespaceDefault, + }, + "spec": map[string]interface{}{ + "providerIDList": []interface{}{ + "test://id-1", + }, + }, + "status": map[string]interface{}{ + "ready": true, + "addresses": []interface{}{ + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.1", + }, + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.2", + }, + }, + "infrastructureMachineKind": "InfrastructureMachine", + "infrastructureMachineSelector": map[string]interface{}{ + "matchLabels": map[string]interface{}{ + clusterv1.ClusterNameLabel: defaultCluster.Name, + expv1.MachinePoolNameLabel: defaultMachinePool.Name, + clusterv1.MachinePoolOwnedLabel: "true", + }, + }, + }, + }, + infraMachines: []unstructured.Unstructured{ + infraMachine1, + infraMachine2, + }, + expectError: false, + }, + { + name: "two infra machines and two machinepool machines, nothing to do", + infraConfig: map[string]interface{}{ + "kind": "InfrastructureConfig", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "infra-config1", + "namespace": 
metav1.NamespaceDefault, + }, + "spec": map[string]interface{}{ + "providerIDList": []interface{}{ + "test://id-1", + }, + }, + "status": map[string]interface{}{ + "ready": true, + "addresses": []interface{}{ + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.1", + }, + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.2", + }, + }, + "infrastructureMachineKind": "InfrastructureMachine", + "infrastructureMachineSelector": map[string]interface{}{ + "matchLabels": map[string]interface{}{ + clusterv1.ClusterNameLabel: defaultCluster.Name, + expv1.MachinePoolNameLabel: defaultMachinePool.Name, + clusterv1.MachinePoolOwnedLabel: "true", + }, + }, + }, + }, + machines: []clusterv1.Machine{ + machine1, + machine2, + }, + infraMachines: []unstructured.Unstructured{ + infraMachine1, + infraMachine2, + }, + expectError: false, + }, + { + name: "one extra machine, should delete", + infraConfig: map[string]interface{}{ + "kind": "InfrastructureConfig", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "infra-config1", + "namespace": metav1.NamespaceDefault, + }, + "spec": map[string]interface{}{ + "providerIDList": []interface{}{ + "test://id-1", + }, + }, + "status": map[string]interface{}{ + "ready": true, + "addresses": []interface{}{ + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.1", + }, + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.2", + }, + }, + "infrastructureMachineKind": "InfrastructureMachine", + "infrastructureMachineSelector": map[string]interface{}{ + "matchLabels": map[string]interface{}{ + clusterv1.ClusterNameLabel: defaultCluster.Name, + expv1.MachinePoolNameLabel: defaultMachinePool.Name, + clusterv1.MachinePoolOwnedLabel: "true", + }, + }, + }, + }, + machines: []clusterv1.Machine{ + machine1, + machine2, + }, + infraMachines: []unstructured.Unstructured{ + infraMachine1, + }, + expectError: false, + }, + { + name: "fallback machinepool, nothing to do", + infraConfig: map[string]interface{}{ + "kind": "InfrastructureConfig", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "infra-config1", + "namespace": metav1.NamespaceDefault, + }, + "spec": map[string]interface{}{ + "providerIDList": []interface{}{ + "test://id-1", + }, + }, + "status": map[string]interface{}{ + "ready": true, + "addresses": []interface{}{ + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.1", + }, + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.2", + }, + }, + }, + }, + machines: []clusterv1.Machine{ + machine3, + machine4, + }, + expectError: false, + expectFallback: true, + }, + { + name: "fallback machinepool, create one more machine", + infraConfig: map[string]interface{}{ + "kind": "InfrastructureConfig", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "infra-config1", + "namespace": metav1.NamespaceDefault, + }, + "spec": map[string]interface{}{ + "providerIDList": []interface{}{ + "test://id-1", + }, + }, + "status": map[string]interface{}{ + "ready": true, + "addresses": []interface{}{ + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.1", + }, + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.2", + }, + }, + }, + }, + machines: []clusterv1.Machine{ + machine3, + }, + expectError: false, + expectFallback: true, + }, + { + name: "fallback machinepool, delete extra 
machine",
			infraConfig: map[string]interface{}{
				"kind":       "InfrastructureConfig",
				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
				"metadata": map[string]interface{}{
					"name":      "infra-config1",
					"namespace": metav1.NamespaceDefault,
				},
				"spec": map[string]interface{}{
					"providerIDList": []interface{}{
						"test://id-1",
					},
				},
				"status": map[string]interface{}{
					"ready": true,
					"addresses": []interface{}{
						map[string]interface{}{
							"type":    "InternalIP",
							"address": "10.0.0.1",
						},
						map[string]interface{}{
							"type":    "InternalIP",
							"address": "10.0.0.2",
						},
					},
				},
			},
			machines: []clusterv1.Machine{
				machine3,
				machine4,
				machine5,
			},
			expectError:    false,
			expectFallback: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			g := NewWithT(t)

			if tc.machinepool == nil {
				tc.machinepool = defaultMachinePool.DeepCopy()
			}

			objs := []client.Object{defaultCluster.DeepCopy()}

			infraConfig := &unstructured.Unstructured{Object: tc.infraConfig}

			objs = append(objs, tc.machinepool, infraConfig.DeepCopy())

			for _, infraMachine := range tc.infraMachines {
				objs = append(objs, infraMachine.DeepCopy())
			}

			for _, machine := range tc.machines {
				objs = append(objs, machine.DeepCopy())
			}

			r := &MachinePoolReconciler{
				Client: fake.NewClientBuilder().WithObjects(objs...).Build(),
			}

			err := r.reconcileMachinePoolMachines(ctx, tc.machinepool, infraConfig)

			r.reconcilePhase(tc.machinepool)
			if tc.expectError {
				g.Expect(err).ToNot(BeNil())
			} else {
				g.Expect(err).To(BeNil())

				g.Eventually(func() bool {
					machineList := &clusterv1.MachineList{}
					labels := map[string]string{
						clusterv1.ClusterNameLabel:      defaultCluster.Name,
						expv1.MachinePoolNameLabel:      tc.machinepool.Name,
						clusterv1.MachinePoolOwnedLabel: "true",
					}
					if err := r.Client.List(ctx, machineList, client.InNamespace(tc.machinepool.Namespace), client.MatchingLabels(labels)); err != nil {
						t.Log("Failed to list machines with error:", err)
						return false
					}

					if tc.expectFallback {
						if len(machineList.Items) != int(*tc.machinepool.Spec.Replicas) {
							t.Logf("Machine list length %d != replicas %d", len(machineList.Items), *tc.machinepool.Spec.Replicas)

							return false
						}
					} else {
						if len(machineList.Items) != len(tc.infraMachines) {
							t.Logf("Machine list length %d != infraMachine list length %d", len(machineList.Items), len(tc.infraMachines))

							return false
						}

						for i := range machineList.Items {
							machine := &machineList.Items[i]
							infraMachine, err := external.Get(ctx, r.Client, &machine.Spec.InfrastructureRef, machine.Namespace)
							if err != nil {
								t.Log("Failed to get infraMachine with error:", err)
								return false
							}

							if !util.IsControlledBy(infraMachine, machine) {
								t.Log("InfraMachine is not controlled by machine")
								return false
							}
						}
					}

					return true
				}, timeout).Should(BeTrue())
			}
		})
	}
}

 func TestReconcileMachinePoolScaleToFromZero(t *testing.T) {
 	g := NewWithT(t)
 
diff --git a/exp/internal/controllers/suite_test.go b/exp/internal/controllers/suite_test.go
index 5cd27d812c97..42f81a48285d 100644
--- a/exp/internal/controllers/suite_test.go
+++ b/exp/internal/controllers/suite_test.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"os"
 	"testing"
+	"time"
 
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -31,8 +32,9 @@ import (
 )
 
 var (
-	env *envtest.Environment
-	ctx = ctrl.SetupSignalHandler()
+	timeout = time.Second * 30
+	env     *envtest.Environment
+	ctx     = 
ctrl.SetupSignalHandler() ) func TestMain(m *testing.M) { diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index 743a3957b224..c3c050d61ddf 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -267,6 +267,8 @@ func patchMachine(ctx context.Context, patchHelper *patch.Helper, machine *clust } func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine) (ctrl.Result, error) { + log := ctrl.LoggerFrom(ctx) + // If the machine is a stand-alone one, meaning not originated from a MachineDeployment, then set it as directly // owned by the Cluster (if not already present). if r.shouldAdopt(m) { @@ -278,6 +280,12 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, }) } + if _, isFallbackMachine := m.Labels[clusterv1.FallbackMachineLabel]; isFallbackMachine { + log.Info("Skipping reconciliation for fallback machine") + // Do not reconcile if the machine is a fallback machine. + return ctrl.Result{}, nil + } + phases := []func(context.Context, *clusterv1.Cluster, *clusterv1.Machine) (ctrl.Result, error){ r.reconcileBootstrap, r.reconcileInfrastructure, diff --git a/internal/controllers/machine/machine_controller_noderef.go b/internal/controllers/machine/machine_controller_noderef.go index d2664aca9133..514ee4fd99e0 100644 --- a/internal/controllers/machine/machine_controller_noderef.go +++ b/internal/controllers/machine/machine_controller_noderef.go @@ -136,6 +136,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, cluster *clusterv1.Clust // Do the remaining node health checks, then set the node health to true if all checks pass. status, message := summarizeNodeConditions(node) if status == corev1.ConditionFalse { + // TODO: There's a bug where the node healthy condition is never set to true. conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeConditionsFailedReason, clusterv1.ConditionSeverityWarning, message) return ctrl.Result{}, nil } diff --git a/test/infrastructure/docker/api/v1alpha3/conversion.go b/test/infrastructure/docker/api/v1alpha3/conversion.go index 6bdb64825187..0edfc3904947 100644 --- a/test/infrastructure/docker/api/v1alpha3/conversion.go +++ b/test/infrastructure/docker/api/v1alpha3/conversion.go @@ -78,13 +78,34 @@ func (dst *DockerClusterList) ConvertFrom(srcRaw conversion.Hub) error { func (src *DockerMachine) ConvertTo(dstRaw conversion.Hub) error { dst := dstRaw.(*infrav1.DockerMachine) - return Convert_v1alpha3_DockerMachine_To_v1beta1_DockerMachine(src, dst, nil) + if err := Convert_v1alpha3_DockerMachine_To_v1beta1_DockerMachine(src, dst, nil); err != nil { + return err + } + + // Manually restore data. 
+ restored := &infrav1.DockerMachine{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + + dst.Spec.InstanceName = restored.Spec.InstanceName + + return nil } func (dst *DockerMachine) ConvertFrom(srcRaw conversion.Hub) error { src := srcRaw.(*infrav1.DockerMachine) - return Convert_v1beta1_DockerMachine_To_v1alpha3_DockerMachine(src, dst, nil) + if err := Convert_v1beta1_DockerMachine_To_v1alpha3_DockerMachine(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata + if err := utilconversion.MarshalData(src, dst); err != nil { + return err + } + + return nil } func (src *DockerMachineList) ConvertTo(dstRaw conversion.Hub) error { @@ -113,6 +134,7 @@ func (src *DockerMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { } dst.Spec.Template.ObjectMeta = restored.Spec.Template.ObjectMeta + dst.Spec.Template.Spec.InstanceName = restored.Spec.Template.Spec.InstanceName return nil } @@ -154,3 +176,7 @@ func Convert_v1beta1_DockerMachineTemplateResource_To_v1alpha3_DockerMachineTemp // NOTE: custom conversion func is required because spec.template.metadata has been added in v1beta1. return autoConvert_v1beta1_DockerMachineTemplateResource_To_v1alpha3_DockerMachineTemplateResource(in, out, s) } + +func Convert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(in *infrav1.DockerMachineSpec, out *DockerMachineSpec, s apiconversion.Scope) error { + return autoConvert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(in, out, s) +} diff --git a/test/infrastructure/docker/api/v1alpha3/zz_generated.conversion.go b/test/infrastructure/docker/api/v1alpha3/zz_generated.conversion.go index d86ac186fff9..363a145af4d1 100644 --- a/test/infrastructure/docker/api/v1alpha3/zz_generated.conversion.go +++ b/test/infrastructure/docker/api/v1alpha3/zz_generated.conversion.go @@ -108,11 +108,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DockerMachineSpec)(nil), (*DockerMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(a.(*v1beta1.DockerMachineSpec), b.(*DockerMachineSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*DockerMachineStatus)(nil), (*v1beta1.DockerMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_DockerMachineStatus_To_v1beta1_DockerMachineStatus(a.(*DockerMachineStatus), b.(*v1beta1.DockerMachineStatus), scope) }); err != nil { @@ -173,6 +168,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta1.DockerMachineSpec)(nil), (*DockerMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(a.(*v1beta1.DockerMachineSpec), b.(*DockerMachineSpec), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta1.DockerMachineTemplateResource)(nil), (*DockerMachineTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_DockerMachineTemplateResource_To_v1alpha3_DockerMachineTemplateResource(a.(*v1beta1.DockerMachineTemplateResource), b.(*DockerMachineTemplateResource), scope) }); err != nil { @@ -480,6 +480,7 @@ func Convert_v1alpha3_DockerMachineSpec_To_v1beta1_DockerMachineSpec(in *DockerM } func 
autoConvert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(in *v1beta1.DockerMachineSpec, out *DockerMachineSpec, s conversion.Scope) error { + // WARNING: in.InstanceName requires manual conversion: does not exist in peer-type out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID)) out.CustomImage = in.CustomImage out.PreLoadImages = *(*[]string)(unsafe.Pointer(&in.PreLoadImages)) @@ -488,11 +489,6 @@ func autoConvert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(in *v1b return nil } -// Convert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec is an autogenerated conversion function. -func Convert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(in *v1beta1.DockerMachineSpec, out *DockerMachineSpec, s conversion.Scope) error { - return autoConvert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(in, out, s) -} - func autoConvert_v1alpha3_DockerMachineStatus_To_v1beta1_DockerMachineStatus(in *DockerMachineStatus, out *v1beta1.DockerMachineStatus, s conversion.Scope) error { out.Ready = in.Ready out.LoadBalancerConfigured = in.LoadBalancerConfigured diff --git a/test/infrastructure/docker/api/v1alpha4/conversion.go b/test/infrastructure/docker/api/v1alpha4/conversion.go index a1b42d8f4d53..f7a362420e1e 100644 --- a/test/infrastructure/docker/api/v1alpha4/conversion.go +++ b/test/infrastructure/docker/api/v1alpha4/conversion.go @@ -96,13 +96,34 @@ func (dst *DockerClusterTemplateList) ConvertFrom(srcRaw conversion.Hub) error { func (src *DockerMachine) ConvertTo(dstRaw conversion.Hub) error { dst := dstRaw.(*infrav1.DockerMachine) - return Convert_v1alpha4_DockerMachine_To_v1beta1_DockerMachine(src, dst, nil) + if err := Convert_v1alpha4_DockerMachine_To_v1beta1_DockerMachine(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &infrav1.DockerMachine{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + + dst.Spec.InstanceName = restored.Spec.InstanceName + + return nil } func (dst *DockerMachine) ConvertFrom(srcRaw conversion.Hub) error { src := srcRaw.(*infrav1.DockerMachine) - return Convert_v1beta1_DockerMachine_To_v1alpha4_DockerMachine(src, dst, nil) + if err := Convert_v1beta1_DockerMachine_To_v1alpha4_DockerMachine(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata + if err := utilconversion.MarshalData(src, dst); err != nil { + return err + } + + return nil } func (src *DockerMachineList) ConvertTo(dstRaw conversion.Hub) error { @@ -131,6 +152,7 @@ func (src *DockerMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { } dst.Spec.Template.ObjectMeta = restored.Spec.Template.ObjectMeta + dst.Spec.Template.Spec.InstanceName = restored.Spec.Template.Spec.InstanceName return nil } @@ -171,3 +193,7 @@ func Convert_v1beta1_DockerMachineTemplateResource_To_v1alpha4_DockerMachineTemp // NOTE: custom conversion func is required because spec.template.metadata has been added in v1beta1. 
return autoConvert_v1beta1_DockerMachineTemplateResource_To_v1alpha4_DockerMachineTemplateResource(in, out, s) } + +func Convert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(in *infrav1.DockerMachineSpec, out *DockerMachineSpec, s apiconversion.Scope) error { + return autoConvert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(in, out, s) +} diff --git a/test/infrastructure/docker/api/v1alpha4/zz_generated.conversion.go b/test/infrastructure/docker/api/v1alpha4/zz_generated.conversion.go index 9594641da789..650dcc92a355 100644 --- a/test/infrastructure/docker/api/v1alpha4/zz_generated.conversion.go +++ b/test/infrastructure/docker/api/v1alpha4/zz_generated.conversion.go @@ -158,11 +158,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DockerMachineSpec)(nil), (*DockerMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(a.(*v1beta1.DockerMachineSpec), b.(*DockerMachineSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*DockerMachineStatus)(nil), (*v1beta1.DockerMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha4_DockerMachineStatus_To_v1beta1_DockerMachineStatus(a.(*DockerMachineStatus), b.(*v1beta1.DockerMachineStatus), scope) }); err != nil { @@ -233,6 +228,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta1.DockerMachineSpec)(nil), (*DockerMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(a.(*v1beta1.DockerMachineSpec), b.(*DockerMachineSpec), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta1.DockerMachineTemplateResource)(nil), (*DockerMachineTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_DockerMachineTemplateResource_To_v1alpha4_DockerMachineTemplateResource(a.(*v1beta1.DockerMachineTemplateResource), b.(*DockerMachineTemplateResource), scope) }); err != nil { @@ -686,6 +686,7 @@ func Convert_v1alpha4_DockerMachineSpec_To_v1beta1_DockerMachineSpec(in *DockerM } func autoConvert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(in *v1beta1.DockerMachineSpec, out *DockerMachineSpec, s conversion.Scope) error { + // WARNING: in.InstanceName requires manual conversion: does not exist in peer-type out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID)) out.CustomImage = in.CustomImage out.PreLoadImages = *(*[]string)(unsafe.Pointer(&in.PreLoadImages)) @@ -694,11 +695,6 @@ func autoConvert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(in *v1b return nil } -// Convert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec is an autogenerated conversion function. 
-func Convert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(in *v1beta1.DockerMachineSpec, out *DockerMachineSpec, s conversion.Scope) error { - return autoConvert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(in, out, s) -} - func autoConvert_v1alpha4_DockerMachineStatus_To_v1beta1_DockerMachineStatus(in *DockerMachineStatus, out *v1beta1.DockerMachineStatus, s conversion.Scope) error { out.Ready = in.Ready out.LoadBalancerConfigured = in.LoadBalancerConfigured diff --git a/test/infrastructure/docker/api/v1beta1/dockermachine_types.go b/test/infrastructure/docker/api/v1beta1/dockermachine_types.go index d99e30be5455..f798d56d73cc 100644 --- a/test/infrastructure/docker/api/v1beta1/dockermachine_types.go +++ b/test/infrastructure/docker/api/v1beta1/dockermachine_types.go @@ -30,6 +30,13 @@ const ( // DockerMachineSpec defines the desired state of DockerMachine. type DockerMachineSpec struct { + // InstanceName indicates the name of the Docker container associated with this DockerMachine. + // Since the provider ID is not set until after the container is online, this field is used to + // maintain the association. If it is not populated, the name of the container will be the same + // as the name of the owner Machine. + // +optional + InstanceName string `json:"instanceName,omitempty"` + // ProviderID will be the container name in ProviderID format (docker:////) // +optional ProviderID *string `json:"providerID,omitempty"` diff --git a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepools.yaml b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepools.yaml index 1114d59ca51d..1587298ce293 100644 --- a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepools.yaml +++ b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepools.yaml @@ -502,6 +502,56 @@ spec: - type type: object type: array + infrastructureMachineKind: + description: InfrastructureMachineKind is the kind of the infrastructure + resources behind MachinePool Machines. + type: string + infrastructureMachineSelector: + description: 'InfrastructureMachineSelector is a label query over + the infrastructure resources behind MachinePool Machines. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object instances: description: Instances contains the status for each instance in the pool diff --git a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachines.yaml b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachines.yaml index 58bb2c449c88..4171bb20104f 100644 --- a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachines.yaml +++ b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachines.yaml @@ -378,6 +378,13 @@ spec: type: boolean type: object type: array + instanceName: + description: InstanceName indicates the name of the Docker container + associated with this DockerMachine. Since the provider ID is not + set until after the container is online, this field is used to maintain + the association. If it is not populated, the name of the container + will be the same as the name of the owner Machine. + type: string preLoadImages: description: PreLoadImages allows to pre-load images in a newly created machine. This can be used to speed up tests by avoiding e.g. to diff --git a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinetemplates.yaml b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinetemplates.yaml index 7d2f36e390dd..7857d3428185 100644 --- a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinetemplates.yaml +++ b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinetemplates.yaml @@ -273,6 +273,14 @@ spec: type: boolean type: object type: array + instanceName: + description: InstanceName indicates the name of the Docker + container associated with this DockerMachine. Since the + provider ID is not set until after the container is online, + this field is used to maintain the association. If it is + not populated, the name of the container will be the same + as the name of the owner Machine. + type: string preLoadImages: description: PreLoadImages allows to pre-load images in a newly created machine. 
This can be used to speed up tests
diff --git a/test/infrastructure/docker/config/webhook/manifests.yaml b/test/infrastructure/docker/config/webhook/manifests.yaml
index 2b0868d36253..ebd806b28fce 100644
--- a/test/infrastructure/docker/config/webhook/manifests.yaml
+++ b/test/infrastructure/docker/config/webhook/manifests.yaml
@@ -49,6 +49,28 @@ webhooks:
     resources:
     - dockerclustertemplates
   sideEffects: None
+- admissionReviewVersions:
+  - v1
+  - v1beta1
+  clientConfig:
+    service:
+      name: webhook-service
+      namespace: system
+      path: /mutate-infrastructure-cluster-x-k8s-io-v1beta1-dockermachinepool
+  failurePolicy: Fail
+  matchPolicy: Equivalent
+  name: default.dockermachinepool.infrastructure.cluster.x-k8s.io
+  rules:
+  - apiGroups:
+    - infrastructure.cluster.x-k8s.io
+    apiVersions:
+    - v1beta1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - dockermachinepools
+  sideEffects: None
 ---
 apiVersion: admissionregistration.k8s.io/v1
 kind: ValidatingWebhookConfiguration
diff --git a/test/infrastructure/docker/exp/api/v1alpha3/conversion.go b/test/infrastructure/docker/exp/api/v1alpha3/conversion.go
index 3eeb15387b8b..55d8878589e6 100644
--- a/test/infrastructure/docker/exp/api/v1alpha3/conversion.go
+++ b/test/infrastructure/docker/exp/api/v1alpha3/conversion.go
@@ -17,21 +17,45 @@ limitations under the License.
 package v1alpha3
 
 import (
+	apiconversion "k8s.io/apimachinery/pkg/conversion"
 	"sigs.k8s.io/controller-runtime/pkg/conversion"
 
 	infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1"
+	utilconversion "sigs.k8s.io/cluster-api/util/conversion"
 )
 
 func (src *DockerMachinePool) ConvertTo(dstRaw conversion.Hub) error {
 	dst := dstRaw.(*infraexpv1.DockerMachinePool)
-	return Convert_v1alpha3_DockerMachinePool_To_v1beta1_DockerMachinePool(src, dst, nil)
+	if err := Convert_v1alpha3_DockerMachinePool_To_v1beta1_DockerMachinePool(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Manually restore data.
+	restored := &infraexpv1.DockerMachinePool{}
+	if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+		return err
+	}
+
+	dst.Status.InfrastructureMachineKind = restored.Status.InfrastructureMachineKind
+	dst.Status.InfrastructureMachineSelector = restored.Status.InfrastructureMachineSelector
+
+	return nil
 }
 
 func (dst *DockerMachinePool) ConvertFrom(srcRaw conversion.Hub) error {
 	src := srcRaw.(*infraexpv1.DockerMachinePool)
 
-	return Convert_v1beta1_DockerMachinePool_To_v1alpha3_DockerMachinePool(src, dst, nil)
+	if err := Convert_v1beta1_DockerMachinePool_To_v1alpha3_DockerMachinePool(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Preserve Hub data on down-conversion except for metadata.
+	if err := utilconversion.MarshalData(src, dst); err != nil {
+		return err
+	}
+
+	return nil
 }
 
 func (src *DockerMachinePoolList) ConvertTo(dstRaw conversion.Hub) error {
@@ -45,3 +69,8 @@ func (dst *DockerMachinePoolList) ConvertFrom(srcRaw conversion.Hub) error {
 
 	return Convert_v1beta1_DockerMachinePoolList_To_v1alpha3_DockerMachinePoolList(src, dst, nil)
 }
+
+func Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolStatus(in *infraexpv1.DockerMachinePoolStatus, out *DockerMachinePoolStatus, s apiconversion.Scope) error {
+	// NOTE: custom conversion func is required because status.infrastructureMachineSelector and status.infrastructureMachineKind have been added in v1beta1 and do not exist in v1alpha3.
+ return autoConvert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolStatus(in, out, s) +} diff --git a/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.conversion.go b/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.conversion.go index 311ad6ada301..2d140d5072c9 100644 --- a/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.conversion.go +++ b/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.conversion.go @@ -95,7 +95,7 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DockerMachinePoolStatus)(nil), (*DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + if err := s.AddConversionFunc((*v1beta1.DockerMachinePoolStatus)(nil), (*DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolStatus(a.(*v1beta1.DockerMachinePoolStatus), b.(*DockerMachinePoolStatus), scope) }); err != nil { return err @@ -339,10 +339,7 @@ func autoConvert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolSt } else { out.Conditions = nil } + // WARNING: in.InfrastructureMachineSelector requires manual conversion: does not exist in peer-type + // WARNING: in.InfrastructureMachineKind requires manual conversion: does not exist in peer-type return nil } - -// Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolStatus is an autogenerated conversion function. -func Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolStatus(in *v1beta1.DockerMachinePoolStatus, out *DockerMachinePoolStatus, s conversion.Scope) error { - return autoConvert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolStatus(in, out, s) -} diff --git a/test/infrastructure/docker/exp/api/v1alpha4/conversion.go b/test/infrastructure/docker/exp/api/v1alpha4/conversion.go index acc200d9e51a..e85bdbcd34e6 100644 --- a/test/infrastructure/docker/exp/api/v1alpha4/conversion.go +++ b/test/infrastructure/docker/exp/api/v1alpha4/conversion.go @@ -17,21 +17,45 @@ limitations under the License. package v1alpha4 import ( + apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" + utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func (src *DockerMachinePool) ConvertTo(dstRaw conversion.Hub) error { dst := dstRaw.(*infraexpv1.DockerMachinePool) - return Convert_v1alpha4_DockerMachinePool_To_v1beta1_DockerMachinePool(src, dst, nil) + if err := Convert_v1alpha4_DockerMachinePool_To_v1beta1_DockerMachinePool(src, dst, nil); err != nil { + return err + } + + // Manually restore data. 
+	restored := &infraexpv1.DockerMachinePool{}
+	if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+		return err
+	}
+
+	dst.Status.InfrastructureMachineKind = restored.Status.InfrastructureMachineKind
+	dst.Status.InfrastructureMachineSelector = restored.Status.InfrastructureMachineSelector
+
+	return nil
 }
 
 func (dst *DockerMachinePool) ConvertFrom(srcRaw conversion.Hub) error {
 	src := srcRaw.(*infraexpv1.DockerMachinePool)
 
-	return Convert_v1beta1_DockerMachinePool_To_v1alpha4_DockerMachinePool(src, dst, nil)
+	if err := Convert_v1beta1_DockerMachinePool_To_v1alpha4_DockerMachinePool(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Preserve Hub data on down-conversion except for metadata.
+	if err := utilconversion.MarshalData(src, dst); err != nil {
+		return err
+	}
+
+	return nil
 }
 
 func (src *DockerMachinePoolList) ConvertTo(dstRaw conversion.Hub) error {
@@ -45,3 +69,8 @@ func (dst *DockerMachinePoolList) ConvertFrom(srcRaw conversion.Hub) error {
 
 	return Convert_v1beta1_DockerMachinePoolList_To_v1alpha4_DockerMachinePoolList(src, dst, nil)
 }
+
+func Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolStatus(in *infraexpv1.DockerMachinePoolStatus, out *DockerMachinePoolStatus, s apiconversion.Scope) error {
+	// NOTE: custom conversion func is required because status.infrastructureMachineSelector and status.infrastructureMachineKind have been added in v1beta1 and do not exist in v1alpha4.
+	return autoConvert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolStatus(in, out, s)
+}
diff --git a/test/infrastructure/docker/exp/api/v1alpha4/zz_generated.conversion.go b/test/infrastructure/docker/exp/api/v1alpha4/zz_generated.conversion.go
index 4f302088997d..612a615db8a5 100644
--- a/test/infrastructure/docker/exp/api/v1alpha4/zz_generated.conversion.go
+++ b/test/infrastructure/docker/exp/api/v1alpha4/zz_generated.conversion.go
@@ -95,7 +95,7 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*v1beta1.DockerMachinePoolStatus)(nil), (*DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+	if err := s.AddConversionFunc((*v1beta1.DockerMachinePoolStatus)(nil), (*DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolStatus(a.(*v1beta1.DockerMachinePoolStatus), b.(*DockerMachinePoolStatus), scope)
 	}); err != nil {
 		return err
@@ -339,10 +339,7 @@ func autoConvert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolSt
 	} else {
 		out.Conditions = nil
 	}
+	// WARNING: in.InfrastructureMachineSelector requires manual conversion: does not exist in peer-type
+	// WARNING: in.InfrastructureMachineKind requires manual conversion: does not exist in peer-type
 	return nil
 }
-
-// Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolStatus is an autogenerated conversion function.
-func Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolStatus(in *v1beta1.DockerMachinePoolStatus, out *DockerMachinePoolStatus, s conversion.Scope) error {
-	return autoConvert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolStatus(in, out, s)
-}
diff --git a/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_types.go b/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_types.go
index 273475366b4b..055243311431 100644
--- a/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_types.go
+++ b/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_types.go
@@ -26,6 +26,9 @@ import (
 const (
 	// MachinePoolFinalizer allows ReconcileDockerMachinePool to clean up resources.
 	MachinePoolFinalizer = "dockermachinepool.infrastructure.cluster.x-k8s.io"
+
+	// DockerMachinePoolNameLabel is the label indicating the name of the DockerMachinePool a DockerMachine is controlled by.
+	DockerMachinePoolNameLabel = "dockermachinepool.infrastructure.cluster.x-k8s.io/pool-name"
 )
 
 // DockerMachinePoolMachineTemplate defines the desired state of DockerMachine.
@@ -82,6 +85,15 @@ type DockerMachinePoolStatus struct {
 	// Conditions defines current service state of the DockerMachinePool.
 	// +optional
 	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+
+	// InfrastructureMachineSelector is a label query over the infrastructure resources behind MachinePool Machines.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	// +optional
+	InfrastructureMachineSelector metav1.LabelSelector `json:"infrastructureMachineSelector,omitempty"`
+
+	// InfrastructureMachineKind is the kind of the infrastructure resources behind MachinePool Machines.
+	// +optional
+	InfrastructureMachineKind string `json:"infrastructureMachineKind,omitempty"`
 }
 
 // DockerMachinePoolInstanceStatus contains status information about a DockerMachinePool.
@@ -130,13 +142,13 @@ type DockerMachinePool struct {
 }
 
 // GetConditions returns the set of conditions for this object.
-func (c *DockerMachinePool) GetConditions() clusterv1.Conditions {
-	return c.Status.Conditions
+func (d *DockerMachinePool) GetConditions() clusterv1.Conditions {
+	return d.Status.Conditions
 }
 
 // SetConditions sets the conditions on this object.
-func (c *DockerMachinePool) SetConditions(conditions clusterv1.Conditions) {
-	c.Status.Conditions = conditions
+func (d *DockerMachinePool) SetConditions(conditions clusterv1.Conditions) {
+	d.Status.Conditions = conditions
 }
 
 // +kubebuilder:object:root=true
diff --git a/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_webhook.go b/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_webhook.go
index a6ffc3bbdaf1..4eee20646193 100644
--- a/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_webhook.go
+++ b/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_webhook.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2021 The Kubernetes Authors.
+Copyright 2023 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -17,11 +17,32 @@ limitations under the License.
 package v1beta1
 
 import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 )
 
-func (c *DockerMachinePool) SetupWebhookWithManager(mgr ctrl.Manager) error {
+func (d *DockerMachinePool) SetupWebhookWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewWebhookManagedBy(mgr).
-		For(c).
+		For(d).
 		Complete()
 }
+
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta1-dockermachinepool,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=dockermachinepools,versions=v1beta1,name=default.dockermachinepool.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+
+var _ webhook.Defaulter = &DockerMachinePool{}
+
+// Default implements webhook.Defaulter so a webhook will be registered for the type.
+func (d *DockerMachinePool) Default() {
+	d.Status.InfrastructureMachineSelector = metav1.LabelSelector{
+		MatchLabels: map[string]string{
+			// Note: the cluster.x-k8s.io/cluster-name label is added by the DockerMachinePool controller, which knows the owning Cluster.
+			clusterv1.MachinePoolOwnedLabel: "true",
+			DockerMachinePoolNameLabel:      d.Name,
+		},
+	}
+
+	d.Status.InfrastructureMachineKind = "DockerMachine"
+}
diff --git a/test/infrastructure/docker/exp/api/v1beta1/zz_generated.deepcopy.go b/test/infrastructure/docker/exp/api/v1beta1/zz_generated.deepcopy.go
index cf1e613a00d1..8a92065572e9 100644
--- a/test/infrastructure/docker/exp/api/v1beta1/zz_generated.deepcopy.go
+++ b/test/infrastructure/docker/exp/api/v1beta1/zz_generated.deepcopy.go
@@ -179,6 +179,7 @@ func (in *DockerMachinePoolStatus) DeepCopyInto(out *DockerMachinePoolStatus) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	in.InfrastructureMachineSelector.DeepCopyInto(&out.InfrastructureMachineSelector)
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolStatus.
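
Illustrative sketch, not part of the patch: the Default() webhook above seeds status.infrastructureMachineSelector, and consumers then list the DockerMachines behind the pool with it. The getDockerMachines helper added further below reads only matchLabels; the sketch here shows the more general route through metav1.LabelSelectorAsSelector, which also honors matchExpressions. The helper name listPoolMachines is hypothetical.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
	infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1"
)

// listPoolMachines (hypothetical) lists the DockerMachines selected by the
// DockerMachinePool's status selector, converting the metav1.LabelSelector
// into a labels.Selector so matchExpressions are honored as well as matchLabels.
func listPoolMachines(ctx context.Context, c client.Client, dmp *infraexpv1.DockerMachinePool) (*infrav1.DockerMachineList, error) {
	selector, err := metav1.LabelSelectorAsSelector(&dmp.Status.InfrastructureMachineSelector)
	if err != nil {
		return nil, err
	}

	machineList := &infrav1.DockerMachineList{}
	if err := c.List(ctx, machineList,
		client.InNamespace(dmp.Namespace),
		client.MatchingLabelsSelector{Selector: selector},
	); err != nil {
		return nil, err
	}
	return machineList, nil
}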
diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go index e6d5938d5793..b60fc8d51591 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go @@ -24,6 +24,7 @@ import ( "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" "k8s.io/utils/pointer" @@ -39,9 +40,11 @@ import ( expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" utilexp "sigs.k8s.io/cluster-api/exp/util" "sigs.k8s.io/cluster-api/test/infrastructure/container" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/internal/docker" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -106,6 +109,19 @@ func (r *DockerMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, err } + dockerMachinePool.Status.InfrastructureMachineSelector = metav1.LabelSelector{ + MatchLabels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.MachinePoolOwnedLabel: "true", + infraexpv1.DockerMachinePoolNameLabel: dockerMachinePool.Name, + }, + } + dockerMachinePool.Status.InfrastructureMachineKind = "DockerMachine" + // Patch now so that the status and selectors are available. + if err := patchHelper.Patch(ctx, dockerMachinePool); err != nil { + return ctrl.Result{}, err + } + // Always attempt to Patch the DockerMachinePool object and status after each reconciliation. 
 	defer func() {
 		if err := patchDockerMachinePool(ctx, patchHelper, dockerMachinePool); err != nil {
@@ -166,7 +182,39 @@ func (r *DockerMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr
 }
 
 func (r *DockerMachinePoolReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool) (ctrl.Result, error) {
-	pool, err := docker.NewNodePool(ctx, r.Client, cluster, machinePool, dockerMachinePool)
+	log := ctrl.LoggerFrom(ctx)
+
+	dockerMachineList, err := getDockerMachines(ctx, r.Client, *dockerMachinePool)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	nodePoolInstances := make([]docker.NodePoolInstance, 0, len(dockerMachineList.Items))
+	for _, dockerMachine := range dockerMachineList.Items {
+		machine, err := util.GetOwnerMachine(ctx, r.Client, dockerMachine.ObjectMeta)
+		if err != nil {
+			log.V(2).Error(err, "Failed to get owner machine, skipping")
+			continue
+		}
+		if machine == nil {
+			log.V(2).Info("Owner machine not found, skipping", "docker-machine", dockerMachine.Name)
+			continue
+		}
+
+		hasDeleteAnnotation := false
+		if machine.Annotations != nil {
+			_, hasDeleteAnnotation = machine.Annotations[clusterv1.DeleteMachineAnnotation]
+		}
+
+		nodePoolInstances = append(nodePoolInstances, docker.NodePoolInstance{
+			InstanceName:     dockerMachine.Spec.InstanceName,
+			Bootstrapped:     dockerMachine.Spec.Bootstrapped,
+			ProviderID:       dockerMachine.Spec.ProviderID,
+			PrioritizeDelete: hasDeleteAnnotation,
+		})
+	}
+
+	pool, err := docker.NewNodePool(ctx, r.Client, cluster, machinePool, dockerMachinePool, nodePoolInstances)
 	if err != nil {
 		return ctrl.Result{}, errors.Wrap(err, "failed to build new node pool")
 	}
@@ -192,7 +240,32 @@ func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, clust
 		machinePool.Spec.Replicas = pointer.Int32(1)
 	}
 
-	pool, err := docker.NewNodePool(ctx, r.Client, cluster, machinePool, dockerMachinePool)
+	dockerMachineList, err := getDockerMachines(ctx, r.Client, *dockerMachinePool)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Since node pools don't persist the instances list, we need to construct it from the list of DockerMachines.
+	nodePoolInstances := make([]docker.NodePoolInstance, 0, len(dockerMachineList.Items))
+	for _, dockerMachine := range dockerMachineList.Items {
+		machine, err := util.GetOwnerMachine(ctx, r.Client, dockerMachine.ObjectMeta)
+		if err != nil {
+			log.V(2).Error(err, "Failed to get owner machine")
+		}
+		var hasDeleteAnnotation bool
+		if machine != nil {
+			_, hasDeleteAnnotation = machine.Annotations[clusterv1.DeleteMachineAnnotation]
+		}
+
+		nodePoolInstances = append(nodePoolInstances, docker.NodePoolInstance{
+			InstanceName:     dockerMachine.Spec.InstanceName,
+			Bootstrapped:     dockerMachine.Spec.Bootstrapped,
+			ProviderID:       dockerMachine.Spec.ProviderID,
+			PrioritizeDelete: hasDeleteAnnotation,
+		})
+	}
+
+	pool, err := docker.NewNodePool(ctx, r.Client, cluster, machinePool, dockerMachinePool, nodePoolInstances)
 	if err != nil {
 		return ctrl.Result{}, errors.Wrap(err, "failed to build new node pool")
 	}
@@ -202,20 +275,35 @@ func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, clust
 	if err != nil {
 		return ctrl.Result{}, errors.Wrap(err, "failed to generate workload cluster client")
 	}
+
 	res, err := pool.ReconcileMachines(ctx, remoteClient)
 	if err != nil {
 		return res, err
 	}
 
+	nodePoolInstancesResult := pool.GetNodePoolInstances()
+
 	// Derive info from Status.Instances
 	dockerMachinePool.Spec.ProviderIDList = []string{}
-	for _, instance := range dockerMachinePool.Status.Instances {
-		if instance.ProviderID != nil && instance.Ready {
+	for _, instance := range nodePoolInstancesResult {
+		if instance.ProviderID != nil {
 			dockerMachinePool.Spec.ProviderIDList = append(dockerMachinePool.Spec.ProviderIDList, *instance.ProviderID)
 		}
 	}
 
-	dockerMachinePool.Status.Replicas = int32(len(dockerMachinePool.Status.Instances))
+	// Delete all DockerMachines that are not in the list of instances returned by the node pool.
+	if err := r.DeleteDanglingDockerMachines(ctx, dockerMachinePool, nodePoolInstancesResult); err != nil {
+		conditions.MarkFalse(dockerMachinePool, clusterv1.ReadyCondition, "FailedToDeleteOrphanedMachines", clusterv1.ConditionSeverityWarning, err.Error())
+		return ctrl.Result{}, errors.Wrap(err, "failed to delete orphaned machines")
+	}
+
+	// Create a DockerMachine for each instance returned by the node pool if it doesn't exist.
+	if err := r.CreateDockerMachinesIfNotExists(ctx, dockerMachinePool, nodePoolInstancesResult); err != nil {
+		conditions.MarkFalse(dockerMachinePool, clusterv1.ReadyCondition, "FailedToCreateNewMachines", clusterv1.ConditionSeverityWarning, err.Error())
+		return ctrl.Result{}, errors.Wrap(err, "failed to create missing machines")
+	}
+
+	dockerMachinePool.Status.Replicas = int32(len(dockerMachineList.Items))
 
 	if dockerMachinePool.Spec.ProviderID == "" {
 		// This is a fake provider ID which does not tie back to any docker infrastructure. In cloud providers,
@@ -224,25 +312,123 @@ func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, clust
 		dockerMachinePool.Spec.ProviderID = getDockerMachinePoolProviderID(cluster.Name, dockerMachinePool.Name)
 	}
 
-	dockerMachinePool.Status.Ready = len(dockerMachinePool.Spec.ProviderIDList) == int(*machinePool.Spec.Replicas)
+	if len(dockerMachinePool.Spec.ProviderIDList) == int(*machinePool.Spec.Replicas) {
+		dockerMachinePool.Status.Ready = true
+		conditions.MarkTrue(dockerMachinePool, expv1.ReplicasReadyCondition)
+	} else {
+		dockerMachinePool.Status.Ready = false
+		conditions.MarkFalse(dockerMachinePool, expv1.ReplicasReadyCondition, expv1.WaitingForReplicasReadyReason, clusterv1.ConditionSeverityInfo, "")
 
-	// if some machine is still provisioning, force reconcile in few seconds to check again infrastructure.
-	if !dockerMachinePool.Status.Ready && res.IsZero() {
-		return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
+		// if some machine is still provisioning, force a reconcile in a few seconds to check the infrastructure again.
+		if res.IsZero() {
+			return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
+		}
 	}
+
 	return res, nil
 }
 
+func getDockerMachines(ctx context.Context, c client.Client, dockerMachinePool infraexpv1.DockerMachinePool) (*infrav1.DockerMachineList, error) {
+	dockerMachineList := &infrav1.DockerMachineList{}
+	labels := dockerMachinePool.Status.InfrastructureMachineSelector.MatchLabels
+	if err := c.List(ctx, dockerMachineList, client.InNamespace(dockerMachinePool.Namespace), client.MatchingLabels(labels)); err != nil {
+		return nil, err
+	}
+
+	return dockerMachineList, nil
+}
+
+// DeleteDanglingDockerMachines deletes any DockerMachines owned by the DockerMachinePool whose instance name is not in the latest list of node pool instances.
+func (r *DockerMachinePoolReconciler) DeleteDanglingDockerMachines(ctx context.Context, dockerMachinePool *infraexpv1.DockerMachinePool, instances []docker.NodePoolInstance) error {
+	log := ctrl.LoggerFrom(ctx)
+	log.V(2).Info("Deleting orphaned machines", "dockerMachinePool", dockerMachinePool.Name, "namespace", dockerMachinePool.Namespace, "instances", instances)
+	dockerMachineList, err := getDockerMachines(ctx, r.Client, *dockerMachinePool)
+	if err != nil {
+		return err
+	}
+
+	log.V(2).Info("DockerMachineList kind is", "kind", dockerMachineList.GetObjectKind())
+
+	instanceNameSet := map[string]struct{}{}
+	for _, instance := range instances {
+		instanceNameSet[instance.InstanceName] = struct{}{}
+	}
+
+	for i := range dockerMachineList.Items {
+		dockerMachine := &dockerMachineList.Items[i]
+		if _, ok := instanceNameSet[dockerMachine.Spec.InstanceName]; !ok {
+			log.V(2).Info("Deleting orphaned DockerMachine", "dockerMachine", dockerMachine.Name, "namespace", dockerMachine.Namespace)
+			if err := r.Client.Delete(ctx, dockerMachine); err != nil {
+				return err
+			}
+		} else {
+			log.V(2).Info("Keeping DockerMachine, nothing to do", "dockerMachine", dockerMachine.Name, "namespace", dockerMachine.Namespace)
+		}
+	}
+
+	return nil
+}
+
+// CreateDockerMachinesIfNotExists creates a DockerMachine for each instance returned by the node pool if it doesn't exist.
+func (r *DockerMachinePoolReconciler) CreateDockerMachinesIfNotExists(ctx context.Context, dockerMachinePool *infraexpv1.DockerMachinePool, instances []docker.NodePoolInstance) error { + log := ctrl.LoggerFrom(ctx) + + log.V(2).Info("Creating missing machines", "dockerMachinePool", dockerMachinePool.Name, "namespace", dockerMachinePool.Namespace, "instances", instances) + + dockerMachineList, err := getDockerMachines(ctx, r.Client, *dockerMachinePool) + if err != nil { + return err + } + + instanceNameToDockerMachine := make(map[string]infrav1.DockerMachine) + for _, dockerMachine := range dockerMachineList.Items { + instanceNameToDockerMachine[dockerMachine.Spec.InstanceName] = dockerMachine + } + + for _, instance := range instances { + if _, exists := instanceNameToDockerMachine[instance.InstanceName]; !exists { + dockerMachine := &infrav1.DockerMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: dockerMachinePool.Namespace, + GenerateName: fmt.Sprintf("%s-", dockerMachinePool.Name), + Labels: dockerMachinePool.Status.InfrastructureMachineSelector.MatchLabels, + Annotations: make(map[string]string), + // Note: This DockerMachine will be owned by the DockerMachinePool until the MachinePool controller creates its parent Machine. + }, + Spec: infrav1.DockerMachineSpec{ + InstanceName: instance.InstanceName, + }, + } + + log.V(2).Info("Instance name for dockerMachine is", "instanceName", instance.InstanceName, "dockerMachine", dockerMachine.Name) + + if err := r.Client.Create(ctx, dockerMachine); err != nil { + return errors.Wrap(err, "failed to create dockerMachine") + } + } + } + + return nil +} + func getDockerMachinePoolProviderID(clusterName, dockerMachinePoolName string) string { return fmt.Sprintf("docker:////%s-dmp-%s", clusterName, dockerMachinePoolName) } func patchDockerMachinePool(ctx context.Context, patchHelper *patch.Helper, dockerMachinePool *infraexpv1.DockerMachinePool) error { - // TODO: add conditions + conditions.SetSummary(dockerMachinePool, + conditions.WithConditions( + expv1.ReplicasReadyCondition, + ), + ) // Patch the object, ignoring conflicts on the conditions owned by this controller. return patchHelper.Patch( ctx, dockerMachinePool, + patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + clusterv1.ReadyCondition, + expv1.ReplicasReadyCondition, + }}, ) } diff --git a/test/infrastructure/docker/exp/internal/docker/nodepool.go b/test/infrastructure/docker/exp/internal/docker/nodepool.go index 896fe8749163..3787c6e7cd34 100644 --- a/test/infrastructure/docker/exp/internal/docker/nodepool.go +++ b/test/infrastructure/docker/exp/internal/docker/nodepool.go @@ -22,6 +22,7 @@ import ( "encoding/base64" "fmt" "math/rand" + "sort" "strings" "time" @@ -55,16 +56,45 @@ type NodePool struct { dockerMachinePool *infraexpv1.DockerMachinePool labelFilters map[string]string machines []*docker.Machine + nodePoolInstances []NodePoolInstance // Note: This must be initialized when creating a new node pool and updated to reflect the `machines` slice. +} + +// NodePoolInstance is a representation of a node pool instance used to provide the information to construct DockerMachines. 
+type NodePoolInstance struct { + InstanceName string + Bootstrapped bool + ProviderID *string + PrioritizeDelete bool + Ready bool +} + +func (np NodePool) Len() int { return len(np.machines) } +func (np NodePool) Swap(i, j int) { np.machines[i], np.machines[j] = np.machines[j], np.machines[i] } +func (np NodePool) Less(i, j int) bool { + var instanceI, instanceJ NodePoolInstance + for _, instance := range np.nodePoolInstances { + if instance.InstanceName == np.machines[i].Name() { + instanceI = instance + } + if instance.InstanceName == np.machines[j].Name() { + instanceJ = instance + } + } + if instanceI.PrioritizeDelete == instanceJ.PrioritizeDelete { + return instanceI.InstanceName < instanceJ.InstanceName + } + return instanceJ.PrioritizeDelete } // NewNodePool creates a new node pool instances. -func NewNodePool(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, mp *expv1.MachinePool, dmp *infraexpv1.DockerMachinePool) (*NodePool, error) { +func NewNodePool(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, mp *expv1.MachinePool, dmp *infraexpv1.DockerMachinePool, nodePoolInstances []NodePoolInstance) (*NodePool, error) { np := &NodePool{ client: c, cluster: cluster, machinePool: mp, dockerMachinePool: dmp, labelFilters: map[string]string{dockerMachinePoolLabel: dmp.Name}, + nodePoolInstances: nodePoolInstances, } if err := np.refresh(ctx); err != nil { @@ -73,6 +103,11 @@ func NewNodePool(ctx context.Context, c client.Client, cluster *clusterv1.Cluste return np, nil } +// GetNodePoolInstances returns the node pool instances providing the information to construct DockerMachines. +func (np *NodePool) GetNodePoolInstances() []NodePoolInstance { + return np.nodePoolInstances +} + // ReconcileMachines will build enough machines to satisfy the machine pool / docker machine pool spec // eventually delete all the machine in excess, and update the status for all the machines. // @@ -126,9 +161,9 @@ func (np *NodePool) ReconcileMachines(ctx context.Context, remoteClient client.C // First remove instance status for machines no longer existing, then reconcile the existing machines. // NOTE: the status is the only source of truth for understanding if the machine is already bootstrapped, ready etc. // so we are preserving the existing status and using it as a bases for the next reconcile machine. - instances := make([]infraexpv1.DockerMachinePoolInstanceStatus, 0, len(np.machines)) - for i := range np.dockerMachinePool.Status.Instances { - instance := np.dockerMachinePool.Status.Instances[i] + instances := make([]NodePoolInstance, 0, len(np.machines)) + for i := range np.nodePoolInstances { + instance := np.nodePoolInstances[i] for j := range np.machines { if instance.InstanceName == np.machines[j].Name() { instances = append(instances, instance) @@ -136,7 +171,7 @@ func (np *NodePool) ReconcileMachines(ctx context.Context, remoteClient client.C } } } - np.dockerMachinePool.Status.Instances = instances + np.nodePoolInstances = instances result := ctrl.Result{} for i := range np.machines { @@ -164,6 +199,8 @@ func (np *NodePool) Delete(ctx context.Context) error { } } + // Note: We can set `np.nodePoolInstances = nil` here, but it shouldn't be necessary on Delete(). 
+ return nil } @@ -237,6 +274,9 @@ func (np *NodePool) refresh(ctx context.Context) error { np.machines = append(np.machines, machine) } } + + sort.Sort(np) + return nil } @@ -244,29 +284,28 @@ func (np *NodePool) refresh(ctx context.Context) error { func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machine, remoteClient client.Client) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) - var machineStatus infraexpv1.DockerMachinePoolInstanceStatus + var nodePoolInstance NodePoolInstance isFound := false - for _, instanceStatus := range np.dockerMachinePool.Status.Instances { - if instanceStatus.InstanceName == machine.Name() { - machineStatus = instanceStatus + for _, instance := range np.nodePoolInstances { + if instance.InstanceName == machine.Name() { + nodePoolInstance = instance isFound = true } } if !isFound { log.Info("Creating instance record", "instance", machine.Name()) - machineStatus = infraexpv1.DockerMachinePoolInstanceStatus{ + nodePoolInstance = NodePoolInstance{ InstanceName: machine.Name(), - Version: np.machinePool.Spec.Template.Spec.Version, } - np.dockerMachinePool.Status.Instances = append(np.dockerMachinePool.Status.Instances, machineStatus) + np.nodePoolInstances = append(np.nodePoolInstances, nodePoolInstance) // return to surface the new machine exists. return ctrl.Result{Requeue: true}, nil } defer func() { - for i, instanceStatus := range np.dockerMachinePool.Status.Instances { - if instanceStatus.InstanceName == machine.Name() { - np.dockerMachinePool.Status.Instances[i] = machineStatus + for i, instance := range np.nodePoolInstances { + if instance.InstanceName == machine.Name() { + np.nodePoolInstances[i] = instance } } }() @@ -277,7 +316,7 @@ func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machin } // if the machine isn't bootstrapped, only then run bootstrap scripts - if !machineStatus.Bootstrapped { + if !nodePoolInstance.Bootstrapped { timeoutCtx, cancel := context.WithTimeout(ctx, 3*time.Minute) defer cancel() @@ -305,39 +344,12 @@ func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machin return ctrl.Result{}, errors.Wrap(err, "failed to check for existence of bootstrap success file at /run/cluster-api/bootstrap-success.complete") } } - machineStatus.Bootstrapped = true // return to surface the machine has been bootstrapped. return ctrl.Result{Requeue: true}, nil } - if machineStatus.Addresses == nil { - log.Info("Fetching instance addresses", "instance", machine.Name()) - // set address in machine status - machineAddress, err := externalMachine.Address(ctx) - if err != nil { - // Requeue if there is an error, as this is likely momentary load balancer - // state changes during control plane provisioning. - return ctrl.Result{Requeue: true}, nil //nolint:nilerr - } - - machineStatus.Addresses = []clusterv1.MachineAddress{ - { - Type: clusterv1.MachineHostName, - Address: externalMachine.ContainerName(), - }, - { - Type: clusterv1.MachineInternalIP, - Address: machineAddress, - }, - { - Type: clusterv1.MachineExternalIP, - Address: machineAddress, - }, - } - } - - if machineStatus.ProviderID == nil { + if nodePoolInstance.ProviderID == nil { log.Info("Fetching instance provider ID", "instance", machine.Name()) // Usually a cloud provider will do this, but there is no docker-cloud provider. 
// Requeue if there is an error, as this is likely momentary load balancer @@ -346,12 +358,8 @@ func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machin log.V(4).Info("transient error setting the provider id") return ctrl.Result{Requeue: true}, nil //nolint:nilerr } - // Set ProviderID so the Cluster API Machine Controller can pull it - providerID := externalMachine.ProviderID() - machineStatus.ProviderID = &providerID } - machineStatus.Ready = true return ctrl.Result{}, nil } diff --git a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go index 3b8ed794adbc..40460ad98e40 100644 --- a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go @@ -146,8 +146,20 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } + if _, hasDeleteAnnotation := machine.Annotations[clusterv1.DeleteMachineAnnotation]; hasDeleteAnnotation { + if dockerMachine.Annotations == nil { + dockerMachine.Annotations = map[string]string{} + } + dockerMachine.Annotations[clusterv1.DeleteMachineAnnotation] = "true" + } + + if dockerMachine.Spec.InstanceName == "" { + log.V(2).Info("InstanceName not set for DockerMachine, defaulting to owner machine name", "machine", machine.Name) + dockerMachine.Spec.InstanceName = machine.Name + } + // Create a helper for managing the docker container hosting the machine. - externalMachine, err := docker.NewMachine(ctx, cluster, machine.Name, nil) + externalMachine, err := docker.NewMachine(ctx, cluster, dockerMachine.Spec.InstanceName, nil) if err != nil { return ctrl.Result{}, errors.Wrapf(err, "failed to create helper for managing the externalMachine") } @@ -210,12 +222,21 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster * return ctrl.Result{}, nil } + _, machinePoolOwned := dockerMachine.Labels[clusterv1.MachinePoolOwnedLabel] + // if the machine is already provisioned, return if dockerMachine.Spec.ProviderID != nil { // ensure ready state is set. // This is required after move, because status is not moved to the target cluster. dockerMachine.Status.Ready = true + // We only create a DockerMachinePoolMachine if the providerID is set, which implies that the bootstrap has succeeded. + // TODO: verify this logic + if machinePoolOwned { + log.V(2).Info("Marking bootstrap true for machine pool owned machine") + conditions.MarkTrue(dockerMachine, infrav1.BootstrapExecSucceededCondition) + } + if externalMachine.Exists() { conditions.MarkTrue(dockerMachine, infrav1.ContainerProvisionedCondition) // Setting machine address is required after move, because status.Address field is not retained during move. @@ -228,51 +249,52 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster * return ctrl.Result{}, nil } - // Make sure bootstrap data is available and populated. - if machine.Spec.Bootstrap.DataSecretName == nil { - if !util.IsControlPlaneMachine(machine) && !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { - log.Info("Waiting for the control plane to be initialized") - conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") + if !machinePoolOwned { + // Make sure bootstrap data is available and populated. 
+		if machine.Spec.Bootstrap.DataSecretName == nil {
+			if !util.IsControlPlaneMachine(machine) && !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) {
+				log.Info("Waiting for the control plane to be initialized")
+				conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
+				return ctrl.Result{}, nil
+			}
+
+			log.Info("Waiting for the Bootstrap provider controller to set bootstrap data")
+			conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "")
 			return ctrl.Result{}, nil
 		}
 
-		log.Info("Waiting for the Bootstrap provider controller to set bootstrap data")
-		conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "")
-		return ctrl.Result{}, nil
-	}
-
-	// Create the docker container hosting the machine
-	role := constants.WorkerNodeRoleValue
-	if util.IsControlPlaneMachine(machine) {
-		role = constants.ControlPlaneNodeRoleValue
-	}
+		// Create the docker container hosting the machine
+		role := constants.WorkerNodeRoleValue
+		if util.IsControlPlaneMachine(machine) {
+			role = constants.ControlPlaneNodeRoleValue
+		}
 
-	// Create the machine if not existing yet
-	if !externalMachine.Exists() {
-		// NOTE: FailureDomains don't mean much in CAPD since it's all local, but we are setting a label on
-		// each container, so we can check placement.
-		if err := externalMachine.Create(ctx, dockerMachine.Spec.CustomImage, role, machine.Spec.Version, docker.FailureDomainLabel(machine.Spec.FailureDomain), dockerMachine.Spec.ExtraMounts); err != nil {
-			return ctrl.Result{}, errors.Wrap(err, "failed to create worker DockerMachine")
+		// Create the machine if not existing yet; this whole branch only runs for
+		// machines that are not MachinePool-owned, so no extra guard is needed here.
+		if !externalMachine.Exists() {
+			// NOTE: FailureDomains don't mean much in CAPD since it's all local, but we are setting a label on
+			// each container, so we can check placement.
+ if err := externalMachine.Create(ctx, dockerMachine.Spec.CustomImage, role, machine.Spec.Version, docker.FailureDomainLabel(machine.Spec.FailureDomain), dockerMachine.Spec.ExtraMounts); err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to create worker DockerMachine") + } } - } - // Preload images into the container - if len(dockerMachine.Spec.PreLoadImages) > 0 { - if err := externalMachine.PreloadLoadImages(ctx, dockerMachine.Spec.PreLoadImages); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to pre-load images into the DockerMachine") + // Preload images into the container + if len(dockerMachine.Spec.PreLoadImages) > 0 { + if err := externalMachine.PreloadLoadImages(ctx, dockerMachine.Spec.PreLoadImages); err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to pre-load images into the DockerMachine") + } } - } - // if the machine is a control plane update the load balancer configuration - // we should only do this once, as reconfiguration more or less ensures - // node ref setting fails - if util.IsControlPlaneMachine(machine) && !dockerMachine.Status.LoadBalancerConfigured { - if err := externalLoadBalancer.UpdateConfiguration(ctx); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to update DockerCluster.loadbalancer configuration") + // if the machine is a control plane update the load balancer configuration + // we should only do this once, as reconfiguration more or less ensures + // node ref setting fails + if util.IsControlPlaneMachine(machine) && !dockerMachine.Status.LoadBalancerConfigured { + if err := externalLoadBalancer.UpdateConfiguration(ctx); err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to update DockerCluster.loadbalancer configuration") + } + dockerMachine.Status.LoadBalancerConfigured = true } - dockerMachine.Status.LoadBalancerConfigured = true } - // Update the ContainerProvisionedCondition condition // NOTE: it is required to create the patch helper before this change otherwise it wont surface if // we issue a patch down in the code (because if we create patch helper after this point the ContainerProvisionedCondition=True exists both on before and after). @@ -303,38 +325,41 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster * // but bootstrapped is never set on the object. We only try to bootstrap if the machine // is not already bootstrapped. if err := externalMachine.CheckForBootstrapSuccess(timeoutCtx, false); err != nil { - bootstrapData, format, err := r.getBootstrapData(timeoutCtx, machine) - if err != nil { - return ctrl.Result{}, err - } + if !machinePoolOwned { + bootstrapData, format, err := r.getBootstrapData(timeoutCtx, machine) + if err != nil { + return ctrl.Result{}, err + } - // Setup a go routing to check for the machine being deleted while running bootstrap as a - // synchronous process, e.g. due to remediation. The routine stops when timeoutCtx is Done - // (either because canceled intentionally due to machine deletion or canceled by the defer cancel() - // call when exiting from this func). 
-		go func() {
-			for {
-				select {
-				case <-timeoutCtx.Done():
-					return
-				default:
-					updatedDockerMachine := &infrav1.DockerMachine{}
-					if err := r.Client.Get(ctx, client.ObjectKeyFromObject(dockerMachine), updatedDockerMachine); err == nil &&
-						!updatedDockerMachine.DeletionTimestamp.IsZero() {
-						log.Info("Cancelling Bootstrap because the underlying machine has been deleted")
-						cancel()
+			// Set up a goroutine to check for the machine being deleted while running bootstrap as a
+			// synchronous process, e.g. due to remediation. The routine stops when timeoutCtx is Done
+			// (either because canceled intentionally due to machine deletion or canceled by the defer cancel()
+			// call when exiting from this func).
+			go func() {
+				for {
+					select {
+					case <-timeoutCtx.Done():
 						return
+					default:
+						updatedDockerMachine := &infrav1.DockerMachine{}
+						if err := r.Client.Get(ctx, client.ObjectKeyFromObject(dockerMachine), updatedDockerMachine); err == nil &&
+							!updatedDockerMachine.DeletionTimestamp.IsZero() {
+							log.Info("Cancelling Bootstrap because the underlying machine has been deleted")
+							cancel()
+							return
+						}
+						time.Sleep(5 * time.Second)
 					}
-					time.Sleep(5 * time.Second)
 				}
-			}
-		}()
+			}()
 
-		// Run the bootstrap script. Simulates cloud-init/Ignition.
-		if err := externalMachine.ExecBootstrap(timeoutCtx, bootstrapData, format); err != nil {
-			conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "Repeating bootstrap")
-			return ctrl.Result{}, errors.Wrap(err, "failed to exec DockerMachine bootstrap")
+			// Run the bootstrap script. Simulates cloud-init/Ignition.
+			if err := externalMachine.ExecBootstrap(timeoutCtx, bootstrapData, format); err != nil {
+				conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "Repeating bootstrap")
+				return ctrl.Result{}, errors.Wrap(err, "failed to exec DockerMachine bootstrap")
+			}
 		}
+
 		// Check for bootstrap success
 		if err := externalMachine.CheckForBootstrapSuccess(timeoutCtx, true); err != nil {
 			conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "Repeating bootstrap")
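
Illustrative sketch, not part of the patch: the ordering rule added to nodepool.go (NodePool.Less, applied via sort.Sort in refresh) sorts instances whose owner Machine carries clusterv1.DeleteMachineAnnotation to the end of the machines slice, with name order as the tiebreak, under the assumption that the node pool trims excess machines from the tail of the list. A self-contained equivalent using sort.Slice:

package main

import (
	"fmt"
	"sort"
)

// instance is a stand-in for docker.NodePoolInstance, reduced to the two
// fields the comparator reads.
type instance struct {
	Name             string
	PrioritizeDelete bool
}

func main() {
	instances := []instance{
		{Name: "worker-c", PrioritizeDelete: false},
		{Name: "worker-a", PrioritizeDelete: true},
		{Name: "worker-b", PrioritizeDelete: false},
	}

	sort.Slice(instances, func(i, j int) bool {
		// Same rule as NodePool.Less: equal delete priority falls back to
		// name ordering; otherwise the instance flagged for deletion sorts last.
		if instances[i].PrioritizeDelete == instances[j].PrioritizeDelete {
			return instances[i].Name < instances[j].Name
		}
		return instances[j].PrioritizeDelete
	})

	fmt.Println(instances) // [{worker-b false} {worker-c false} {worker-a true}]
}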