[WIP] ✨ Add MachinePool Machines implementation #6089

Closed · wants to merge 5 commits

Changes from all commits
59 changes: 55 additions & 4 deletions cmd/clusterctl/client/tree/discovery.go
@@ -19,11 +19,14 @@ package tree
import (
"context"

// corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/client"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/controllers/external"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
)

@@ -76,9 +79,9 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt
}

// Adds control plane
controlPLane, err := external.Get(ctx, c, cluster.Spec.ControlPlaneRef, cluster.Namespace)
controlPlane, err := external.Get(ctx, c, cluster.Spec.ControlPlaneRef, cluster.Namespace)
if err == nil {
tree.Add(cluster, controlPLane, ObjectMetaName("ControlPlane"), GroupingObject(true))
tree.Add(cluster, controlPlane, ObjectMetaName("ControlPlane"), GroupingObject(true))
}

// Adds control plane machines.
@@ -96,6 +99,7 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt
tree.Add(m, machineInfra, ObjectMetaName("MachineInfrastructure"), NoEcho(true))
}

// Note: this reference doesn't exist on MachinePool Machines
if machineBootstrap, err := external.Get(ctx, c, m.Spec.Bootstrap.ConfigRef, cluster.Namespace); err == nil {
tree.Add(m, machineBootstrap, ObjectMetaName("BootstrapConfig"), NoEcho(true))
}
@@ -105,10 +109,15 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt
controlPlaneMachines := selectControlPlaneMachines(machinesList)
for i := range controlPlaneMachines {
cp := controlPlaneMachines[i]
addMachineFunc(controlPLane, cp)
addMachineFunc(controlPlane, cp)
}

if len(machinesList.Items) == len(controlPlaneMachines) {
machinePoolList, err := getMachinePoolsInCluster(ctx, c, cluster.Namespace, cluster.Name)
if err != nil {
return nil, err
}

if len(machinesList.Items) == len(controlPlaneMachines) && len(machinePoolList.Items) == 0 {
return tree, nil
}

@@ -151,6 +160,11 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt
}
}

err = addMachinePoolsToObjectTree(ctx, c, cluster, workers, machinePoolList, machinesList, tree, addMachineFunc)
if err != nil {
return nil, err
}

// Handles orphan machines.
if len(machineMap) < len(machinesList.Items) {
other := VirtualObject(cluster.Namespace, "OtherGroup", "Other")
@@ -168,6 +182,28 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt
return tree, nil
}

func addMachinePoolsToObjectTree(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, workers *unstructured.Unstructured, machinePoolList *expv1.MachinePoolList, machinesList *clusterv1.MachineList, tree *ObjectTree, addMachineFunc func(parent client.Object, m *clusterv1.Machine)) error {
for i := range machinePoolList.Items {
mp := &machinePoolList.Items[i]
tree.Add(workers, mp, GroupingObject(true))
if machinePoolInfra, err := external.Get(ctx, c, &mp.Spec.Template.Spec.InfrastructureRef, cluster.Namespace); err == nil {
tree.Add(mp, machinePoolInfra, ObjectMetaName("MachinePoolInfrastructure"), NoEcho(true))
}

if machinePoolBootstrap, err := external.Get(ctx, c, mp.Spec.Template.Spec.Bootstrap.ConfigRef, cluster.Namespace); err == nil {
tree.Add(mp, machinePoolBootstrap, ObjectMetaName("BootstrapConfig"), NoEcho(true))
// TODO: should this BootstrapConfig go under the MachinePool or individual Machine?
}

machines := selectMachinesControlledBy(machinesList, mp)
for _, w := range machines {
addMachineFunc(mp, w)
}
}

return nil
}

func getMachinesInCluster(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.MachineList, error) {
if name == "" {
return nil, nil
@@ -198,6 +234,21 @@ func getMachineDeploymentsInCluster(ctx context.Context, c client.Client, namesp
return machineDeploymentList, nil
}

func getMachinePoolsInCluster(ctx context.Context, c client.Client, namespace, name string) (*expv1.MachinePoolList, error) {
if name == "" {
return nil, nil
}

machinePoolList := &expv1.MachinePoolList{}
labels := map[string]string{clusterv1.ClusterLabelName: name}

if err := c.List(ctx, machinePoolList, client.InNamespace(namespace), client.MatchingLabels(labels)); err != nil {
return nil, err
}

return machinePoolList, nil
}

func getMachineSetsInCluster(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.MachineSetList, error) {
if name == "" {
return nil, nil
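For readers following the tree construction: selectMachinesControlledBy is called above but not shown in this diff. A minimal sketch of what such a helper presumably does, assuming it filters on the controller owner reference via metav1.IsControlledBy (both clusterv1 and metav1 are already imported in this file); this illustrates the pattern and is not the PR's actual code:

func selectMachinesControlledBy(machineList *clusterv1.MachineList, owner metav1.Object) []*clusterv1.Machine {
	// Keep only the Machines whose controller owner reference points at
	// `owner` (e.g. a MachinePool). Assumed behavior, mirroring how
	// control plane machines are selected elsewhere in this file.
	machines := []*clusterv1.Machine{}
	for i := range machineList.Items {
		m := &machineList.Items[i]
		if metav1.IsControlledBy(m, owner) {
			machines = append(machines, m)
		}
	}
	return machines
}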
2 changes: 2 additions & 0 deletions cmd/clusterctl/internal/scheme/scheme.go
@@ -28,6 +28,7 @@ import (
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

var (
@@ -44,4 +45,5 @@ func init() {
_ = admissionregistration.AddToScheme(Scheme)
_ = admissionregistrationv1beta1.AddToScheme(Scheme)
_ = addonsv1.AddToScheme(Scheme)
_ = expv1.AddToScheme(Scheme)
}
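A minimal sketch of why this registration matters, assuming a controller-runtime client built on this package's Scheme (cfg and ctx are assumed to exist in the caller). Without the expv1.AddToScheme call above, the List below would fail with a "no kind is registered" error for the MachinePool types:

c, err := client.New(cfg, client.Options{Scheme: scheme.Scheme})
if err != nil {
	return err
}
// Decoding MachinePool objects works only because the expv1 types
// were registered into the scheme in init() above.
machinePools := &expv1.MachinePoolList{}
if err := c.List(ctx, machinePools, client.InNamespace("default")); err != nil {
	return err
}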
76 changes: 72 additions & 4 deletions config/crd/bases/cluster.x-k8s.io_machinepools.yaml
@@ -1017,17 +1017,85 @@ spec:
items:
type: string
type: array
infrastructureRefList:
description: InfrastructureRefList are the infrastructure references
of machine instances, populated by the provider. This field must
contain the infrastructure references of all instances in the machine
pool. If this list is populated by the provider, `ProviderIDList`
should be left empty.
items:
description: 'ObjectReference contains enough information to let
you inspect or modify the referred object. --- New uses of this
type are discouraged because of difficulty describing its usage
when embedded in APIs. 1. Ignored fields. It includes many fields
which are not generally honored. For instance, ResourceVersion
and FieldPath are both very rarely valid in actual usage. 2. Invalid
usage help. It is impossible to add specific help for individual
usage. In most embedded usages, there are particular restrictions
like, "must refer only to types A and B" or "UID not honored"
or "name must be restricted". Those cannot be well described when
embedded. 3. Inconsistent validation. Because the usages are
different, the validation rules are different by usage, which
makes it hard for users to predict what will happen. 4. The fields
are both imprecise and overly precise. Kind is not a precise
mapping to a URL. This can produce ambiguity during interpretation
and require a REST mapping. In most cases, the dependency is
on the group,resource tuple and the version of the actual struct
is irrelevant. 5. We cannot easily change it. Because this type
is embedded in many locations, updates to this type will affect
numerous schemas. Don''t make new APIs embed an underspecified
API type they do not control. Instead of using this type, create
a locally provided and used type that is well-focused on your
reference. For example, ServiceReferences for admission registration:
https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
.'
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
type: array
minReadySeconds:
description: Minimum number of seconds for which a newly created machine
instance should be ready. Defaults to 0 (machine instance will
be considered available as soon as it is ready)
format: int32
type: integer
providerIDList:
description: ProviderIDList are the identification IDs of machine
instances provided by the provider. This field must match the provider
IDs as seen on the node objects corresponding to a machine pool's
machine instances.
description: ProviderIDList are the identifiers of machine instances
populated by the provider. This field must match the provider IDs
as seen on the node objects corresponding to a machine pool's machine
instances. If this list is populated by the provider, `InfrastructureRefList`
should be left empty.
items:
type: string
type: array
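To make the new field concrete, here is a hypothetical MachinePool manifest populating `infrastructureRefList` while leaving `providerIDList` empty, per the description above; the instance kind and names are invented for illustration and the template is omitted for brevity:

apiVersion: cluster.x-k8s.io/v1beta1
kind: MachinePool
metadata:
  name: worker-mp-0
  namespace: default
spec:
  clusterName: my-cluster
  replicas: 2
  # Populated by the provider; providerIDList stays empty in this mode.
  infrastructureRefList:
  - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: DockerMachine              # hypothetical instance kind
    name: worker-dmp-0-instance-0    # hypothetical instance name
    namespace: default
  - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: DockerMachine
    name: worker-dmp-0-instance-1
    namespace: default
  # template omitted for brevity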
116 changes: 116 additions & 0 deletions docker-cluster.yaml
@@ -0,0 +1,116 @@
# Creates a cluster with one control-plane node and a MachinePool with one worker node
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: my-cluster
namespace: default
spec:
clusterNetwork:
services:
cidrBlocks: ["10.96.0.0/12"]
pods:
cidrBlocks: ["192.168.0.0/16"]
serviceDomain: cluster.local
controlPlaneRef:
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
name: controlplane
namespace: default
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerCluster
name: my-cluster
namespace: default
---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
name: controlplane
namespace: default
spec:
replicas: 1
version: v1.22.4
machineTemplate:
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerMachineTemplate
name: controlplane
namespace: default
kubeadmConfigSpec:
clusterConfiguration:
controllerManager:
extraArgs:
enable-hostpath-provisioner: "true"
initConfiguration:
nodeRegistration:
kubeletExtraArgs:
# We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
# kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
cgroup-driver: cgroupfs
eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
preKubeadmCommands:
- sysctl -w fs.inotify.max_user_watches=1048576
- sysctl -w fs.inotify.max_user_instances=512
- sysctl -w vm.max_map_count=524288
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerCluster
metadata:
name: my-cluster
namespace: default
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerMachineTemplate
metadata:
name: controlplane
namespace: default
spec:
template:
spec: {}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachinePool
metadata:
name: worker-mp-0
namespace: default
annotations:
cluster.k8s.io/cluster-api-autoscaler-node-group-min-size: "1"
cluster.k8s.io/cluster-api-autoscaler-node-group-max-size: "10"
spec:
clusterName: my-cluster
replicas: 1
template:
spec:
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfig
name: worker-mp-0-config
namespace: default
clusterName: my-cluster
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerMachinePool
name: worker-dmp-0
namespace: default
version: v1.22.4
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerMachinePool
metadata:
name: worker-dmp-0
namespace: default
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfig
metadata:
name: worker-mp-0-config
namespace: default
spec:
joinConfiguration:
nodeRegistration:
kubeletExtraArgs:
# We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
# kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
cgroup-driver: cgroupfs
eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
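As a usage sketch, applying this manifest against a management cluster with the Docker provider installed and then describing the cluster should, with this PR's changes, group the MachinePool's Machines under the Workers branch of the object tree. The commands are standard clusterctl/kubectl usage; the tree below is an illustration of the expected shape, not verbatim output:

kubectl apply -f docker-cluster.yaml
clusterctl describe cluster my-cluster

# Illustrative tree shape:
# Cluster/my-cluster
# ├─ControlPlane/controlplane
# │ └─Machine/controlplane-xxxxx
# └─Workers
#   └─MachinePool/worker-mp-0
#     └─Machine/worker-mp-0-xxxxx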