CNF-14679: Adapt to single H/W manager plugin #220

Open · wants to merge 1 commit into main
3 changes: 2 additions & 1 deletion .vscode/launch.json
@@ -148,7 +148,8 @@
"program": "${workspaceFolder}",
"env": {
"KUBECONFIG": "${env:KUBECONFIG}",
"IMAGE": "quay.io/openshift-kni/oran-o2ims-operator:latest"
"IMAGE": "quay.io/openshift-kni/oran-o2ims-operator:latest",
"HWMGR_PLUGIN_NAMESPACE": "oran-hwmgr-plugin"
},
"args": [
"start",
38 changes: 35 additions & 3 deletions Makefile
@@ -64,6 +64,9 @@ IMG ?= $(IMAGE_TAG_BASE):$(VERSION)
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST_K8S_VERSION = 1.28.0

# HWMGR_PLUGIN_NAMESPACE refers to the namespace of the hardware manager plugin.
HWMGR_PLUGIN_NAMESPACE ?= oran-hwmgr-plugin

# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
@@ -169,6 +172,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified

.PHONY: deploy
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
@$(KUBECTL) create configmap env-config --from-literal=HWMGR_PLUGIN_NAMESPACE=$(HWMGR_PLUGIN_NAMESPACE) --dry-run=client -o yaml > config/manager/env-config.yaml
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | $(KUBECTL) apply -f -
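For reference, the `create configmap ... --dry-run=client -o yaml` step above writes config/manager/env-config.yaml; with the default value it would come out roughly like this (a sketch of kubectl's dry-run output, not a file committed by this PR):

```yaml
apiVersion: v1
data:
  HWMGR_PLUGIN_NAMESPACE: oran-hwmgr-plugin
kind: ConfigMap
metadata:
  creationTimestamp: null
  name: env-config
```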

@@ -184,7 +188,8 @@ $(LOCALBIN):
mkdir -p $(LOCALBIN)

## Tool Binaries
KUBECTL ?= kubectl
SYSTEM_KUBECTL := $(shell command -v kubectl)
KUBECTL ?= $(LOCALBIN)/kubectl
KUSTOMIZE ?= $(LOCALBIN)/kustomize
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
ENVTEST ?= $(LOCALBIN)/setup-envtest
@@ -193,6 +198,30 @@ ENVTEST ?= $(LOCALBIN)/setup-envtest
KUSTOMIZE_VERSION ?= v5.2.1
CONTROLLER_TOOLS_VERSION ?= v0.15.0

## Detect the platform (OS and architecture)
OS := $(shell uname | tr '[:upper:]' '[:lower:]')
ARCH := $(shell uname -m | sed -e 's/x86_64/amd64/' \
-e 's/aarch64/arm64/' \
-e 's/armv7l/arm/')

.PHONY: kubectl
kubectl: $(KUBECTL) ## Ensure kubectl exists; use the system binary if present, otherwise download it locally
$(KUBECTL): $(LOCALBIN)
@if [ -n "$(SYSTEM_KUBECTL)" ]; then \
echo "Using system kubectl: $(SYSTEM_KUBECTL)"; \
ln -sf $(SYSTEM_KUBECTL) $(LOCALBIN)/kubectl; \
else \
echo "System kubectl not found, downloading to $(LOCALBIN)..."; \
if [ "$(OS)" = "darwin" ]; then \
curl -LO "https://dl.k8s.io/release/$$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/$(ARCH)/kubectl"; \
else \
curl -LO "https://dl.k8s.io/release/$$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$(ARCH)/kubectl"; \
fi; \
chmod +x kubectl; \
mv kubectl $(LOCALBIN)/kubectl; \
fi


.PHONY: kustomize
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. If wrong version is installed, it will be removed before downloading.
$(KUSTOMIZE): $(LOCALBIN)
@@ -230,10 +259,12 @@ ifneq ($(OPERATOR_SDK_VERSION),$(OPERATOR_SDK_VERSION_INSTALLED))
endif

.PHONY: bundle
bundle: operator-sdk manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
bundle: operator-sdk manifests kustomize kubectl ## Generate bundle manifests and metadata, then validate generated files.
$(OPERATOR_SDK) generate kustomize manifests --apis-dir api/ -q
@$(KUBECTL) create configmap env-config --from-literal=HWMGR_PLUGIN_NAMESPACE=$(HWMGR_PLUGIN_NAMESPACE) --dry-run=client -o yaml > config/manager/env-config.yaml
cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
$(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS)
@rm bundle/manifests/oran-o2ims-env-config_v1_configmap.yaml ## Clean up the temporary file for bundle validate
$(OPERATOR_SDK) bundle validate ./bundle
sed -i '/^[[:space:]]*createdAt:/d' bundle/manifests/oran-o2ims.clusterserviceversion.yaml

@@ -302,7 +333,7 @@ go-generate:
.PHONY: test tests
test tests:
@echo "Run ginkgo"
ginkgo run -r $(ginkgo_flags)
HWMGR_PLUGIN_NAMESPACE=hwmgr ginkgo run -r $(ginkgo_flags)

.PHONY: fmt
fmt:
@@ -329,6 +360,7 @@ ci-job: deps-update generate fmt vet lint fmt test bundle-check

.PHONY: clean
clean:
@rm -f config/manager/env-config.yaml # Clean up the generated temporary file
rm -rf \
oran-o2ims \
$(NULL)
11 changes: 11 additions & 0 deletions api/hardwaremanagement/v1alpha1/node_pools.go
@@ -42,6 +42,9 @@ type NodePoolSpec struct {
//+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Location Spec",xDescriptors={"urn:alm:descriptor:com.tectonic.ui:text"}
LocationSpec `json:",inline"`

// HwMgrId is the identifier for the hardware manager plugin adaptor.
HwMgrId string `json:"hwMgrId,omitempty"`

NodeGroup []NodeGroup `json:"nodeGroup"`
}

@@ -57,6 +60,11 @@ type Properties struct {
NodeNames []string `json:"nodeNames,omitempty"`
}

// GenerationStatus represents the observed generation for an operator.
type GenerationStatus struct {
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
}

// NodePoolStatus describes the observed state of a request to allocate and prepare
// a node that will eventually be part of a deployment manager.
type NodePoolStatus struct {
@@ -66,6 +74,9 @@ type NodePoolStatus struct {
// Conditions represent the observations of the NodePool's current state.
// Possible values of the condition type are `Provisioned` and `Unknown`.
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`

CloudManager GenerationStatus `json:"cloudManager,omitempty"`
HwMgrPlugin GenerationStatus `json:"hwMgrPlugin,omitempty"`
}

// NodePool is the schema for an allocation request of nodes
17 changes: 17 additions & 0 deletions api/hardwaremanagement/v1alpha1/zz_generated.deepcopy.go

Generated file; diff not rendered.

2 changes: 2 additions & 0 deletions bundle/manifests/oran-o2ims.clusterserviceversion.yaml


@@ -47,6 +47,10 @@ spec:
manager may want to use this to tag the nodes in its database, and to generate
statistics.
type: string
hwMgrId:
description: HwMgrId is the identifier for the hardware manager plugin
adaptor.
type: string
location:
description: Location
type: string
@@ -83,6 +87,14 @@ spec:
NodePoolStatus describes the observed state of a request to allocate and prepare
a node that will eventually be part of a deployment manager.
properties:
cloudManager:
description: GenerationStatus represents the observed generation for
an operator.
properties:
observedGeneration:
format: int64
type: integer
type: object
conditions:
description: |-
Conditions represent the observations of the NodePool's current state.
@@ -155,6 +167,14 @@ spec:
- type
type: object
type: array
hwMgrPlugin:
description: GenerationStatus represents the observed generation for
an operator.
properties:
observedGeneration:
format: int64
type: integer
type: object
properties:
description: Properties represent the node properties in the pool
properties:
1 change: 1 addition & 0 deletions config/manager/.gitignore
@@ -0,0 +1 @@
env-config.yaml
11 changes: 11 additions & 0 deletions config/manager/kustomization.yaml
@@ -3,6 +3,7 @@ kind: Kustomization

resources:
- manager.yaml
- env-config.yaml

generatorOptions:
disableNameSuffixHash: true
@@ -28,3 +29,13 @@ replacements:
select:
kind: Deployment
name: controller-manager
- source:
fieldPath: data.HWMGR_PLUGIN_NAMESPACE
kind: ConfigMap
name: env-config
targets:
- fieldPaths:
- spec.template.spec.containers.[name=manager].env.[name=HWMGR_PLUGIN_NAMESPACE].value
select:
kind: Deployment
name: controller-manager
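With this replacement in place, `kustomize build` rewrites the placeholder env entry in the manager Deployment with the ConfigMap value, so the rendered container spec should end up roughly as (sketch):

```yaml
env:
  - name: HWMGR_PLUGIN_NAMESPACE
    value: oran-hwmgr-plugin  # injected from the env-config ConfigMap
```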
3 changes: 3 additions & 0 deletions config/manager/manager.yaml
@@ -77,6 +77,9 @@ spec:
- name: KUBE_RBAC_PROXY_IMAGE
# A placeholder for the replacement kustomization that will inject the rbac image from that container spec
value: kube-rbac-proxy:latest
- name: HWMGR_PLUGIN_NAMESPACE
# A placeholder for the replacement kustomization that will inject the value from the config map
value: plugin-namespace-placeholder
command:
- /usr/bin/oran-o2ims
- start
8 changes: 7 additions & 1 deletion internal/controllers/clusterrequest_controller.go
@@ -730,9 +730,10 @@ func (t *clusterRequestReconcilerTask) handleRenderHardwareTemplate(ctx context.
nodePool.Spec.CloudID = clusterInstance.GetName()
nodePool.Spec.LocationSpec = t.object.Spec.LocationSpec
nodePool.Spec.Site = t.object.Spec.Site
nodePool.Spec.HwMgrId = hwTemplateCm.Data[utils.HwTemplatePluginMgr]
nodePool.Spec.NodeGroup = nodeGroup
nodePool.ObjectMeta.Name = clusterInstance.GetName()
nodePool.ObjectMeta.Namespace = hwTemplateCm.Data[utils.HwTemplatePluginMgr]
nodePool.ObjectMeta.Namespace = utils.GetHwMgrPluginNS()

// Add boot interface label to the generated nodePool
annotation := make(map[string]string)
@@ -1558,6 +1559,11 @@ func (t *clusterRequestReconcilerTask) createNodePoolResources(ctx context.Conte
nodePool.GetNamespace(),
),
)
// Set the CloudManager's ObservedGeneration on the node pool resource status field
err = utils.SetCloudManagerGenerationStatus(ctx, t.client, nodePool)
if err != nil {
return fmt.Errorf("failed to set CloudManager's ObservedGeneration: %w", err)
}
return nil
}

3 changes: 2 additions & 1 deletion internal/controllers/clusterrequest_controller_test.go
@@ -1890,10 +1890,11 @@ var _ = Describe("renderHardwareTemplate", func() {
Expect(err).ToNot(HaveOccurred())
Expect(nodePool).ToNot(BeNil())
Expect(nodePool.ObjectMeta.Name).To(Equal(clusterInstance.GetName()))
Expect(nodePool.ObjectMeta.Namespace).To(Equal(cm.Data[utils.HwTemplatePluginMgr]))
Expect(nodePool.ObjectMeta.Namespace).To(Equal(utils.UnitTestHwmgrNamespace))
Expect(nodePool.Annotations[utils.HwTemplateBootIfaceLabel]).To(Equal(cm.Data[utils.HwTemplateBootIfaceLabel]))

Expect(nodePool.Spec.CloudID).To(Equal(clusterInstance.GetName()))
Expect(nodePool.Spec.HwMgrId).To(Equal(cm.Data[utils.HwTemplatePluginMgr]))
Expect(nodePool.Labels[clusterRequestNameLabel]).To(Equal(task.object.Name))
Expect(nodePool.Labels[clusterRequestNamespaceLabel]).To(Equal(task.object.Namespace))

2 changes: 2 additions & 0 deletions internal/controllers/utils/constants.go
@@ -150,6 +150,7 @@ const (
UnitTestHwmgrID = "hwmgr"
UnitTestHwmgrNamespace = "hwmgr"
TempDellPluginNamespace = "dell-hwmgr"
DefaultPluginNamespace = "oran-hwmgr-plugin"
)

// POD Container Names
@@ -162,4 +163,5 @@ const (
const (
ServerImageName = "IMAGE"
KubeRbacProxyImageName = "KUBE_RBAC_PROXY_IMAGE"
HwMgrPluginNameSpace = "HWMGR_PLUGIN_NAMESPACE"
)
42 changes: 41 additions & 1 deletion internal/controllers/utils/utils.go
@@ -12,6 +12,7 @@ import (
"slices"
"strconv"
"strings"
"sync"
"text/template"
"time"

@@ -41,7 +42,11 @@ import (
"sigs.k8s.io/yaml"
)

var oranUtilsLog = ctrl.Log.WithName("oranUtilsLog")
var (
oranUtilsLog = ctrl.Log.WithName("oranUtilsLog")
hwMgrPluginNameSpace string
once sync.Once
)

func UpdateK8sCRStatus(ctx context.Context, c client.Client, object client.Object) error {
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
@@ -1077,3 +1082,38 @@ func GetEnvOrDefault(name, defaultValue string) string {
}
return value
}

// GetHwMgrPluginNS returns the value of the HWMGR_PLUGIN_NAMESPACE environment variable, falling back to the default plugin namespace if it is unset
func GetHwMgrPluginNS() string {
// Ensure that this code only runs once
once.Do(func() {
hwMgrPluginNameSpace = GetEnvOrDefault(HwMgrPluginNameSpace, DefaultPluginNamespace)
})
return hwMgrPluginNameSpace
}
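A minimal caller sketch for the new helper (hypothetical; the module import path is assumed from the repository name). Because of sync.Once, the namespace is resolved on the first call and cached for the process lifetime, so anything that needs a non-default value, such as the test target's HWMGR_PLUGIN_NAMESPACE=hwmgr, must set the variable before the first call:

```go
package main

import (
	"fmt"
	"os"

	// Assumed import path; adjust to the module's actual path.
	"github.com/openshift-kni/oran-o2ims/internal/controllers/utils"
)

func main() {
	// Must be set before the first call; sync.Once caches the result afterwards.
	os.Setenv(utils.HwMgrPluginNameSpace, "oran-hwmgr-plugin")
	fmt.Println(utils.GetHwMgrPluginNS()) // prints "oran-hwmgr-plugin"

	// Later changes to the environment variable have no effect on this process.
	os.Setenv(utils.HwMgrPluginNameSpace, "other-ns")
	fmt.Println(utils.GetHwMgrPluginNS()) // still "oran-hwmgr-plugin"
}
```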

// SetCloudManagerGenerationStatus sets the CloudManager's ObservedGeneration on the node pool resource status field
func SetCloudManagerGenerationStatus(ctx context.Context, c client.Client, nodePool *hwv1alpha1.NodePool) error {
// Get the generated NodePool and its metadata.generation
exists, err := DoesK8SResourceExist(ctx, c, nodePool.GetName(),
nodePool.GetNamespace(), nodePool)
if err != nil {
return fmt.Errorf("failed to get NodePool %s in namespace %s: %w", nodePool.GetName(), nodePool.GetNamespace(), err)
}
if !exists {
return fmt.Errorf("nodePool %s does not exist in namespace %s", nodePool.GetName(), nodePool.GetNamespace())
}
// We only set ObservedGeneration when the NodePool is first created because we do not update the spec after creation.
// Once ObservedGeneration is set, no need to update it again.
if nodePool.Status.CloudManager.ObservedGeneration != 0 {
// ObservedGeneration is already set, so we do nothing.
return nil
}
// Set ObservedGeneration to the current generation of the resource
nodePool.Status.CloudManager.ObservedGeneration = nodePool.ObjectMeta.Generation
err = UpdateK8sCRStatus(ctx, c, nodePool)
if err != nil {
return fmt.Errorf("failed to update status for NodePool %s %s: %w", nodePool.GetName(), nodePool.GetNamespace(), err)
}
return nil
}
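Combined with the API change above, a reconciled NodePool status would then carry both generation stanzas, roughly like this (illustrative values; the hwMgrPlugin side is presumably maintained by the plugin itself, which is outside this diff):

```yaml
status:
  cloudManager:
    observedGeneration: 1  # set once at creation by SetCloudManagerGenerationStatus
  hwMgrPlugin:
    observedGeneration: 1  # assumed to be updated by the hardware manager plugin
```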