diff --git a/.github/workflows/docker-image-push.yml b/.github/workflows/docker-image-push.yml
new file mode 100644
index 0000000000..9f2ce89f15
--- /dev/null
+++ b/.github/workflows/docker-image-push.yml
@@ -0,0 +1,69 @@
+name: docker-image-push
+
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+on:
+  push:
+    tags:
+      - 'v*'
+
+env:
+  # Use docker.io for Docker Hub if empty
+  REGISTRY: ghcr.io
+  # github.repository as <account>/<repo>
+  IMAGE_NAME: adobe/external-dns
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v2
+
+      - name: Set up Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.17
+        id: go
+
+      # Login against a Docker registry except on PR
+      # https://github.com/docker/login-action
+      - name: Log into registry ${{ env.REGISTRY }}
+        if: github.event_name != 'pull_request'
+        uses: docker/login-action@v1
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      # Extract metadata (tags, labels) for Docker
+      # https://github.com/docker/metadata-action
+      - name: Extract Docker metadata
+        id: meta
+        uses: docker/metadata-action@v3
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+          tags: |
+            type=ref,event=branch
+            type=ref,event=pr
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}.{{minor}}
+
+      # Build and push Docker image with Buildx (don't push on PR)
+      # https://github.com/docker/build-push-action
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          build-args: |
+            ARCH=amd64
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
diff --git a/.github/workflows/release-chart.yaml b/.github/workflows/release-chart.yaml
index 0bc1780993..34399e4902 100644
--- a/.github/workflows/release-chart.yaml
+++ b/.github/workflows/release-chart.yaml
@@ -14,7 +14,7 @@ jobs:
     permissions:
       contents: write  # to push chart release and create a release (helm/chart-releaser-action)
-    if: github.repository == 'kubernetes-sigs/external-dns'
+    if: github.repository == 'adobe/external-dns'
    runs-on: ubuntu-latest
     defaults:
       run:
@@ -32,13 +32,6 @@ jobs:
           chart_version="$(grep -Po "(?<=^version: ).+" charts/external-dns/Chart.yaml)"
           echo "::set-output name=version::${chart_version}"
 
-      - name: Get changelog entry
-        id: changelog_reader
-        uses: mindsers/changelog-reader-action@5bfb30f7871d5c4cde50cd897314f37578043394
-        with:
-          path: charts/external-dns/CHANGELOG.md
-          version: "v${{ steps.chart_version.outputs.version }}"
-
       - name: Create release notes
         run: |
           set -euo pipefail
diff --git a/Makefile b/Makefile
index 1e00099d81..fc0c3ecb86 100644
--- a/Makefile
+++ b/Makefile
@@ -145,3 +145,30 @@ release.staging:
 
 release.prod:
 	$(MAKE) build.push/multiarch
+
+# ================= Kind deployment
+
+KIND_CLUSTER="edns"
+
+kind-up:
+	kind create cluster \
+		--image kindest/node:v1.23.0@sha256:49824ab1727c04e56a21a5d8372a402fcd32ea51ac96a2706a12af38934f81ac \
+		--name $(KIND_CLUSTER) \
+		--config zarf/kind/kind-config.yaml
+	kubectl config set-context --current --namespace=default
+
+kind-down:
+	kind delete cluster --name $(KIND_CLUSTER)
+
+kind-load:
+	kind load docker-image "$(IMAGE):$(VERSION)" --name $(KIND_CLUSTER)
+
+kind-apply:
+	kubectl apply -f zarf/helm/rolebinding.yaml
+	helm template edns charts/external-dns -f zarf/helm/custom-values.yaml --set image.repository=$(IMAGE) --set image.tag=$(VERSION) | kubectl apply -f -
+	kubectl apply -f zarf/helm/service.yaml
+
+kind-update: build build.docker kind-load kind-apply
+
+kind-logs:
+	kubectl logs deployment/edns-external-dns -f
diff --git a/charts/external-dns/Chart.yaml b/charts/external-dns/Chart.yaml
index 72d1c1f3ba..776a8f2a8a 100644
--- a/charts/external-dns/Chart.yaml
+++ b/charts/external-dns/Chart.yaml
@@ -2,8 +2,8 @@
 name: external-dns
 description: ExternalDNS synchronizes exposed Kubernetes Services and Ingresses with DNS providers.
 type: application
-version: 1.12.0
-appVersion: 0.13.1
+version: 1.12.1
+appVersion: 0.13.2-20230123-2320-adobe
 keywords:
   - kubernetes
   - externaldns
@@ -21,4 +21,4 @@ maintainers:
 annotations:
   artifacthub.io/changes: |
     - kind: changed
-      description: "Updated ExternalDNS version to v0.13.1."
+      description: "Updated ExternalDNS version to v0.13.2."
diff --git a/charts/external-dns/README.md b/charts/external-dns/README.md
index cfbed3371a..d73d78fecc 100644
--- a/charts/external-dns/README.md
+++ b/charts/external-dns/README.md
@@ -7,7 +7,7 @@
 Before you can install the chart you will need to add the `external-dns` repo to [Helm](https://helm.sh/).
 
 ```shell
-helm repo add external-dns https://kubernetes-sigs.github.io/external-dns/
+helm repo add external-dns https://adobe.github.io/external-dns/
 ```
 
 After you've installed the repo you can install the chart.
@@ -69,6 +69,7 @@ The following table lists the configurable parameters of the _ExternalDNS_ chart
 | `txtPrefix` | Prefix to create a TXT record with a name following the pattern `prefix.<CNAME record>`. | `""` |
 | `domainFilters` | Limit possible target zones by domain suffixes. | `[]` |
 | `provider` | DNS provider where the DNS records will be created, for the available providers and how to configure them see the [README](https://github.com/kubernetes-sigs/external-dns#deploying-to-a-cluster) (this can be templated). | `aws` |
+| `watchNamespaces` | List of namespaces to be observed for new DNS entries. | `[]` |
 | `extraArgs` | Extra arguments to pass to the _external-dns_ container, these are needed for provider specific arguments (these can be templated). | `[]` |
 | `deploymentStrategy` | .spec.strategy of the external-dns Deployment. Defaults to 'Recreate' since multiple external-dns pods may conflict with each other. | `{type: Recreate}` |
 | `secretConfiguration.enabled` | Enable additional secret configuration. | `false` |
diff --git a/charts/external-dns/templates/deployment.yaml b/charts/external-dns/templates/deployment.yaml
index 70952675d8..3e11214005 100644
--- a/charts/external-dns/templates/deployment.yaml
+++ b/charts/external-dns/templates/deployment.yaml
@@ -73,6 +73,9 @@ spec:
         {{- if .Values.triggerLoopOnEvent }}
         - --events
         {{- end }}
+        {{- if .Values.watchNamespaces }}
+        - --namespace={{ .Values.watchNamespaces | join "," }}
+        {{- end }}
         {{- range .Values.sources }}
         - --source={{ . }}
         {{- end }}
diff --git a/charts/external-dns/values.yaml b/charts/external-dns/values.yaml
index fd0070722e..f462fa9681 100644
--- a/charts/external-dns/values.yaml
+++ b/charts/external-dns/values.yaml
@@ -3,7 +3,7 @@
 # Declare variables to be passed into your templates.
 
 image:
-  repository: registry.k8s.io/external-dns/external-dns
+  repository: ghcr.io/adobe/external-dns
   # Overrides the image tag whose default is v{{ .Chart.AppVersion }}
   tag: ""
   pullPolicy: IfNotPresent
@@ -126,6 +126,8 @@ domainFilters: []
 
 provider: aws
 
+watchNamespaces: []
+
 extraArgs: []
 
 secretConfiguration:
diff --git a/source/compatibility.go b/source/compatibility.go
index bc6e19abf9..f536d6cb3d 100644
--- a/source/compatibility.go
+++ b/source/compatibility.go
@@ -138,30 +138,32 @@ func legacyEndpointsFromDNSControllerNodePortService(svc *v1.Service, sc *servic
 		return nil, nil
 	}
 
-	nodes, err := sc.nodeInformer.Lister().List(labels.Everything())
-	if err != nil {
-		return nil, err
-	}
+	for _, informer := range sc.informers {
+		nodes, err := informer.nodeInformer.Lister().List(labels.Everything())
+		if err != nil {
+			return nil, err
+		}
 
-	var hostnameList []string
-	if isExternal {
-		hostnameList = strings.Split(strings.Replace(hostnameAnnotation, " ", "", -1), ",")
-	} else {
-		hostnameList = strings.Split(strings.Replace(internalHostnameAnnotation, " ", "", -1), ",")
-	}
+		var hostnameList []string
+		if isExternal {
+			hostnameList = strings.Split(strings.Replace(hostnameAnnotation, " ", "", -1), ",")
+		} else {
+			hostnameList = strings.Split(strings.Replace(internalHostnameAnnotation, " ", "", -1), ",")
+		}
 
-	for _, hostname := range hostnameList {
-		for _, node := range nodes {
-			_, isNode := node.Labels["node-role.kubernetes.io/node"]
-			if !isNode {
-				continue
-			}
-			for _, address := range node.Status.Addresses {
-				if address.Type == v1.NodeExternalIP && isExternal {
-					endpoints = append(endpoints, endpoint.NewEndpoint(hostname, endpoint.RecordTypeA, address.Address))
+		for _, hostname := range hostnameList {
+			for _, node := range nodes {
+				_, isNode := node.Labels["node-role.kubernetes.io/node"]
+				if !isNode {
+					continue
 				}
-				if address.Type == v1.NodeInternalIP && isInternal {
-					endpoints = append(endpoints, endpoint.NewEndpoint(hostname, endpoint.RecordTypeA, address.Address))
+				for _, address := range node.Status.Addresses {
+					if address.Type == v1.NodeExternalIP && isExternal {
+						endpoints = append(endpoints, endpoint.NewEndpoint(hostname, endpoint.RecordTypeA, address.Address))
+					}
+					if address.Type == v1.NodeInternalIP && isInternal {
+						endpoints = append(endpoints, endpoint.NewEndpoint(hostname, endpoint.RecordTypeA, address.Address))
+					}
 				}
 			}
 		}
 	}
diff --git a/source/service.go b/source/service.go
index 9c47579dda..61342f463f 100644
--- a/source/service.go
+++ b/source/service.go
@@ -57,14 +57,19 @@ type serviceSource struct {
 	publishInternal                bool
 	publishHostIP                  bool
 	alwaysPublishNotReadyAddresses bool
-	serviceInformer                coreinformers.ServiceInformer
-	endpointsInformer              coreinformers.EndpointsInformer
-	podInformer                    coreinformers.PodInformer
-	nodeInformer                   coreinformers.NodeInformer
+	informers                      []informersMap
 	serviceTypeFilter              map[string]struct{}
 	labelSelector                  labels.Selector
 }
 
+type informersMap struct {
+	namespace         string
+	serviceInformer   coreinformers.ServiceInformer
+	endpointsInformer coreinformers.EndpointsInformer
+	podInformer       coreinformers.PodInformer
+	nodeInformer      coreinformers.NodeInformer
+}
+
 // NewServiceSource creates a new serviceSource with the given config.
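+// The namespace argument accepts a comma-separated list of namespaces to watch.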
 func NewServiceSource(ctx context.Context, kubeClient kubernetes.Interface, namespace, annotationFilter string, fqdnTemplate string, combineFqdnAnnotation bool, compatibility string, publishInternal bool, publishHostIP bool, alwaysPublishNotReadyAddresses bool, serviceTypeFilter []string, ignoreHostnameAnnotation bool, labelSelector labels.Selector) (Source, error) {
 	tmpl, err := parseTemplate(fqdnTemplate)
@@ -72,45 +77,75 @@ func NewServiceSource(ctx context.Context, kubeClient kubernetes.Interface, name
 		return nil, err
 	}
 
-	// Use shared informers to listen for add/update/delete of services/pods/nodes in the specified namespace.
-	// Set resync period to 0, to prevent processing when nothing has changed
-	informerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 0, kubeinformers.WithNamespace(namespace))
-	serviceInformer := informerFactory.Core().V1().Services()
-	endpointsInformer := informerFactory.Core().V1().Endpoints()
-	podInformer := informerFactory.Core().V1().Pods()
-	nodeInformer := informerFactory.Core().V1().Nodes()
-
-	// Add default resource event handlers to properly initialize informer.
-	serviceInformer.Informer().AddEventHandler(
-		cache.ResourceEventHandlerFuncs{
-			AddFunc: func(obj interface{}) {
-			},
-		},
-	)
-	endpointsInformer.Informer().AddEventHandler(
-		cache.ResourceEventHandlerFuncs{
-			AddFunc: func(obj interface{}) {
-			},
-		},
-	)
-	podInformer.Informer().AddEventHandler(
-		cache.ResourceEventHandlerFuncs{
-			AddFunc: func(obj interface{}) {
-			},
-		},
-	)
-	nodeInformer.Informer().AddEventHandler(
-		cache.ResourceEventHandlerFuncs{
-			AddFunc: func(obj interface{}) {
-			},
-		},
-	)
-
-	informerFactory.Start(ctx.Done())
+	// The namespace argument may be a comma-separated list ("ns1,ns2,ns3"):
+	// split it, drop duplicates, and build one shared-informer set per namespace.
+	namespaces := removeDuplicateValues(strings.Split(namespace, ","))
+	var informers []informersMap
+
+	for _, entry := range namespaces {
+		// Use shared informers to listen for add/update/delete of services/pods/nodes in the specified namespace.
+		// Set resync period to 0, to prevent processing when nothing has changed
+		informerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 0, kubeinformers.WithNamespace(entry))
+		serviceInformer := informerFactory.Core().V1().Services()
+		endpointsInformer := informerFactory.Core().V1().Endpoints()
+		podInformer := informerFactory.Core().V1().Pods()
+
+		// Add default resource event handlers to properly initialize informer.
+		serviceInformer.Informer().AddEventHandler(
+			cache.ResourceEventHandlerFuncs{
+				AddFunc: func(obj interface{}) {
+				},
+			},
+		)
+		endpointsInformer.Informer().AddEventHandler(
+			cache.ResourceEventHandlerFuncs{
+				AddFunc: func(obj interface{}) {
+				},
+			},
+		)
+		podInformer.Informer().AddEventHandler(
+			cache.ResourceEventHandlerFuncs{
+				AddFunc: func(obj interface{}) {
+				},
+			},
+		)
+
+		// The node informer is only needed when NodePort services are in scope.
+		var nodeInformer coreinformers.NodeInformer
+
+		if contains(serviceTypeFilter, "NodePort") || len(serviceTypeFilter) == 0 {
+			nodeInformer = informerFactory.Core().V1().Nodes()
+
+			nodeInformer.Informer().AddEventHandler(
+				cache.ResourceEventHandlerFuncs{
+					AddFunc: func(obj interface{}) {
+					},
+				},
+			)
+		}
+
+		nsInformers := informersMap{
+			namespace:         entry,
+			serviceInformer:   serviceInformer,
+			endpointsInformer: endpointsInformer,
+			podInformer:       podInformer,
+			nodeInformer:      nodeInformer,
+		}
+
+		informers = append(informers, nsInformers)
+
+		informerFactory.Start(ctx.Done())
+
+		// wait for the local cache to be populated.
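+		// Note: each namespace's factory is started and synced before the next one is built.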
+		if err := waitForCacheSync(context.Background(), informerFactory); err != nil {
+			return nil, err
+		}
-
-	// wait for the local cache to be populated.
-	if err := waitForCacheSync(context.Background(), informerFactory); err != nil {
-		return nil, err
 	}
 
 	// Transform the slice into a map so it will
@@ -131,10 +166,7 @@ func NewServiceSource(ctx context.Context, kubeClient kubernetes.Interface, name
 		publishInternal:                publishInternal,
 		publishHostIP:                  publishHostIP,
 		alwaysPublishNotReadyAddresses: alwaysPublishNotReadyAddresses,
-		serviceInformer:                serviceInformer,
-		endpointsInformer:              endpointsInformer,
-		podInformer:                    podInformer,
-		nodeInformer:                   nodeInformer,
+		informers:                      informers,
 		serviceTypeFilter:              serviceTypes,
 		labelSelector:                  labelSelector,
 	}, nil
@@ -142,65 +174,67 @@ func NewServiceSource(ctx context.Context, kubeClient kubernetes.Interface, name
 
 // Endpoints returns endpoint objects for each service that should be processed.
 func (sc *serviceSource) Endpoints(ctx context.Context) ([]*endpoint.Endpoint, error) {
-	services, err := sc.serviceInformer.Lister().Services(sc.namespace).List(sc.labelSelector)
-	if err != nil {
-		return nil, err
-	}
-	services, err = sc.filterByAnnotations(services)
-	if err != nil {
-		return nil, err
-	}
+	endpoints := []*endpoint.Endpoint{}
+	for _, informer := range sc.informers {
 
-	// filter on service types if at least one has been provided
-	if len(sc.serviceTypeFilter) > 0 {
-		services = sc.filterByServiceType(services)
-	}
+		services, err := informer.serviceInformer.Lister().Services(informer.namespace).List(sc.labelSelector)
+		if err != nil {
+			return nil, err
+		}
 
-	endpoints := []*endpoint.Endpoint{}
+		services, err = sc.filterByAnnotations(services)
+		if err != nil {
+			return nil, err
+		}
 
-	for _, svc := range services {
-		// Check controller annotation to see if we are responsible.
-		controller, ok := svc.Annotations[controllerAnnotationKey]
-		if ok && controller != controllerAnnotationValue {
-			log.Debugf("Skipping service %s/%s because controller value does not match, found: %s, required: %s",
-				svc.Namespace, svc.Name, controller, controllerAnnotationValue)
-			continue
+		// filter on service types if at least one has been provided
+		if len(sc.serviceTypeFilter) > 0 {
+			services = sc.filterByServiceType(services)
 		}
 
-		svcEndpoints := sc.endpoints(svc)
+		for _, svc := range services {
+			// Check controller annotation to see if we are responsible.
+			controller, ok := svc.Annotations[controllerAnnotationKey]
+			if ok && controller != controllerAnnotationValue {
+				log.Debugf("Skipping service %s/%s because controller value does not match, found: %s, required: %s",
+					svc.Namespace, svc.Name, controller, controllerAnnotationValue)
+				continue
+			}
 
-		// process legacy annotations if no endpoints were returned and compatibility mode is enabled.
+			svcEndpoints := sc.endpoints(svc)
+
+			// process legacy annotations if no endpoints were returned and compatibility mode is enabled.
+			if len(svcEndpoints) == 0 && sc.compatibility != "" {
+				svcEndpoints, err = legacyEndpointsFromService(svc, sc)
+				if err != nil {
+					return nil, err
+				}
 			}
 
-		// apply template if none of the above is found
-		if (sc.combineFQDNAnnotation || len(svcEndpoints) == 0) && sc.fqdnTemplate != nil {
-			sEndpoints, err := sc.endpointsFromTemplate(svc)
-			if err != nil {
-				return nil, err
+			// apply template if none of the above is found
+			if (sc.combineFQDNAnnotation || len(svcEndpoints) == 0) && sc.fqdnTemplate != nil {
+				sEndpoints, err := sc.endpointsFromTemplate(svc)
+				if err != nil {
+					return nil, err
+				}
+
+				if sc.combineFQDNAnnotation {
+					svcEndpoints = append(svcEndpoints, sEndpoints...)
+				} else {
+					svcEndpoints = sEndpoints
+				}
 			}
 
-			if sc.combineFQDNAnnotation {
-				svcEndpoints = append(svcEndpoints, sEndpoints...)
-			} else {
-				svcEndpoints = sEndpoints
+			if len(svcEndpoints) == 0 {
+				log.Debugf("No endpoints could be generated from service %s/%s", svc.Namespace, svc.Name)
+				continue
 			}
-		}
 
-		if len(svcEndpoints) == 0 {
-			log.Debugf("No endpoints could be generated from service %s/%s", svc.Namespace, svc.Name)
-			continue
+			log.Debugf("Endpoints generated from service: %s/%s: %v", svc.Namespace, svc.Name, svcEndpoints)
+			sc.setResourceLabel(svc, svcEndpoints)
+			endpoints = append(endpoints, svcEndpoints...)
 		}
-
-		log.Debugf("Endpoints generated from service: %s/%s: %v", svc.Namespace, svc.Name, svcEndpoints)
-		sc.setResourceLabel(svc, svcEndpoints)
-		endpoints = append(endpoints, svcEndpoints...)
 	}
-
 	// this sorting is required to make merging work.
 	// after we merge endpoints that have same DNS, we want to ensure that we end up with the same service being an "owner"
 	// of all those records, as otherwise each time we update, we will end up with a different service that gets data merged in
@@ -251,13 +285,17 @@ func (sc *serviceSource) extractHeadlessEndpoints(svc *v1.Service, hostname stri
 		return nil
 	}
 
-	endpointsObject, err := sc.endpointsInformer.Lister().Endpoints(svc.Namespace).Get(svc.GetName())
+	// TODO: handle the case where no informer matches the service's namespace.
+	// Extract the informer for the service's namespace.
+	informer := getInformerByNamespace(sc.informers, svc.Namespace)
+
+	endpointsObject, err := informer.endpointsInformer.Lister().Endpoints(svc.Namespace).Get(svc.GetName())
 	if err != nil {
 		log.Errorf("Get endpoints of service[%s] error:%v", svc.GetName(), err)
 		return endpoints
 	}
 
-	pods, err := sc.podInformer.Lister().Pods(svc.Namespace).List(selector)
+	pods, err := informer.podInformer.Lister().Pods(svc.Namespace).List(selector)
 	if err != nil {
 		log.Errorf("List pods of service[%s] error: %v", svc.GetName(), err)
 		return endpoints
@@ -299,7 +337,7 @@ func (sc *serviceSource) extractHeadlessEndpoints(svc *v1.Service, hostname stri
 			targets := getTargetsFromTargetAnnotation(pod.Annotations)
 			if len(targets) == 0 {
 				if endpointsType == EndpointsTypeNodeExternalIP {
-					node, err := sc.nodeInformer.Lister().Get(pod.Spec.NodeName)
+					node, err := informer.nodeInformer.Lister().Get(pod.Spec.NodeName)
 					if err != nil {
 						log.Errorf("Get node[%s] of pod[%s] error: %v; not adding any NodeExternalIP endpoints", pod.Spec.NodeName, pod.GetName(), err)
 						return endpoints
@@ -564,6 +602,9 @@ func (sc *serviceSource) extractNodePortTargets(svc *v1.Service) (endpoint.Targe
 		err error
 	)
 
+	// Extract the informer for the service's namespace.
+	informer := getInformerByNamespace(sc.informers, svc.Namespace)
+
 	switch svc.Spec.ExternalTrafficPolicy {
 	case v1.ServiceExternalTrafficPolicyTypeLocal:
 		nodesMap := map[*v1.Node]struct{}{}
@@ -575,14 +616,14 @@ func (sc *serviceSource) extractNodePortTargets(svc *v1.Service) (endpoint.Targe
 		if err != nil {
 			return nil, err
 		}
-		pods, err := sc.podInformer.Lister().Pods(svc.Namespace).List(selector)
+		pods, err := informer.podInformer.Lister().Pods(svc.Namespace).List(selector)
 		if err != nil {
 			return nil, err
 		}
 
 		for _, v := range pods {
 			if v.Status.Phase == v1.PodRunning {
-				node, err := sc.nodeInformer.Lister().Get(v.Spec.NodeName)
+				node, err := informer.nodeInformer.Lister().Get(v.Spec.NodeName)
 				if err != nil {
 					log.Debugf("Unable to find node where Pod %s is running", v.Spec.Hostname)
 					continue
@@ -594,7 +635,7 @@ func (sc *serviceSource) extractNodePortTargets(svc *v1.Service) (endpoint.Targe
 			}
 		}
 	default:
-		nodes, err = sc.nodeInformer.Lister().List(labels.Everything())
+		nodes, err = informer.nodeInformer.Lister().List(labels.Everything())
 		if err != nil {
 			return nil, err
 		}
@@ -666,7 +707,47 @@ func (sc *serviceSource) extractNodePortEndpoints(svc *v1.Service, nodeTargets e
 
 func (sc *serviceSource) AddEventHandler(ctx context.Context, handler func()) {
 	log.Debug("Adding event handler for service")
 
-	// Right now there is no way to remove event handler from informer, see:
-	// https://github.com/kubernetes/kubernetes/issues/79610
-	sc.serviceInformer.Informer().AddEventHandler(eventHandlerFunc(handler))
+	for _, informer := range sc.informers {
+		// Right now there is no way to remove event handler from informer, see:
+		// https://github.com/kubernetes/kubernetes/issues/79610
+		informer.serviceInformer.Informer().AddEventHandler(eventHandlerFunc(handler))
+	}
+}
+
+// contains reports whether str is present in s.
+func contains(s []string, str string) bool {
+	for _, v := range s {
+		if v == str {
+			return true
+		}
+	}
+
+	return false
+}
+
+// removeDuplicateValues returns strSlice with duplicate entries removed,
+// keeping the first occurrence of each value and preserving order.
+func removeDuplicateValues(strSlice []string) []string {
+	keys := make(map[string]bool)
+	list := []string{}
+
+	// Track values already seen; append an entry to the result only
+	// the first time it occurs.
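+	// e.g. removeDuplicateValues([]string{"ns1", "ns2", "ns1"})
+	// returns []string{"ns1", "ns2"}.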
+	for _, entry := range strSlice {
+		if _, seen := keys[entry]; !seen {
+			keys[entry] = true
+			list = append(list, entry)
+		}
+	}
+	return list
+}
+
+// getInformerByNamespace returns the informer set registered for the given
+// namespace. An entry with an empty namespace watches all namespaces and
+// therefore matches any lookup. If nothing matches, the zero value is
+// returned, so callers must only pass namespaces the source was built with.
+func getInformerByNamespace(informers []informersMap, namespace string) informersMap {
+	var informer informersMap
+	for _, entry := range informers {
+		if entry.namespace == namespace || entry.namespace == "" {
+			informer = entry
+			break
+		}
+	}
+
+	return informer
+}
diff --git a/zarf/helm/custom-values.yaml b/zarf/helm/custom-values.yaml
new file mode 100644
index 0000000000..1d21830657
--- /dev/null
+++ b/zarf/helm/custom-values.yaml
@@ -0,0 +1,15 @@
+rbac:
+  # Specifies whether RBAC resources should be created
+  create: false
+
+sources:
+  - service
+
+interval: 10s
+logLevel: debug
+extraArgs:
+  - --domain-filter=test.domain.net
+  - --namespace=default
+  - --service-type-filter=ExternalName
+
+provider: inmemory
diff --git a/zarf/helm/rolebinding.yaml b/zarf/helm/rolebinding.yaml
new file mode 100644
index 0000000000..14c7b81a05
--- /dev/null
+++ b/zarf/helm/rolebinding.yaml
@@ -0,0 +1,13 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: rb-external-dns
+  namespace: default
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: admin
+subjects:
+- kind: ServiceAccount
+  name: edns-external-dns
+  namespace: default
diff --git a/zarf/helm/service.yaml b/zarf/helm/service.yaml
new file mode 100644
index 0000000000..755844ea85
--- /dev/null
+++ b/zarf/helm/service.yaml
@@ -0,0 +1,9 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: inmemory-service2
+  annotations:
+    external-dns.alpha.kubernetes.io/hostname: my-dns-test.test.domain.net
+spec:
+  type: ExternalName
+  externalName: 192.168.10.10
diff --git a/zarf/kind/kind-config.yaml b/zarf/kind/kind-config.yaml
new file mode 100644
index 0000000000..329c4b13db
--- /dev/null
+++ b/zarf/kind/kind-config.yaml
@@ -0,0 +1,17 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+  extraPortMappings:
+  - containerPort: 3000
+    hostPort: 3000
+  - containerPort: 3001
+    hostPort: 3001
+  - containerPort: 4000
+    hostPort: 4000
+  - containerPort: 4001
+    hostPort: 4001
+  - containerPort: 9411
+    hostPort: 9411
+  - containerPort: 5432
+    hostPort: 5432
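
A minimal usage sketch (illustrative, not part of the patch) of the multi-namespace service source introduced above. The kubeconfig handling, the "ns1,ns2" namespace list, and all argument values are assumptions for demonstration only; the import path assumes the fork keeps the upstream sigs.k8s.io/external-dns module path.

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"

	"sigs.k8s.io/external-dns/source"
)

func main() {
	// Build a client from the default kubeconfig (assumption: local dev cluster,
	// e.g. the kind cluster created by the Makefile targets above).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// "ns1,ns2" exercises the new comma-separated namespace handling:
	// one informer set is built per namespace, duplicates removed.
	svcSource, err := source.NewServiceSource(context.Background(), client,
		"ns1,ns2", // namespace(s), comma-separated
		"",        // annotationFilter
		"",        // fqdnTemplate
		false,     // combineFqdnAnnotation
		"",        // compatibility
		false,     // publishInternal
		false,     // publishHostIP
		false,     // alwaysPublishNotReadyAddresses
		nil,       // serviceTypeFilter (empty: all types, node informer enabled)
		false,     // ignoreHostnameAnnotation
		labels.Everything())
	if err != nil {
		panic(err)
	}

	// Endpoints now aggregates services across every watched namespace.
	eps, err := svcSource.Endpoints(context.Background())
	if err != nil {
		panic(err)
	}
	for _, ep := range eps {
		fmt.Println(ep)
	}
}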