diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 7c0f59227ff6..bc18b31b18a4 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -69,6 +69,7 @@ jobs:
           - docker+containerd # same as docker, but with containerd snapshotter
           - docker-container
           - remote
+          - kubernetes
         pkg:
           - ./tests
         include:
diff --git a/Dockerfile b/Dockerfile
index 835b1f280088..0189e3ccf0d1 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -7,6 +7,7 @@ ARG DOCKER_VERSION=24.0.6
 ARG GOTESTSUM_VERSION=v1.9.0
 ARG REGISTRY_VERSION=2.8.0
 ARG BUILDKIT_VERSION=v0.11.6
+ARG K3S_VERSION=v1.21.2-k3s1
 
 # xx is a helper for cross-compilation
 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
@@ -21,7 +22,7 @@ ENV CGO_ENABLED=0
 WORKDIR /src
 
 FROM registry:$REGISTRY_VERSION AS registry
-
+FROM rancher/k3s:${K3S_VERSION} AS k3s
 FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit
 
 FROM gobase AS docker
@@ -103,12 +104,43 @@ RUN apk add --no-cache \
     shadow-uidmap \
     xfsprogs \
     xz
+# k3s deps
+RUN apk add --no-cache \
+    busybox-binsh \
+    cni-plugins \
+    cni-plugin-flannel \
+    conntrack-tools \
+    coreutils \
+    dbus \
+    findutils \
+    ipset
+ENV PATH="/usr/libexec/cni:${PATH}"
 COPY --link --from=gotestsum /out/gotestsum /usr/bin/
 COPY --link --from=registry /bin/registry /usr/bin/
 COPY --link --from=docker /opt/docker/* /usr/bin/
+COPY --link --from=k3s /bin/k3s /usr/bin/
+COPY --link --from=k3s /bin/kubectl /usr/bin/
 COPY --link --from=buildkit /usr/bin/buildkitd /usr/bin/
 COPY --link --from=buildkit /usr/bin/buildctl /usr/bin/
 COPY --link --from=binaries /buildx /usr/bin/
+COPY <<-"EOF" /entrypoint.sh
+	#!/bin/sh
+	set -e
+	# cgroup v2: enable nesting
+	# https://github.com/moby/moby/blob/v25.0.0/hack/dind#L59-L69
+	if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
+		# move the processes from the root group to the /init group,
+		# otherwise writing subtree_control fails with EBUSY.
+		# An error during moving non-existent process (i.e., "cat") is ignored.
+		mkdir -p /sys/fs/cgroup/init
+		xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
+		# enable controllers
+		sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers > /sys/fs/cgroup/cgroup.subtree_control
+	fi
+	exec "$@"
+EOF
+RUN chmod +x /entrypoint.sh
+ENTRYPOINT ["/entrypoint.sh"]
 
 FROM integration-test-base AS integration-test
 COPY . .
diff --git a/go.mod b/go.mod
index 65f2f1ec777b..2eab3f9d7924 100644
--- a/go.mod
+++ b/go.mod
@@ -5,6 +5,7 @@ go 1.21
 require (
 	github.com/Masterminds/semver/v3 v3.2.1
 	github.com/aws/aws-sdk-go-v2/config v1.18.16
+	github.com/cenkalti/backoff/v4 v4.2.1
 	github.com/compose-spec/compose-go v1.20.0
 	github.com/containerd/console v1.0.3
 	github.com/containerd/containerd v1.7.11
@@ -76,7 +77,6 @@ require (
 	github.com/aws/aws-sdk-go-v2/service/sts v1.18.6 // indirect
 	github.com/aws/smithy-go v1.13.5 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/containerd/ttrpc v1.2.2 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
diff --git a/tests/helpers/k3s.go b/tests/helpers/k3s.go
new file mode 100644
index 000000000000..da4f10f52dee
--- /dev/null
+++ b/tests/helpers/k3s.go
@@ -0,0 +1,112 @@
+package helpers
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"os"
+	"os/exec"
+	"strconv"
+	"time"
+
+	"github.com/cenkalti/backoff/v4"
+	"github.com/moby/buildkit/util/testutil/integration"
+	"github.com/pkg/errors"
+)
+
+const (
+	k3sBin     = "k3s"
+	kubeCtlBin = "kubectl"
+)
+
+func NewK3sServer(cfg *integration.BackendConfig) (kubeConfig string, cl func() error, err error) {
+	if _, err := exec.LookPath(k3sBin); err != nil {
+		return "", nil, errors.Wrapf(err, "failed to lookup %s binary", k3sBin)
+	}
+	if _, err := exec.LookPath(kubeCtlBin); err != nil {
+		return "", nil, errors.Wrapf(err, "failed to lookup %s binary", kubeCtlBin)
+	}
+
+	deferF := &integration.MultiCloser{}
+	cl = deferF.F()
+
+	defer func() {
+		if err != nil {
+			deferF.F()()
+			cl = nil
+		}
+	}()
+
+	cfgfile, err := os.CreateTemp("", "kubeconfig*.yml")
+	if err != nil {
+		return "", nil, err
+	}
+	kubeConfig = cfgfile.Name()
+	deferF.Append(func() error {
+		return os.Remove(cfgfile.Name())
+	})
+
+	k3sDataDir, err := os.MkdirTemp("", "kubedata")
+	if err != nil {
+		return "", nil, err
+	}
+	deferF.Append(func() error {
+		return os.RemoveAll(k3sDataDir)
+	})
+
+	l, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		return "", nil, err
+	}
+	_ = l.Close()
+
+	lport := strconv.Itoa(l.Addr().(*net.TCPAddr).Port)
+	nodeName := "integrationk3s"
+
+	stop, err := integration.StartCmd(exec.Command(k3sBin, "server",
+		"--bind-address", "127.0.0.1",
+		"--https-listen-port", lport,
+		"--data-dir", k3sDataDir, // write to /tmp for overlayfs support
+		"--write-kubeconfig", cfgfile.Name(),
+		"--write-kubeconfig-mode", "666",
+		"--node-name", nodeName,
+	), cfg.Logs)
+	if err != nil {
+		return "", nil, err
+	}
+
+	if err = waitK3s(cfg, kubeConfig, nodeName); err != nil {
+		stop()
+		return "", nil, errors.Wrapf(err, "k3s did not start up: %s", integration.FormatLogs(cfg.Logs))
+	}
+
+	deferF.Append(stop)
+	return
+}
+
+func waitK3s(cfg *integration.BackendConfig, kubeConfig string, nodeName string) error {
+	logbuf := new(bytes.Buffer)
+	defer func() {
+		cfg.Logs["waitk3s: "] = logbuf
+	}()
+
+	boff := backoff.NewExponentialBackOff()
+	boff.InitialInterval = 3 * time.Second
+	boff.MaxInterval = 5 * time.Second
+	boff.MaxElapsedTime = 2 * time.Minute
+
+	if err := backoff.Retry(func() error {
+		cmd := exec.Command(kubeCtlBin, "--kubeconfig", kubeConfig, "wait", "--for=condition=Ready", "node/"+nodeName)
+		out, err := cmd.CombinedOutput()
+		if err == nil && bytes.Contains(out, []byte("condition met")) {
+			logbuf.WriteString(fmt.Sprintf("%s %s", cmd.String(), string(out)))
+			return nil
+		}
+		return errors.Wrapf(err, "node is not ready: %s %s", cmd.String(), string(out))
+	}, boff); err != nil {
+		logbuf.WriteString(errors.Unwrap(err).Error())
+		return err
+	}
+
+	return nil
+}
diff --git a/tests/integration_test.go b/tests/integration_test.go
index e38e46cab5d3..3c201c56b90e 100644
--- a/tests/integration_test.go
+++ b/tests/integration_test.go
@@ -16,6 +16,7 @@ func init() {
 		workers.InitDockerContainerWorker()
 	} else {
 		workers.InitRemoteWorker()
+		workers.InitKubernetesWorker()
 	}
 }
 
diff --git a/tests/workers/kubernetes.go b/tests/workers/kubernetes.go
new file mode 100644
index 000000000000..2476a59f52e2
--- /dev/null
+++ b/tests/workers/kubernetes.go
@@ -0,0 +1,96 @@
+package workers
+
+import (
+	"context"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"sync"
+
+	"github.com/docker/buildx/tests/helpers"
+	"github.com/moby/buildkit/identity"
+	"github.com/moby/buildkit/util/testutil/integration"
+	"github.com/pkg/errors"
+)
+
+func InitKubernetesWorker() {
+	integration.Register(&kubernetesWorker{
+		id: "kubernetes",
+	})
+}
+
+type kubernetesWorker struct {
+	id string
+
+	unsupported []string
+
+	k3sConfig string
+	k3sClose  func() error
+	k3sErr    error
+	k3sOnce   sync.Once
+}
+
+func (w *kubernetesWorker) Name() string {
+	return w.id
+}
+
+func (w *kubernetesWorker) Rootless() bool {
+	return false
+}
+
+func (w *kubernetesWorker) New(ctx context.Context, cfg *integration.BackendConfig) (integration.Backend, func() error, error) {
+	var err error
+
+	w.k3sOnce.Do(func() {
+		w.k3sConfig, w.k3sClose, w.k3sErr = helpers.NewK3sServer(cfg)
+	})
+	if w.k3sErr != nil {
+		return nil, w.k3sClose, w.k3sErr
+	}
+
+	cfgfile, err := integration.WriteConfig(cfg.DaemonConfig)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer os.RemoveAll(filepath.Dir(cfgfile))
+
+	name := "integration-kubernetes-" + identity.NewID()
+	cmd := exec.Command("buildx", "create",
+		"--bootstrap",
+		"--name="+name,
+		"--config="+cfgfile,
+		"--driver=kubernetes",
+	)
+	cmd.Env = append(
+		os.Environ(),
+		"BUILDX_CONFIG=/tmp/buildx-"+name,
+		"KUBECONFIG="+w.k3sConfig,
+	)
+	if err := cmd.Run(); err != nil {
+		return nil, nil, errors.Wrapf(err, "failed to create buildx instance %s", name)
+	}
+
+	cl := func() error {
+		cmd := exec.Command("buildx", "rm", "-f", name)
+		return cmd.Run()
+	}
+
+	return &backend{
+		builder:             name,
+		unsupportedFeatures: w.unsupported,
+	}, cl, nil
+}
+
+func (w *kubernetesWorker) Close() error {
+	if c := w.k3sClose; c != nil {
+		return c()
+	}
+
+	// reset the worker to be ready to go again
+	w.k3sConfig = ""
+	w.k3sClose = nil
+	w.k3sErr = nil
+	w.k3sOnce = sync.Once{}
+
+	return nil
+}