diff --git a/.prow.yaml b/.prow.yaml index 65d71af..a94ac7f 100644 --- a/.prow.yaml +++ b/.prow.yaml @@ -93,3 +93,22 @@ presubmits: # docker-in-docker needs privileged mode securityContext: privileged: true + + - name: pull-kcp-operator-test-kcp-e2e + always_run: true + decorate: true + clone_uri: "https://github.com/kcp-dev/kcp-operator" + labels: + preset-goproxy: "true" + spec: + containers: + - image: ghcr.io/kcp-dev/infra/build:1.23.7-2 + command: + - hack/ci/run-kcp-e2e-tests.sh + resources: + requests: + memory: 4Gi + cpu: 2 + # docker-in-docker needs privileged mode + securityContext: + privileged: true diff --git a/Dockerfile.tester b/Dockerfile.tester new file mode 100644 index 0000000..46383a5 --- /dev/null +++ b/Dockerfile.tester @@ -0,0 +1,14 @@ +FROM docker.io/golang:1.23.7 + +ENV HTTEST_VERSION="0.3.4" +RUN curl --fail -LO https://codeberg.org/xrstf/httest/releases/download/v${HTTEST_VERSION}/httest_${HTTEST_VERSION}_linux_$(dpkg --print-architecture).tar.gz && \ + tar xzf httest_*.tar.gz && \ + mv httest_*/httest /usr/local/bin + +WORKDIR /apps/kcp +RUN git clone --depth 1 https://github.com/kcp-dev/kcp . && \ + go build -v ./test/... + +ENV NO_GORUN=1 + +CMD [ "bash", "-c", "go test -parallel 1 ./test/e2e/... 
-args --kcp-kubeconfig $KUBECONFIG" ] diff --git a/Makefile b/Makefile index b22b609..afa2444 100644 --- a/Makefile +++ b/Makefile @@ -10,6 +10,7 @@ KUSTOMIZE_VERSION ?= v5.4.3 CONTROLLER_TOOLS_VERSION ?= v0.16.1 ENVTEST_VERSION ?= release-0.19 GOLANGCI_LINT_VERSION ?= 2.1.6 +PROTOKOL_VERSION ?= 0.7.2 # Image URL to use all building/pushing image targets IMG ?= ghcr.io/kcp-dev/kcp-operator @@ -151,6 +152,7 @@ KUBECTL ?= $(TOOLS_DIR)/kubectl KUSTOMIZE ?= $(TOOLS_DIR)/kustomize ENVTEST ?= $(TOOLS_DIR)/setup-envtest GOLANGCI_LINT = $(TOOLS_DIR)/golangci-lint +PROTOKOL = $(TOOLS_DIR)/protokol RECONCILER_GEN := $(TOOLS_DIR)/reconciler-gen OPENSHIFT_GOIMPORTS := $(TOOLS_DIR)/openshift-goimports @@ -182,6 +184,13 @@ golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. $(GOLANGCI_LINT): @hack/download-tool.sh https://github.com/golangci/golangci-lint/releases/download/v${GOLANGCI_LINT_VERSION}/golangci-lint-${GOLANGCI_LINT_VERSION}-$(shell go env GOOS)-$(shell go env GOARCH).tar.gz golangci-lint $(GOLANGCI_LINT_VERSION) +.PHONY: protokol +protokol: $(PROTOKOL) ## Download protokol locally if necessary. + +.PHONY: $(PROTOKOL) +$(PROTOKOL): + @hack/download-tool.sh https://codeberg.org/xrstf/protokol/releases/download/v${PROTOKOL_VERSION}/protokol_${PROTOKOL_VERSION}_$(shell go env GOOS)_$(shell go env GOARCH).tar.gz protokol $(PROTOKOL_VERSION) + .PHONY: reconciler-gen reconciler-gen: $(RECONCILER_GEN) ## Download reconciler-gen locally if necessary. diff --git a/hack/ci/run-kcp-e2e-tests.sh b/hack/ci/run-kcp-e2e-tests.sh new file mode 100755 index 0000000..ea9abf5 --- /dev/null +++ b/hack/ci/run-kcp-e2e-tests.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash + +# Copyright 2025 The KCP Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +# build the image(s) +export IMAGE_TAG=local + +echo "Building container images…" +ARCHITECTURES=arm64 DRY_RUN=yes ./hack/ci/build-image.sh + +export KCP_E2E_TEST_IMAGE="ghcr.io/kcp-dev/kcp:e2e" +buildah build-using-dockerfile \ + --file Dockerfile.tester \ + --tag "$KCP_E2E_TEST_IMAGE" \ + --format=docker \ + . + +# start docker so we can run kind +start-docker.sh + +# create a local kind cluster +KIND_CLUSTER_NAME=e2e + +echo "Preloading the kindest/node image…" +docker load --input /kindest.tar + +export KUBECONFIG=$(mktemp) +echo "Creating kind cluster $KIND_CLUSTER_NAME…" +kind create cluster --name "$KIND_CLUSTER_NAME" +chmod 600 "$KUBECONFIG" + +# store logs as artifacts +make protokol +_tools/protokol --output "$ARTIFACTS/logs" --namespace 'kcp-*' >/dev/null 2>&1 & + +# load the operator image into the kind cluster +image="ghcr.io/kcp-dev/kcp-operator:$IMAGE_TAG" +archive=operator.tar + +echo "Loading operator image into kind…" +buildah manifest push --all "$image" "oci-archive:$archive:$image" +kind load image-archive "$archive" --name "$KIND_CLUSTER_NAME" + +# load the tester image +echo "Loading tester image into kind…" +archive=tester.tar +buildah push "$KCP_E2E_TEST_IMAGE" "oci-archive:$archive:$KCP_E2E_TEST_IMAGE" +kind load image-archive "$archive" --name "$KIND_CLUSTER_NAME" + +# deploy the operator +echo "Deploying operator…" +kubectl kustomize hack/ci/testdata | kubectl apply --filename - +kubectl --namespace kcp-operator-system wait deployment kcp-operator-controller-manager --for condition=Available +kubectl 
--namespace kcp-operator-system wait pod --all --for condition=Ready + +# deploying cert-manager +echo "Deploying cert-manager…" + +helm repo add jetstack https://charts.jetstack.io --force-update +helm repo update + +helm upgrade \ + --install \ + --namespace cert-manager \ + --create-namespace \ + --version v1.16.2 \ + --set crds.enabled=true \ + cert-manager jetstack/cert-manager + +kubectl apply --filename hack/ci/testdata/clusterissuer.yaml + +echo "Running kcp e2e tests…" +(set -x; go test -tags kcpe2e -timeout 2h -v ./test/e2e/...) + +echo "Done. :-)" diff --git a/internal/controller/kubeconfig_controller.go b/internal/controller/kubeconfig_controller.go index b286cd9..815ad28 100644 --- a/internal/controller/kubeconfig_controller.go +++ b/internal/controller/kubeconfig_controller.go @@ -20,7 +20,6 @@ import ( "context" "errors" "fmt" - "net/url" "time" certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" @@ -73,25 +72,25 @@ func (r *KubeconfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, err } + rootShard := &operatorv1alpha1.RootShard{} + shard := &operatorv1alpha1.Shard{} + var ( - clientCertIssuer, serverCA, serverURL, serverName string + clientCertIssuer string + serverCA string ) switch { case kc.Spec.Target.RootShardRef != nil: - var rootShard operatorv1alpha1.RootShard - if err := r.Get(ctx, types.NamespacedName{Name: kc.Spec.Target.RootShardRef.Name, Namespace: req.Namespace}, &rootShard); err != nil { + if err := r.Get(ctx, types.NamespacedName{Name: kc.Spec.Target.RootShardRef.Name, Namespace: req.Namespace}, rootShard); err != nil { return ctrl.Result{}, fmt.Errorf("failed to get RootShard: %w", err) } - clientCertIssuer = resources.GetRootShardCAName(&rootShard, operatorv1alpha1.ClientCA) - serverCA = resources.GetRootShardCAName(&rootShard, operatorv1alpha1.ServerCA) - serverURL = resources.GetRootShardBaseURL(&rootShard) - serverName = rootShard.Name + clientCertIssuer = 
resources.GetRootShardCAName(rootShard, operatorv1alpha1.ClientCA) + serverCA = resources.GetRootShardCAName(rootShard, operatorv1alpha1.ServerCA) case kc.Spec.Target.ShardRef != nil: - var shard operatorv1alpha1.Shard - if err := r.Get(ctx, types.NamespacedName{Name: kc.Spec.Target.ShardRef.Name, Namespace: req.Namespace}, &shard); err != nil { + if err := r.Get(ctx, types.NamespacedName{Name: kc.Spec.Target.ShardRef.Name, Namespace: req.Namespace}, shard); err != nil { return ctrl.Result{}, fmt.Errorf("failed to get Shard: %w", err) } @@ -99,16 +98,13 @@ func (r *KubeconfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) if ref == nil || ref.Name == "" { return ctrl.Result{}, errors.New("the Shard does not reference a (valid) RootShard") } - var rootShard operatorv1alpha1.RootShard - if err := r.Get(ctx, types.NamespacedName{Name: ref.Name, Namespace: req.Namespace}, &rootShard); err != nil { + if err := r.Get(ctx, types.NamespacedName{Name: ref.Name, Namespace: req.Namespace}, rootShard); err != nil { return ctrl.Result{}, fmt.Errorf("failed to get RootShard: %w", err) } // The client CA is shared among all shards and owned by the root shard. 
- clientCertIssuer = resources.GetRootShardCAName(&rootShard, operatorv1alpha1.ClientCA) - serverCA = resources.GetRootShardCAName(&rootShard, operatorv1alpha1.ServerCA) - serverURL = resources.GetShardBaseURL(&shard) - serverName = shard.Name + clientCertIssuer = resources.GetRootShardCAName(rootShard, operatorv1alpha1.ClientCA) + serverCA = resources.GetRootShardCAName(rootShard, operatorv1alpha1.ServerCA) case kc.Spec.Target.FrontProxyRef != nil: var frontProxy operatorv1alpha1.FrontProxy @@ -120,15 +116,12 @@ func (r *KubeconfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) if ref == nil || ref.Name == "" { return ctrl.Result{}, errors.New("the FrontProxy does not reference a (valid) RootShard") } - var rootShard operatorv1alpha1.RootShard - if err := r.Get(ctx, types.NamespacedName{Name: frontProxy.Spec.RootShard.Reference.Name, Namespace: req.Namespace}, &rootShard); err != nil { + if err := r.Get(ctx, types.NamespacedName{Name: frontProxy.Spec.RootShard.Reference.Name, Namespace: req.Namespace}, rootShard); err != nil { return ctrl.Result{}, fmt.Errorf("failed to get RootShard: %w", err) } - clientCertIssuer = resources.GetRootShardCAName(&rootShard, operatorv1alpha1.FrontProxyClientCA) - serverCA = resources.GetRootShardCAName(&rootShard, operatorv1alpha1.ServerCA) - serverURL = fmt.Sprintf("https://%s:6443", rootShard.Spec.External.Hostname) - serverName = rootShard.Spec.External.Hostname + clientCertIssuer = resources.GetRootShardCAName(rootShard, operatorv1alpha1.FrontProxyClientCA) + serverCA = resources.GetRootShardCAName(rootShard, operatorv1alpha1.ServerCA) default: return ctrl.Result{}, fmt.Errorf("no valid target for kubeconfig found") @@ -156,14 +149,12 @@ func (r *KubeconfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{RequeueAfter: time.Second * 5}, nil } - rootWSURL, err := url.JoinPath(serverURL, "clusters", "root") + reconciler, err := kubeconfig.KubeconfigSecretReconciler(&kc, rootShard, shard, 
serverCASecret, clientCertSecret) if err != nil { return ctrl.Result{}, err } - if err := k8creconciling.ReconcileSecrets(ctx, []k8creconciling.NamedSecretReconcilerFactory{ - kubeconfig.KubeconfigSecretReconciler(&kc, serverCASecret, clientCertSecret, serverName, rootWSURL), - }, req.Namespace, r.Client); err != nil { + if err := k8creconciling.ReconcileSecrets(ctx, []k8creconciling.NamedSecretReconcilerFactory{reconciler}, req.Namespace, r.Client); err != nil { return ctrl.Result{}, err } diff --git a/internal/resources/kubeconfig/secret.go b/internal/resources/kubeconfig/secret.go index 3e4f174..ede4c95 100644 --- a/internal/resources/kubeconfig/secret.go +++ b/internal/resources/kubeconfig/secret.go @@ -18,6 +18,7 @@ package kubeconfig import ( "fmt" + "net/url" "k8c.io/reconciler/pkg/reconciling" @@ -25,42 +26,92 @@ import ( "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "github.com/kcp-dev/kcp-operator/internal/resources" operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1" ) -func KubeconfigSecretReconciler(kubeconfig *operatorv1alpha1.Kubeconfig, caSecret, certSecret *corev1.Secret, serverName, serverURL string) reconciling.NamedSecretReconcilerFactory { - return func() (string, reconciling.SecretReconciler) { - return kubeconfig.Spec.SecretRef.Name, func(secret *corev1.Secret) (*corev1.Secret, error) { - var config *clientcmdapi.Config +func KubeconfigSecretReconciler( + kubeconfig *operatorv1alpha1.Kubeconfig, + rootShard *operatorv1alpha1.RootShard, + shard *operatorv1alpha1.Shard, + caSecret, certSecret *corev1.Secret, +) (reconciling.NamedSecretReconcilerFactory, error) { + config := &clientcmdapi.Config{ + Clusters: map[string]*clientcmdapi.Cluster{}, + Contexts: map[string]*clientcmdapi.Context{}, + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + kubeconfig.Spec.Username: { + ClientCertificateData: certSecret.Data["tls.crt"], + ClientKeyData: certSecret.Data["tls.key"], + }, + }, + } 
- if secret.Data == nil { - secret.Data = make(map[string][]byte) - } + addCluster := func(name, url string) { + config.Clusters[name] = &clientcmdapi.Cluster{ + Server: url, + CertificateAuthorityData: caSecret.Data["tls.crt"], + } + config.Contexts[name] = &clientcmdapi.Context{ + Cluster: name, + AuthInfo: kubeconfig.Spec.Username, + } + } - config = &clientcmdapi.Config{} + switch { + case kubeconfig.Spec.Target.RootShardRef != nil: + if rootShard == nil { + panic("RootShard must be provided when kubeconfig targets one.") + } - config.Clusters = map[string]*clientcmdapi.Cluster{ - serverName: { - Server: serverURL, - CertificateAuthorityData: caSecret.Data["tls.crt"], - }, - } + serverURL := resources.GetRootShardBaseURL(rootShard) + defaultURL, err := url.JoinPath(serverURL, "clusters", "root") + if err != nil { + return nil, err + } - contextName := fmt.Sprintf("%s:%s", serverName, kubeconfig.Spec.Username) + addCluster("default", defaultURL) + addCluster("base", serverURL) + config.CurrentContext = "default" - config.Contexts = map[string]*clientcmdapi.Context{ - contextName: { - Cluster: serverName, - AuthInfo: kubeconfig.Spec.Username, - }, - } - config.AuthInfos = map[string]*clientcmdapi.AuthInfo{ - kubeconfig.Spec.Username: { - ClientCertificateData: certSecret.Data["tls.crt"], - ClientKeyData: certSecret.Data["tls.key"], - }, + case kubeconfig.Spec.Target.ShardRef != nil: + if shard == nil { + panic("Shard must be provided when kubeconfig targets one.") + } + + serverURL := resources.GetShardBaseURL(shard) + defaultURL, err := url.JoinPath(serverURL, "clusters", "root") + if err != nil { + return nil, err + } + + addCluster("default", defaultURL) + addCluster("base", serverURL) + config.CurrentContext = "default" + + case kubeconfig.Spec.Target.FrontProxyRef != nil: + if rootShard == nil { + panic("RootShard must be provided when kubeconfig targets a FrontProxy.") + } + + serverURL := fmt.Sprintf("https://%s:6443", rootShard.Spec.External.Hostname) + 
defaultURL, err := url.JoinPath(serverURL, "clusters", "root") + if err != nil { + return nil, err + } + + addCluster("default", defaultURL) + config.CurrentContext = "default" + + default: + panic("Called reconciler for an invalid kubeconfig, this should not have happened.") + } + + return func() (string, reconciling.SecretReconciler) { + return kubeconfig.Spec.SecretRef.Name, func(secret *corev1.Secret) (*corev1.Secret, error) { + if secret.Data == nil { + secret.Data = make(map[string][]byte) } - config.CurrentContext = contextName data, err := clientcmd.Write(*config) if err != nil { @@ -71,5 +122,5 @@ func KubeconfigSecretReconciler(kubeconfig *operatorv1alpha1.Kubeconfig, caSecre return secret, nil } - } + }, nil } diff --git a/test/e2e/kcpe2e/kcp_test.go b/test/e2e/kcpe2e/kcp_test.go new file mode 100644 index 0000000..34d7e1b --- /dev/null +++ b/test/e2e/kcpe2e/kcp_test.go @@ -0,0 +1,184 @@ +//go:build kcpe2e + +/* +Copyright 2025 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kcpe2e + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/go-logr/logr" + kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrlruntime "sigs.k8s.io/controller-runtime" + + operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1" + "github.com/kcp-dev/kcp-operator/test/utils" +) + +func TestKcpTestSuite(t *testing.T) { + const ( + namespace = "kcp-e2e" + externalHostname = "example.localhost" + ) + + testImage := os.Getenv("KCP_E2E_TEST_IMAGE") + if testImage == "" { + t.Skip("No $KCP_E2E_TEST_IMAGE defined.") + } + + ctrlruntime.SetLogger(logr.Discard()) + + client := utils.GetKubeClient(t) + ctx := context.Background() + + // create namespace + utils.CreateSelfDestructingNamespace(t, ctx, client, namespace) + + // deploy a root shard incl. etcd + rootShard := utils.DeployRootShard(ctx, t, client, namespace, externalHostname) + + // deploy a 2nd shard incl. 
etcd + shardName := "aadvark" + utils.DeployShard(ctx, t, client, namespace, shardName, rootShard.Name) + + // deploy front-proxy + utils.DeployFrontProxy(ctx, t, client, namespace, rootShard.Name, externalHostname) + + // create a kubeconfig to access the root shard + rsConfigSecretName := fmt.Sprintf("%s-shard-kubeconfig", rootShard.Name) + + rsConfig := operatorv1alpha1.Kubeconfig{} + rsConfig.Name = rsConfigSecretName + rsConfig.Namespace = namespace + + rsConfig.Spec = operatorv1alpha1.KubeconfigSpec{ + Target: operatorv1alpha1.KubeconfigTarget{ + RootShardRef: &corev1.LocalObjectReference{ + Name: rootShard.Name, + }, + }, + Username: "e2e", + Validity: metav1.Duration{Duration: 2 * time.Hour}, + SecretRef: corev1.LocalObjectReference{ + Name: rsConfigSecretName, + }, + Groups: []string{"system:masters"}, + } + + t.Log("Creating kubeconfig for RootShard…") + if err := client.Create(ctx, &rsConfig); err != nil { + t.Fatal(err) + } + utils.WaitForObject(t, ctx, client, &corev1.Secret{}, types.NamespacedName{Namespace: rsConfig.Namespace, Name: rsConfig.Spec.SecretRef.Name}) + + t.Log("Connecting to RootShard…") + rootShardClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace, rsConfig.Name) + + // wait until the 2nd shard has registered itself successfully at the root shard + shardKey := types.NamespacedName{Name: shardName} + t.Log("Waiting for Shard to register itself on the RootShard…") + utils.WaitForObject(t, ctx, rootShardClient, &kcpcorev1alpha1.Shard{}, shardKey) + + // create a kubeconfig to access the shard + shardConfigSecretName := fmt.Sprintf("%s-shard-kubeconfig", shardName) + + shardConfig := operatorv1alpha1.Kubeconfig{} + shardConfig.Name = shardConfigSecretName + shardConfig.Namespace = namespace + + shardConfig.Spec = operatorv1alpha1.KubeconfigSpec{ + Target: operatorv1alpha1.KubeconfigTarget{ + ShardRef: &corev1.LocalObjectReference{ + Name: shardName, + }, + }, + Username: "e2e", + Validity: metav1.Duration{Duration: 2 * 
time.Hour}, + SecretRef: corev1.LocalObjectReference{ + Name: shardConfigSecretName, + }, + Groups: []string{"system:masters"}, + } + + t.Log("Creating kubeconfig for Shard…") + if err := client.Create(ctx, &shardConfig); err != nil { + t.Fatal(err) + } + utils.WaitForObject(t, ctx, client, &corev1.Secret{}, types.NamespacedName{Namespace: shardConfig.Namespace, Name: shardConfig.Spec.SecretRef.Name}) + + t.Log("Connecting to Shard…") + kcpClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace, shardConfig.Name) + + // proof of life: list something every logicalcluster in kcp has + t.Log("Should be able to list Secrets.") + secrets := &corev1.SecretList{} + if err := kcpClient.List(ctx, secrets); err != nil { + t.Fatalf("Failed to list secrets in kcp: %v", err) + } + + // deploy kcp e2e test container into the cluster + testPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + GenerateName: "kcp-e2e-", + Labels: map[string]string{ + "test": "kcp-e2e", + }, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{{ + Name: "e2e", + Image: testImage, + ImagePullPolicy: corev1.PullNever, + Env: []corev1.EnvVar{{ + Name: "KUBECONFIG", + Value: "/opt/rootshard-kubeconfig/kubeconfig", + }}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "rootshard-kubeconfig", + ReadOnly: true, + MountPath: "/opt/rootshard-kubeconfig", + }}, + }}, + Volumes: []corev1.Volume{{ + Name: "rootshard-kubeconfig", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: rsConfigSecretName, + }, + }, + }}, + }, + } + + t.Log("Creating kcp e2e test pod…") + if err := client.Create(ctx, testPod); err != nil { + t.Fatal(err) + } + + t.Log("Sleeping for 10 minutes...") + time.Sleep(10 * time.Minute) +}