Create a Runtime SDK extension for Cluster API

Just as an admission controller lets you hook into workload cluster API requests (creation, update, and deletion of objects) and validate or mutate them, a runtime extension lets you hook into various cluster lifecycle events and make whatever changes you need.

NOTE – This feature is currently experimental; to enable it you have to set the environment variable EXP_RUNTIME_SDK=true.

In general, the extension works as a webhook and can be written in any language of your preference, but to leverage the upstream CAPI packages we are going to use Go here.

Here we are going to create a Runtime SDK extension that hooks into both the BeforeClusterDelete & AfterControlPlaneInitialized events and operates on ConfigMaps. Let’s create a project named runtimesdk (module setup is sketched right after this list) and a main.go file where we are doing the following –

  • Initializing the necessary command line flags.
  • Validating and applying the logging options.
  • Starting a Go pprof profiler server, if requested.
  • Initializing the webhook server that serves the runtime hooks.
  • Creating a client for interacting with the Kubernetes API server of the management cluster.
  • Creating the extension handlers that we are going to implement next.
  • Registering the BeforeClusterDelete and AfterControlPlaneInitialized hooks with the webhook server.
  • Running the webhook server.
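Before writing main.go, bootstrap the Go module. This is a sketch; the module path mirrors the handler import used below, so substitute your own –

$ mkdir runtimesdk && cd runtimesdk
$ go mod init github.com/aniruddha2000/runtime-sdk
$ go get sigs.k8s.io/cluster-api@v1.4.2
$ go mod tidy   # run once the source files below are in place
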
package main

import (
	"flag"
	"net/http"
	"os"

	handler "github.com/aniruddha2000/runtime-sdk/handlers"
	"github.com/spf13/pflag"
	cliflag "k8s.io/component-base/cli/flag"
	"k8s.io/component-base/logs"
	logsv1 "k8s.io/component-base/logs/api/v1"
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
	"sigs.k8s.io/cluster-api/exp/runtime/server"
)

var (
	// catalog contains all information about RuntimeHooks.
	catalog = runtimecatalog.New()

	// Flags.
	profilerAddress string
	webhookPort     int
	webhookCertDir  string
	logOptions      = logs.NewOptions()
)

func init() {
	// Adds to the catalog all the RuntimeHooks defined in cluster API.
	_ = runtimehooksv1.AddToCatalog(catalog)
}

// InitFlags initializes the flags.
func InitFlags(fs *pflag.FlagSet) {
	// Initialize logs flags using Kubernetes component-base machinery.
	logsv1.AddFlags(logOptions, fs)

	// Add test-extension specific flags
	fs.StringVar(&profilerAddress, "profiler-address", "",
		"Bind address to expose the pprof profiler (e.g. localhost:6060)")

	fs.IntVar(&webhookPort, "webhook-port", 9443,
		"Webhook Server port")

	fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/",
		"Webhook cert dir, only used when webhook-port is specified.")
}

func main() {
	// Creates a logger to be used during the main func.
	setupLog := ctrl.Log.WithName("main")

	// Initialize and parse command line flags.
	InitFlags(pflag.CommandLine)
	pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
	pflag.Parse()

	// Validates logs flags using Kubernetes component-base machinery and applies them
	if err := logsv1.ValidateAndApply(logOptions, nil); err != nil {
		setupLog.Error(err, "unable to start extension")
		os.Exit(1)
	}

	// Add the klog logger in the context.
	ctrl.SetLogger(klog.Background())

	// Initialize the golang profiler server, if required.
	if profilerAddress != "" {
		klog.Infof("Profiler listening for requests at %s", profilerAddress)
		go func() {
			klog.Info(http.ListenAndServe(profilerAddress, nil))
		}()
	}

	// Create an HTTP server for serving runtime extensions.
	webhookServer, err := server.New(server.Options{
		Catalog: catalog,
		Port:    webhookPort,
		CertDir: webhookCertDir,
	})
	if err != nil {
		setupLog.Error(err, "error creating webhook server")
		os.Exit(1)
	}

	// Lifecycle hooks: build the client that the handlers will use.
	restConfig, err := ctrl.GetConfig()
	if err != nil {
		setupLog.Error(err, "error getting config for the cluster")
		os.Exit(1)
	}

	client, err := client.New(restConfig, client.Options{})
	if err != nil {
		setupLog.Error(err, "error creating client to the cluster")
		os.Exit(1)
	}

	lifecycleExtensionHandlers := handler.NewExtensionHandlers(client)

	// Register extension handlers.
	if err := webhookServer.AddExtensionHandler(server.ExtensionHandler{
		Hook:        runtimehooksv1.BeforeClusterDelete,
		Name:        "before-cluster-delete",
		HandlerFunc: lifecycleExtensionHandlers.DoBeforeClusterDelete,
	}); err != nil {
		setupLog.Error(err, "error adding handler")
		os.Exit(1)
	}

	if err := webhookServer.AddExtensionHandler(server.ExtensionHandler{
		Hook:        runtimehooksv1.AfterControlPlaneInitialized,
		Name:        "before-cluster-create",
		HandlerFunc: lifecycleExtensionHandlers.DoAfterControlPlaneInitialized,
	}); err != nil {
		setupLog.Error(err, "error adding handler")
		os.Exit(1)
	}

	// Set up a context that is canceled on SIGINT or SIGTERM.
	ctx := ctrl.SetupSignalHandler()

	// Start the https server.
	setupLog.Info("Starting Runtime Extension server")
	if err := webhookServer.Start(ctx); err != nil {
		setupLog.Error(err, "error running webhook server")
		os.Exit(1)
	}
}

Now it’s time to create the handlers for each event. Let’s create a file handlers/hooks.go; here is what each handler does (a small test sketch follows the code) –

  • DoAfterControlPlaneInitialized –
    • Checks whether a ConfigMap with the expected name exists in the cluster’s namespace.
    • If not, it creates one; otherwise it does nothing and the request passes.
  • DoBeforeClusterDelete –
    • Checks whether that ConfigMap exists in the cluster’s namespace.
    • If it does, it deletes it before the workload cluster is deleted; otherwise the request passes.
package handler

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

type ExtensionHandler struct {
	client client.Client
}

func NewExtensionHandlers(client client.Client) *ExtensionHandler {
	return &ExtensionHandler{
		client: client,
	}
}

func (e *ExtensionHandler) DoBeforeClusterDelete(ctx context.Context, request *runtimehooksv1.BeforeClusterDeleteRequest, response *runtimehooksv1.BeforeClusterDeleteResponse) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("DoBeforeClusterDelete is called")
	log.Info("Namespace:", request.Cluster.GetNamespace(), "ClusterName: ", request.Cluster.GetName())

	// Your implementation
	configMapName := fmt.Sprintf("%s-test-extension-hookresponse", request.Cluster.GetName())
	ok, err := e.checkConfigMap(ctx, &request.Cluster, configMapName)
	if err != nil {
		response.Status = runtimehooksv1.ResponseStatusFailure
		response.Message = err.Error()
		return
	}
	if ok {
		if err := e.deleteConfigMap(ctx, &request.Cluster, configMapName); err != nil {
			response.Status = runtimehooksv1.ResponseStatusFailure
			response.Message = err.Error()
			return
		}
	}
}

func (e *ExtensionHandler) DoAfterControlPlaneInitialized(ctx context.Context, request *runtimehooksv1.AfterControlPlaneInitializedRequest, response *runtimehooksv1.AfterControlPlaneInitializedResponse) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("DoAfterControlPlaneInitialized is called")
	log.Info("Namespace:", request.Cluster.GetNamespace(), "ClusterName: ", request.Cluster.GetName())

	// Your implementation
	configMapName := fmt.Sprintf("%s-test-extension-hookresponse", request.Cluster.GetName())
	ok, err := e.checkConfigMap(ctx, &request.Cluster, configMapName)
	if err != nil {
		response.Status = runtimehooksv1.ResponseStatusFailure
		response.Message = err.Error()
		return
	}
	if !ok {
		if err := e.createConfigMap(ctx, &request.Cluster, configMapName); err != nil {
			response.Status = runtimehooksv1.ResponseStatusFailure
			response.Message = err.Error()
			return
		}
	}
}

func (e *ExtensionHandler) checkConfigMap(ctx context.Context, cluster *clusterv1.Cluster, configMapName string) (bool, error) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("Checking for ConfigMap", configMapName)

	configMap := &corev1.ConfigMap{}
	nsName := client.ObjectKey{Namespace: cluster.GetNamespace(), Name: configMapName}
	if err := e.client.Get(ctx, nsName, configMap); err != nil {
		if apierrors.IsNotFound(err) {
			log.Info("ConfigMap not found")
			return false, nil
		}
		log.Error(err, "ConfigMap not found with an error")
		return false, errors.Wrapf(err, "failed to read the ConfigMap %s", klog.KRef(cluster.Namespace, configMapName))
	}
	log.Info("ConfigMap found")
	return true, nil
}

func (e *ExtensionHandler) createConfigMap(ctx context.Context, cluster *clusterv1.Cluster, configMapName string) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("Creating ConfigMap")

	configMap := e.getConfigMap(cluster, configMapName)
	if err := e.client.Create(ctx, configMap); err != nil {
		log.Error(err, "failed to create ConfigMap")
		return errors.Wrapf(err, "failed to create the ConfigMap %s", klog.KRef(cluster.Namespace, configMapName))
	}
	log.Info("configmap created successfully")
	return nil
}

func (e *ExtensionHandler) deleteConfigMap(ctx context.Context, cluster *clusterv1.Cluster, configMapName string) error {
	log := ctrl.LoggerFrom(ctx)
	log.Info("Deleting ConfigMap")

	if err := e.client.Delete(ctx, &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      configMapName,
			Namespace: cluster.GetNamespace(),
		},
	}); err != nil {
		log.Error(err, "failed to delete ConfigMap")
		return err
	}
	return nil
}

func (e *ExtensionHandler) getConfigMap(cluster *clusterv1.Cluster, configMapName string) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      configMapName,
			Namespace: cluster.GetNamespace(),
		},
		Data: map[string]string{
			"AfterControlPlaneInitialized-preloadedResponse": `{"Status": "Success"}`,
		},
	}
}
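
To sanity-check the handlers without a management cluster, here is a minimal test sketch built on controller-runtime’s fake client. The module path and the file location (handlers/hooks_test.go) are assumptions that mirror the imports used earlier –

package handler_test

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	handler "github.com/aniruddha2000/runtime-sdk/handlers"
)

func TestDoAfterControlPlaneInitializedCreatesConfigMap(t *testing.T) {
	// Fake client backed by an in-memory tracker; no real cluster needed.
	c := fake.NewClientBuilder().Build()
	h := handler.NewExtensionHandlers(c)

	req := &runtimehooksv1.AfterControlPlaneInitializedRequest{
		Cluster: clusterv1.Cluster{
			ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
		},
	}
	resp := &runtimehooksv1.AfterControlPlaneInitializedResponse{}

	h.DoAfterControlPlaneInitialized(context.Background(), req, resp)

	if resp.Status == runtimehooksv1.ResponseStatusFailure {
		t.Fatalf("hook failed: %s", resp.Message)
	}

	// The hook must have created <cluster-name>-test-extension-hookresponse.
	cm := &corev1.ConfigMap{}
	key := client.ObjectKey{Namespace: "default", Name: "test-test-extension-hookresponse"}
	if err := c.Get(context.Background(), key, cm); err != nil {
		t.Fatalf("expected ConfigMap to be created: %v", err)
	}
}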

Implement the Kubernetes manifests

This is the most interesting part, and a few bits and pieces need to be taken care of, such as –

  • The Kubernetes ecosystem only supports webhooks served over TLS. For that, we are going to use cert-manager to automate issuing a self-signed certificate (see the command right after this list).
  • The extension must be registered with Cluster API through an ExtensionConfig resource.
  • Don’t forget about the RBAC: if you perform operations on some resources (here, ConfigMaps), make sure you define permissions for those.
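
clusterctl init installs cert-manager for us later in this post, but if you ever need it standalone, the upstream release manifest can be applied directly (v1.11.1 is the version clusterctl installs below) –

$ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.1/cert-manager.yaml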

NOTE – For this example, we are doing everything in runtimesdk namespace.

Let’s start with certificate.yaml

  • Creating a self-signed certificate using the Issuer
  • Defining the DNS Service name for the certificate
    • <service_name>.<namespace>.svc
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: runtime-sdk-selfsigned-issuer
  namespace: runtimesdk
spec:
  selfSigned: {}

---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: serving-cert
  namespace: runtimesdk
spec:
  dnsNames:
    - test-runtime-sdk-svc.runtimesdk.svc
    - test-runtime-sdk-svc.runtimesdk.svc.cluster.local
    - localhost
  issuerRef:
    kind: Issuer
    name: runtime-sdk-selfsigned-issuer
  secretName: test-runtime-sdk-svc-cert

service.yaml

  • Defining the ClusterIP Service that targets the Deployment’s pods. The Service listens on port 443, the standard HTTPS port, and forwards to the webhook’s container port 9443.
apiVersion: v1
kind: Service
metadata:
  name: test-runtime-sdk-svc
  namespace: runtimesdk
spec:
  type: ClusterIP
  selector:
    app: test-runtime-sdk
  ports:
    - port: 443
      targetPort: 9443

deployment.yaml

  • Build your Docker image and push it to a registry (the commands are shown right after the Dockerfile below).
  • Mount the serving-certificate Secret as a volume and point the --webhook-cert-dir argument at the mount path.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-runtime-sdk
  namespace: runtimesdk
spec:
  selector:
    matchLabels:
      app: test-runtime-sdk
  template:
    metadata:
      labels:
        app: test-runtime-sdk
    spec:
      serviceAccountName: test-runtime-sdk-sa
      containers:
        - name: test-runtime-sdk
          image: <image_name>:<image_tag>
          imagePullPolicy: Always
          args:
            - --webhook-cert-dir=/var/run/webhook/serving-cert/
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          ports:
            - containerPort: 9443
          volumeMounts:
            - mountPath: /var/run/webhook/serving-cert
              name: serving-cert
      volumes:
        - name: serving-cert
          secret:
            secretName: test-runtime-sdk-svc-cert

Service Account, ClusterRole, ClusterRoleBinding –

  • Create your own service account.
  • Add get, list, create, and delete permissions on ConfigMaps.
  • Bind the cluster role to the service account using a ClusterRoleBinding (a quick way to verify the binding follows the manifests).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: test-runtime-sdk-sa
  namespace: runtimesdk

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: test-runtime-sdk-role
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - get
      - list
      - create
      - delete

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: test-runtime-sdk-role-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: test-runtime-sdk-role
subjects:
  - kind: ServiceAccount
    name: test-runtime-sdk-sa
    namespace: runtimesdk
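
Once these manifests are applied, you can sanity-check the binding with kubectl’s built-in authorization query –

$ kubectl auth can-i create configmaps \
    --as=system:serviceaccount:runtimesdk:test-runtime-sdk-sa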

Lastly, the most important piece, the ExtensionConfig resource –

  • Inject the CA bundle from the certificate Secret through the inject-ca-from-secret annotation.
  • Specify where the Runtime Extension is deployed (Service name, namespace, and port).
  • Specify, via the namespaceSelector, which namespaces’ Clusters will call the Runtime Extension.
apiVersion: runtime.cluster.x-k8s.io/v1alpha1
kind: ExtensionConfig
metadata:
  annotations:
    runtime.cluster.x-k8s.io/inject-ca-from-secret: runtimesdk/test-runtime-sdk-svc-cert
  name: test-runtime-sdk-extensionconfig
spec:
  clientConfig:
    service:
      name: test-runtime-sdk-svc
      namespace: runtimesdk # Note: this assumes the test extension gets deployed in the runtimesdk namespace
      port: 443
  namespaceSelector:
    matchExpressions:
      - key: kubernetes.io/metadata.name
        operator: In
        values:
          - default # Note: this assumes the test extension is used by Cluster in the default namespace only

You can define the Dockerfile like this –

FROM golang:alpine3.17 AS builder
WORKDIR /src
COPY . .
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    go build -o runtime-sdk

FROM alpine
WORKDIR /app
COPY --from=builder /src/runtime-sdk /app/runtime-sdk
ENTRYPOINT ["/app/runtime-sdk"]
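
With the Dockerfile in place, build and push the image, reusing the same placeholder tag the Deployment references –

$ docker build -t <image_name>:<image_tag> .
$ docker push <image_name>:<image_tag>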

Let’s run the App in a Kind CAPD Cluster

  • Export the necessary environment variables and create a kind cluster –
$ cat > cluster.env << EOF
export CLUSTER_TOPOLOGY=true
export EXP_RUNTIME_SDK=true
export SERVICE_CIDR=["10.96.0.0/12"]
export POD_CIDR=["192.168.0.0/16"]
export SERVICE_DOMAIN="k8s.test"
EOF

$ source cluster.env

$ cat > kind-cluster-with-extramounts.yaml <<EOF
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
  ipFamily: dual
name: extension-config-test
nodes:
- role: control-plane
  extraMounts:
    - hostPath: /var/run/docker.sock
      containerPath: /var/run/docker.sock
EOF

$ kind create cluster --config kind-cluster-with-extramounts.yaml
Creating cluster "extension-config-test" ...
 ✓ Ensuring node image (kindest/node:v1.27.1) 🖼
 ✓ Preparing nodes 📦  
 ✓ Writing configuration 📜 
 ✓ Starting control-plane 🕹️ 
 ✓ Installing CNI 🔌 
 ✓ Installing StorageClass 💾 
Set kubectl context to "kind-extension-config-test"
You can now use your cluster with:

kubectl cluster-info --context kind-extension-config-test

Thanks for using kind! 😊
  • Create the runtimesdk namespace & initialize the management cluster –
$ kubectl create ns runtimesdk

$ clusterctl init --infrastructure docker
Fetching providers
Installing cert-manager Version="v1.11.1"
Waiting for cert-manager to be available...
Installing Provider="cluster-api" Version="v1.4.2" TargetNamespace="capi-system"
Installing Provider="bootstrap-kubeadm" Version="v1.4.2" TargetNamespace="capi-kubeadm-bootstrap-system"
Installing Provider="control-plane-kubeadm" Version="v1.4.2" TargetNamespace="capi-kubeadm-control-plane-system"
Installing Provider="infrastructure-docker" Version="v1.4.2" TargetNamespace="capd-system"

Your management cluster has been initialized successfully!

You can now create your first workload cluster by running the following:

  clusterctl generate cluster [name] --kubernetes-version [version] | kubectl apply -f -
  • Now apply all of the manifests we created (k is an alias for kubectl below). There are two things you must check –
    • The extension Deployment logs.
    • The status of the ExtensionConfig resource.
$ k apply -f runtime-sdk/manifests/config/
extensionconfig.runtime.cluster.x-k8s.io/test-runtime-sdk-extensionconfig created
issuer.cert-manager.io/runtime-sdk-selfsigned-issuer created
certificate.cert-manager.io/serving-cert created
deployment.apps/test-runtime-sdk created
serviceaccount/test-runtime-sdk-sa created
clusterrole.rbac.authorization.k8s.io/test-runtime-sdk-role created
clusterrolebinding.rbac.authorization.k8s.io/test-runtime-sdk-role-rolebinding created
service/test-runtime-sdk-svc created
$ k get pods -n runtimesdk
NAME                                READY   STATUS    RESTARTS   AGE
test-runtime-sdk-5bc665d7b9-725hl   1/1     Running   0          12m

$ k logs -n runtimesdk test-runtime-sdk-5bc665d7b9-725hl --follow
I0524 07:30:59.714901       1 main.go:130] "main: Starting Runtime Extension server"
I0524 07:30:59.715180       1 server.go:149] "controller-runtime/webhook: Registering webhook" path="/hooks.runtime.cluster.x-k8s.io/v1alpha1/beforeclusterdelete/before-cluster-delete"
I0524 07:30:59.715261       1 server.go:149] "controller-runtime/webhook: Registering webhook" path="/hooks.runtime.cluster.x-k8s.io/v1alpha1/aftercontrolplaneinitialized/before-cluster-create"
I0524 07:30:59.715314       1 server.go:149] "controller-runtime/webhook: Registering webhook" path="/hooks.runtime.cluster.x-k8s.io/v1alpha1/discovery"
I0524 07:30:59.715340       1 server.go:217] "controller-runtime/webhook/webhooks: Starting webhook server"
I0524 07:30:59.716380       1 certwatcher.go:131] "controller-runtime/certwatcher: Updated current TLS certificate"
I0524 07:30:59.716757       1 certwatcher.go:85] "controller-runtime/certwatcher: Starting certificate watcher"
I0524 07:30:59.716918       1 server.go:271] "controller-runtime/webhook: Serving webhook server" host="" port=9443

The logs show that our app is running perfectly; let’s look at the status now,

$ k describe extensionconfig test-runtime-sdk-extensionconfig -n runtimesdk
Name:         test-runtime-sdk-extensionconfig
Namespace:    
Labels:       <none>
Annotations:  runtime.cluster.x-k8s.io/inject-ca-from-secret: runtimesdk/test-runtime-sdk-svc-cert
API Version:  runtime.cluster.x-k8s.io/v1alpha1
Kind:         ExtensionConfig
Metadata:
  Creation Timestamp:  2023-05-24T07:21:49Z
  Generation:          2
  Resource Version:    3939
  UID:                 62af95a7-d924-46f6-9c5a-4ba3f4407749
Spec:
  Client Config:
    Ca Bundle:  LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURIakNDQWdhZ0F3SUJBZ0lSQUx0b1VxQzlEdHBIVTl2TkJrU0xmV0l3RFFZSktvWklodmNOQVFFTEJRQXcKQURBZUZ3MHlNekExTWpRd056SXhORGxhRncweU16QTRNakl3TnpJeE5EbGFNQUF3Z2dFaU1BMEdDU3FHU0liMwpEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURBMUl0Mm1OdVdJMmpRUlY1cHRWTDZ3cGFHdWhObG9GWHV2b1poCkwzWHJRcktiWmRaRnJUbGlZSTI4TXlxVmhSNGh2U2MzVXp5TS8rUjdYVURCT01BNkFZeEtacXg0a3VPRk1ITXkKcUhDTTNuZTZUUCsxUS9CQkRWelMvdk9tRzdnNlF1V3VyMmFtbW4zeTI4dUpWZ0hVaUZQaHZLVHE4U0J4LzY0NQo3bEluQWVpSWVrc3JqTHFJRlFka3NnSlAvbUxSTjI4RTNPL0tVTEp5RWxsakxIelZZcmVXck5rUEh6OGVmZmFECmtmSnMxTTN0NFh3c1Jyd09QQXliUmtGcTNJbENpNEoyL3EyZHZTRlRXdy9EelRuSkE1OEt6N003MlN6aXlJRnkKM1U3ajRISkVqbG9paGU2dlJtUUxEZm5wV0xEdXhvbVJpdURMWU14dHU5VkxweEdIQWdNQkFBR2pnWkl3Z1k4dwpEZ1lEVlIwUEFRSC9CQVFEQWdXZ01Bd0dBMVVkRXdFQi93UUNNQUF3YndZRFZSMFJBUUgvQkdVd1k0SWpkR1Z6CmRDMXlkVzUwYVcxbExYTmtheTF6ZG1NdWNuVnVkR2x0WlhOa2F5NXpkbU9DTVhSbGMzUXRjblZ1ZEdsdFpTMXoKWkdzdGMzWmpMbkoxYm5ScGJXVnpaR3N1YzNaakxtTnNkWE4wWlhJdWJHOWpZV3lDQ1d4dlkyRnNhRzl6ZERBTgpCZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFFSUsvOFJqeFBiYy80T2I4MWY4Z2h2dVN3Z0Y0V0dkK3dONVZpSndICngzVm5GWGJ6d1YvMHZreEJ5SDhFR2xLcnRjcTNVMDFvZ0taQVRadW9DYWxLVjZvUHYvNklNbXR4WHMzMk5EeWoKamwvU3FHOXJlMFhRMXBYa2xIVHpIMk9ha0ozWjZ1TUMxSzgrWS9YRUJMYzZibjhYSXpad3N5VDJkZ0RJeTkrNQpkMjZqek9EejZ4Y2h2TzBSNm1ZK2psazJpMzdwSHRiZWxrOExFeE9ObmFNWlZvWWIrYmtRWXZ5MEZQdEhsZ0NnClQycVBWQ3FISmV2cWxIakk3UFQ4YmVlNFVKcHc1Rld4L0FjbU9qd3BjTkZWbkMwaFFtZmNTazNvb2Z4bTViem0KUTd1d1ZaSzBmWDFaVjJvWGNrZEtPMUluNnZpVkpWSzRESzV3MXh3MnBMWHhGUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    Service:
      Name:       test-runtime-sdk-svc
      Namespace:  runtimesdk
      Port:       443
  Namespace Selector:
    Match Expressions:
      Key:       kubernetes.io/metadata.name
      Operator:  In
      Values:
        default
Status:
  Conditions:
    Last Transition Time:  2023-05-24T07:32:44Z
    Status:                True
    Type:                  Discovered
  Handlers:
    Failure Policy:  Fail
    Name:            before-cluster-delete.test-runtime-sdk-extensionconfig
    Request Hook:
      API Version:    hooks.runtime.cluster.x-k8s.io/v1alpha1
      Hook:           BeforeClusterDelete
    Timeout Seconds:  10
    Failure Policy:   Fail
    Name:             before-cluster-create.test-runtime-sdk-extensionconfig
    Request Hook:
      API Version:    hooks.runtime.cluster.x-k8s.io/v1alpha1
      Hook:           AfterControlPlaneInitialized
    Timeout Seconds:  10
Events:               <none>

If you look closely, the controller has fetched the CA bundle from the Secret referenced in the annotation, and both of the hooks show up as handlers in the status, with the Discovered condition set to True.

  • Create a Workload Cluster Now –
$ clusterctl generate cluster extension-config-test --flavor development \
--kubernetes-version v1.27.1 \
--control-plane-machine-count=1 \
--worker-machine-count=1 \
> manifests/capi/capi-quickstart.yaml

$ k apply -f manifests/capi/capi-quickstart.yaml
clusterclass.cluster.x-k8s.io/quick-start created
dockerclustertemplate.infrastructure.cluster.x-k8s.io/quick-start-cluster created
kubeadmcontrolplanetemplate.controlplane.cluster.x-k8s.io/quick-start-control-plane created
dockermachinetemplate.infrastructure.cluster.x-k8s.io/quick-start-control-plane created
dockermachinetemplate.infrastructure.cluster.x-k8s.io/quick-start-default-worker-machinetemplate created
kubeadmconfigtemplate.bootstrap.cluster.x-k8s.io/quick-start-default-worker-bootstraptemplate created
cluster.cluster.x-k8s.io/extension-config-test created

Let’s check the logs and the ConfigMaps to see whether the hook created anything,

I0524 07:40:49.405854       1 hooks.go:52] "DoAfterControlPlaneInitialized is called"
I0524 07:40:49.406022       1 hooks.go:53] "Cluster" namespace="default" name="extension-config-test"
I0524 07:40:49.406093       1 hooks.go:74] "Checking for ConfigMap" name="extension-config-test-test-extension-hookresponse"
I0524 07:40:49.421562       1 hooks.go:80] "ConfigMap not found"
I0524 07:40:49.421596       1 hooks.go:92] "Creating ConfigMap"
I0524 07:40:49.437841       1 hooks.go:99] "configmap created successfully"
$ k get configmaps
NAME                                                DATA   AGE
extension-config-test-test-extension-hookresponse   1      76s
kube-root-ca.crt                                    1      26m
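
You can also confirm it carries the preloaded response data we set in getConfigMap –

$ k get configmap extension-config-test-test-extension-hookresponse \
    -o jsonpath='{.data}'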

Yep, now our ConfigMap is up. Let’s test the delete,

$ k delete -f manifests/capi/capi-quickstart.yaml

$ k logs -n runtimesdk test-runtime-sdk-5bc665d7b9-725hl --follow
I0524 07:44:08.266319       1 hooks.go:30] "DoBeforeClusterDelete is called"
I0524 07:44:08.266347       1 hooks.go:31] "Cluster" namespace="default" name="extension-config-test"
I0524 07:44:08.268351       1 hooks.go:74] "Checking for ConfigMap" name="extension-config-test-test-extension-hookresponse"
I0524 07:44:08.288940       1 hooks.go:86] "ConfigMap found"
I0524 07:44:08.289163       1 hooks.go:105] "Deleting ConfigMap"
$ k get configmaps
NAME               DATA   AGE
kube-root-ca.crt   1      29m

So now there is no ConfigMap either. Everything is working fine then 😉

Thanks for reading 🙂

Feedback is welcome!
