Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 7 additions & 5 deletions apiexport/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,8 @@ import (
kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache"
apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1"
"github.com/kcp-dev/logicalcluster/v3"

mcpcache "github.com/kcp-dev/multicluster-provider/internal/cache"
)

var _ multicluster.Provider = &Provider{}
Expand All @@ -52,7 +54,7 @@ var _ multicluster.Provider = &Provider{}
type Provider struct {
config *rest.Config
scheme *runtime.Scheme
cache WildcardCache
cache mcpcache.WildcardCache
object client.Object

log logr.Logger
Expand All @@ -70,7 +72,7 @@ type Options struct {

// WildcardCache is the wildcard cache to use for the provider. If this is
// nil, a new wildcard cache will be created for the given rest.Config.
WildcardCache WildcardCache
WildcardCache mcpcache.WildcardCache

// ObjectToWatch is the object type that the provider watches via a /clusters/*
// wildcard endpoint to extract information about logical clusters joining and
Expand All @@ -92,7 +94,7 @@ func New(cfg *rest.Config, options Options) (*Provider, error) {
}
if options.WildcardCache == nil {
var err error
options.WildcardCache, err = NewWildcardCache(cfg, cache.Options{
options.WildcardCache, err = mcpcache.NewWildcardCache(cfg, cache.Options{
Scheme: options.Scheme,
})
if err != nil {
Expand Down Expand Up @@ -125,7 +127,7 @@ func (p *Provider) Run(ctx context.Context, mgr mcmanager.Manager) error {
if err != nil {
return fmt.Errorf("failed to get %T informer: %w", p.object, err)
}
shInf, _, _, err := p.cache.getSharedInformer(p.object)
shInf, _, _, err := p.cache.GetSharedInformer(p.object)
if err != nil {
return fmt.Errorf("failed to get shared informer: %w", err)
}
Expand Down Expand Up @@ -155,7 +157,7 @@ func (p *Provider) Run(ctx context.Context, mgr mcmanager.Manager) error {

// create new scoped cluster.
clusterCtx, cancel := context.WithCancel(ctx)
cl, err := newScopedCluster(p.config, clusterName, p.cache, p.scheme)
cl, err := mcpcache.NewScopedCluster(p.config, clusterName, p.cache, p.scheme)
if err != nil {
p.log.Error(err, "failed to create cluster", "cluster", clusterName)
cancel()
Expand Down
63 changes: 63 additions & 0 deletions envtest/workspaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -170,6 +170,69 @@ func NewWorkspaceFixture(t TestingT, clusterClient kcpclient.ClusterClient, pare
return ws, parent.Join(ws.Name)
}

// NewInitializingWorkspaceFixture creates a new workspace under the given parent
// using the given client, and waits for it to be stuck in the initializing phase.
// It registers a cleanup handler that deletes the workspace after the test unless
// the PRESERVE environment variable is set, and returns the created Workspace
// together with its logical cluster path under the parent.
func NewInitializingWorkspaceFixture(t TestingT, clusterClient kcpclient.ClusterClient, parent logicalcluster.Path, options ...WorkspaceOption) (*tenancyv1alpha1.Workspace, logicalcluster.Path) {
	t.Helper()

	ctx, cancelFunc := context.WithCancel(context.Background())
	t.Cleanup(cancelFunc)

	ws := &tenancyv1alpha1.Workspace{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "e2e-workspace-",
		},
		Spec: tenancyv1alpha1.WorkspaceSpec{
			Type: tenancyv1alpha1.WorkspaceTypeReference{
				Name: tenancyv1alpha1.WorkspaceTypeName("universal"),
				Path: "root",
			},
		},
	}
	for _, opt := range options {
		opt(ws)
	}

	// we are referring here to a WorkspaceType that may have just been created; if the admission controller
	// does not have a fresh enough cache, our request will be denied as the admission controller does not know the
	// type exists. Therefore, we can require.Eventually our way out of this problem. We expect users to create new
	// types very infrequently, so we do not think this will be a serious UX issue in the product.
	Eventually(t, func() (bool, string) {
		err := clusterClient.Cluster(parent).Create(ctx, ws)
		return err == nil, fmt.Sprintf("error creating workspace under %s: %v", parent, err)
	}, wait.ForeverTestTimeout, time.Millisecond*100, "failed to create %s workspace under %s", ws.Spec.Type.Name, parent)

	// Capture the name now: the cleanup closure runs long after ws may have been mutated.
	wsName := ws.Name
	t.Cleanup(func() {
		if os.Getenv("PRESERVE") != "" {
			return
		}

		// Use a fresh context with its own timeout; the fixture context above is
		// canceled as soon as the test body finishes.
		ctx, cancelFn := context.WithTimeout(context.Background(), time.Second*30)
		defer cancelFn()

		err := clusterClient.Cluster(parent).Delete(ctx, ws)
		if apierrors.IsNotFound(err) || apierrors.IsForbidden(err) {
			return // ignore not found and forbidden because this probably means the parent has been deleted
		}
		require.NoErrorf(t, err, "failed to delete workspace %s", wsName)
	})

	// Wait until the workspace reaches (and stays in) the initializing phase.
	Eventually(t, func() (bool, string) {
		err := clusterClient.Cluster(parent).Get(ctx, client.ObjectKey{Name: ws.Name}, ws)
		require.Falsef(t, apierrors.IsNotFound(err), "workspace %s was deleted", parent.Join(ws.Name))
		require.NoError(t, err, "failed to get workspace %s", parent.Join(ws.Name))
		if actual, expected := ws.Status.Phase, corev1alpha1.LogicalClusterPhaseInitializing; actual != expected {
			return false, fmt.Sprintf("workspace phase is %s, not %s\n\n%s", actual, expected, toYaml(t, ws))
		}
		return true, ""
	}, workspaceInitTimeout, time.Millisecond*100, "%s workspace %s is not stuck on initializing", ws.Spec.Type, parent.Join(ws.Name))

	t.Logf("Created %s workspace %s as /clusters/%s on shard %q", ws.Spec.Type, parent.Join(ws.Name), ws.Spec.Cluster, WorkspaceShardOrDie(t, clusterClient, ws).Name)

	return ws, parent.Join(ws.Name)
}

// WorkspaceShard returns the shard that a workspace is scheduled on.
func WorkspaceShard(ctx context.Context, kcpClient kcpclient.ClusterClient, ws *tenancyv1alpha1.Workspace) (*corev1alpha1.Shard, error) {
shards := &corev1alpha1.ShardList{}
Expand Down
7 changes: 0 additions & 7 deletions examples/apiexport/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@ package main
import (
"context"
"fmt"
"net/http"
"os"

"github.com/spf13/pflag"
Expand Down Expand Up @@ -139,9 +138,3 @@ func main() {
os.Exit(1)
}
}

type RoundTripperFunc func(*http.Request) (*http.Response, error)

func (f RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
return f(r)
}
41 changes: 41 additions & 0 deletions examples/initializingworkspaces/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# `initializingworkspaces` Example Controller

This folder contains an example controller for the `initializingworkspaces` provider implementation. It reconciles `LogicalCluster` objects across kcp workspaces, creating a child workspace and a `ConfigMap` in each initializing workspace before removing its initializer.

It can be tested by applying the necessary manifests from the respective folder while connected to the `root` workspace of a kcp instance:

```sh
$ kubectl apply -f ./manifests/bundle.yaml
workspacetype.tenancy.kcp.io/examples-initializingworkspaces-multicluster created
workspace.tenancy.kcp.io/example1 created
workspace.tenancy.kcp.io/example2 created
workspace.tenancy.kcp.io/example3 created
```

Then, start the example controller by passing the virtual workspace URL to it:

```sh
$ go run . --server=$(kubectl get workspacetype examples-initializingworkspaces-multicluster -o jsonpath="{.status.virtualWorkspaces[0].url}") --initializer=root:examples-initializingworkspaces-multicluster
```

Observe the controller reconciling every logical cluster and creating the child workspace `initialized-workspace` and the `kcp-initializer-cm` ConfigMap in each workspace and removing the initializer when done.

```sh
2025-07-24T10:26:58+02:00 INFO Starting to initialize cluster {"controller": "kcp-initializer-controller", "controllerGroup": "core.kcp.io", "controllerKind": "LogicalCluster", "reconcileID": "0c69fc44-4886-42f8-b620-82a6fe96a165", "cluster": "1i6ttu8js47cs302"}
2025-07-24T10:26:58+02:00 INFO Creating child workspace {"controller": "kcp-initializer-controller", "controllerGroup": "core.kcp.io", "controllerKind": "LogicalCluster", "reconcileID": "0c69fc44-4886-42f8-b620-82a6fe96a165", "cluster": "1i6ttu8js47cs302", "name": "initialized-workspace-1i6ttu8js47cs302"}
2025-07-24T10:26:58+02:00 INFO kcp-initializing-workspaces-provider disengaging non-initializing workspace {"cluster": "1cxhyp0xy8lartoi"}
2025-07-24T10:26:58+02:00 INFO Workspace created successfully {"controller": "kcp-initializer-controller", "controllerGroup": "core.kcp.io", "controllerKind": "LogicalCluster", "reconcileID": "0c69fc44-4886-42f8-b620-82a6fe96a165", "cluster": "1i6ttu8js47cs302", "name": "initialized-workspace-1i6ttu8js47cs302"}
2025-07-24T10:26:58+02:00 INFO Reconciling ConfigMap {"controller": "kcp-initializer-controller", "controllerGroup": "core.kcp.io", "controllerKind": "LogicalCluster", "reconcileID": "0c69fc44-4886-42f8-b620-82a6fe96a165", "cluster": "1i6ttu8js47cs302", "name": "kcp-initializer-cm", "uuid": ""}
2025-07-24T10:26:58+02:00 INFO ConfigMap created successfully {"controller": "kcp-initializer-controller", "controllerGroup": "core.kcp.io", "controllerKind": "LogicalCluster", "reconcileID": "0c69fc44-4886-42f8-b620-82a6fe96a165", "cluster": "1i6ttu8js47cs302", "name": "kcp-initializer-cm", "uuid": "9a8a8d5d-d606-4e08-bb69-679719d94867"}
2025-07-24T10:26:58+02:00 INFO Removed initializer from LogicalCluster status {"controller": "kcp-initializer-controller", "controllerGroup": "core.kcp.io", "controllerKind": "LogicalCluster", "reconcileID": "0c69fc44-4886-42f8-b620-82a6fe96a165", "cluster": "1i6ttu8js47cs302", "name": "cluster", "uuid": "4c2fd3cf-512f-45f4-a9d3-6886c6542ccf"}
2025-07-24T10:26:58+02:00 INFO Reconciling LogicalCluster {"controller": "kcp-initializer-controller", "controllerGroup": "core.kcp.io", "controllerKind": "LogicalCluster", "reconcileID": "99bd39f0-3ea8-4805-9770-60f95127b5ac", "cluster": "2hwz9858cyir31hl", "logical cluster": {"owner":{"apiVersion":"tenancy.kcp.io/v1alpha1","resource":"workspaces","name":"example1","cluster":"root","uid":"1d79b26f-cfb8-40d5-934d-b4a61eb20f12"},"initializers":["root:universal","root:examples-initializingworkspaces-multicluster","system:apibindings"]}}
2025-07-24T10:26:58+02:00 INFO Starting to initialize cluster {"controller": "kcp-initializer-controller", "controllerGroup": "core.kcp.io", "controllerKind": "LogicalCluster", "reconcileID": "99bd39f0-3ea8-4805-9770-60f95127b5ac", "cluster": "2hwz9858cyir31hl"}
2025-07-24T10:26:58+02:00 INFO Creating child workspace {"controller": "kcp-initializer-controller", "controllerGroup": "core.kcp.io", "controllerKind": "LogicalCluster", "reconcileID": "99bd39f0-3ea8-4805-9770-60f95127b5ac", "cluster": "2hwz9858cyir31hl", "name": "initialized-workspace-2hwz9858cyir31hl"}
2025-07-24T10:26:58+02:00 INFO kcp-initializing-workspaces-provider disengaging non-initializing workspace {"cluster": "1i6ttu8js47cs302"}
2025-07-24T10:26:58+02:00 INFO Workspace created successfully {"controller": "kcp-initializer-controller", "controllerGroup": "core.kcp.io", "controllerKind": "LogicalCluster", "reconcileID": "99bd39f0-3ea8-4805-9770-60f95127b5ac", "cluster": "2hwz9858cyir31hl", "name": "initialized-workspace-2hwz9858cyir31hl"}
2025-07-24T10:26:58+02:00 INFO Reconciling ConfigMap {"controller": "kcp-initializer-controller", "controllerGroup": "core.kcp.io", "controllerKind": "LogicalCluster", "reconcileID": "99bd39f0-3ea8-4805-9770-60f95127b5ac", "cluster": "2hwz9858cyir31hl", "name": "kcp-initializer-cm", "uuid": ""}
2025-07-24T10:26:58+02:00 INFO ConfigMap created successfully {"controller": "kcp-initializer-controller", "controllerGroup": "core.kcp.io", "controllerKind": "LogicalCluster", "reconcileID": "99bd39f0-3ea8-4805-9770-60f95127b5ac", "cluster": "2hwz9858cyir31hl", "name": "kcp-initializer-cm", "uuid": "87462d41-16b5-4617-9f7c-3894160576b7"}
2025-07-24T10:26:58+02:00 INFO Removed initializer from LogicalCluster status {"controller": "kcp-initializer-controller", "controllerGroup": "core.kcp.io", "controllerKind": "LogicalCluster", "reconcileID": "99bd39f0-3ea8-4805-9770-60f95127b5ac", "cluster": "2hwz9858cyir31hl", "name": "cluster", "uuid": "cfeee05f-3cba-4766-b464-ba3ebe41a3fa"}
2025-07-24T10:26:58+02:00 INFO Reconciling LogicalCluster {"controller": "kcp-initializer-controller", "controllerGroup": "core.kcp.io", "controllerKind": "LogicalCluster", "reconcileID": "8ad43574-3862-4452-b1e0-e9daf1e67a54", "cluster": "2hwz9858cyir31hl", "logical cluster": {"owner":{"apiVersion":"tenancy.kcp.io/v1alpha1","resource":"workspaces","name":"example1","cluster":"root","uid":"1d79b26f-cfb8-40d5-934d-b4a61eb20f12"},"initializers":["root:universal","root:examples-initializingworkspaces-multicluster","system:apibindings"]}}
2025-07-24T10:26:59+02:00 INFO kcp-initializing-workspaces-provider disengaging non-initializing workspace {"cluster": "2hwz9858cyir31hl"}
```
203 changes: 203 additions & 0 deletions examples/initializingworkspaces/main.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,203 @@
/*
Copyright 2025 The KCP Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
"context"
"fmt"
"os"
"slices"

"github.com/spf13/pflag"
"go.uber.org/zap/zapcore"

corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

mcbuilder "sigs.k8s.io/multicluster-runtime/pkg/builder"
mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager"
mcreconcile "sigs.k8s.io/multicluster-runtime/pkg/reconcile"

apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1"
corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1"
"github.com/kcp-dev/kcp/sdk/apis/tenancy/initialization"
tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1"

"github.com/kcp-dev/multicluster-provider/initializingworkspaces"
)

// init registers the kcp core, tenancy, and apis API groups with the global
// client-go scheme so clients built from it can decode those resources.
func init() {
	for _, register := range []func() error{
		func() error { return corev1alpha1.AddToScheme(scheme.Scheme) },
		func() error { return tenancyv1alpha1.AddToScheme(scheme.Scheme) },
		func() error { return apisv1alpha1.AddToScheme(scheme.Scheme) },
	} {
		runtime.Must(register())
	}
}

// main runs an example multicluster controller acting as a kcp workspace
// initializer: for every LogicalCluster that still carries the configured
// initializer it creates a child workspace and a ConfigMap, then removes the
// initializer from the LogicalCluster status so the workspace can become ready.
func main() {
	var (
		server          string
		initializerName string
		provider        *initializingworkspaces.Provider
		verbosity       int
	)

	pflag.StringVar(&server, "server", "", "Override for kubeconfig server URL")
	pflag.StringVar(&initializerName, "initializer", "initializer:example", "Name of the initializer to use")
	pflag.IntVar(&verbosity, "v", 1, "Log verbosity level")
	pflag.Parse()

	logOpts := zap.Options{
		Development: true,
		Level:       zapcore.Level(-verbosity),
	}
	log.SetLogger(zap.New(zap.UseFlagOptions(&logOpts)))

	ctx := signals.SetupSignalHandler()
	entryLog := log.Log.WithName("entrypoint")
	cfg := ctrl.GetConfigOrDie()
	cfg = rest.CopyConfig(cfg)

	if server != "" {
		cfg.Host = server
	}

	entryLog.Info("Setting up manager")
	opts := manager.Options{}

	var err error
	provider, err = initializingworkspaces.New(cfg, initializingworkspaces.Options{InitializerName: initializerName})
	if err != nil {
		entryLog.Error(err, "unable to construct cluster provider")
		os.Exit(1)
	}

	mgr, err := mcmanager.New(cfg, provider, opts)
	if err != nil {
		entryLog.Error(err, "unable to set up overall controller manager")
		os.Exit(1)
	}

	if err := mcbuilder.ControllerManagedBy(mgr).
		Named("kcp-initializer-controller").
		For(&corev1alpha1.LogicalCluster{}).
		Complete(mcreconcile.Func(
			func(ctx context.Context, req mcreconcile.Request) (ctrl.Result, error) {
				log := log.FromContext(ctx).WithValues("cluster", req.ClusterName)
				cl, err := mgr.GetCluster(ctx, req.ClusterName)
				if err != nil {
					return reconcile.Result{}, fmt.Errorf("failed to get cluster: %w", err)
				}
				client := cl.GetClient()
				lc := &corev1alpha1.LogicalCluster{}
				if err := client.Get(ctx, req.NamespacedName, lc); err != nil {
					return reconcile.Result{}, fmt.Errorf("failed to get logical cluster: %w", err)
				}

				log.Info("Reconciling LogicalCluster", "logical cluster", lc.Spec)
				initializer := corev1alpha1.LogicalClusterInitializer(initializerName)
				// Only act while our initializer is still set on the LogicalCluster.
				if slices.Contains(lc.Status.Initializers, initializer) {
					log.Info("Starting to initialize cluster")
					workspaceName := fmt.Sprintf("initialized-workspace-%s", req.ClusterName)
					ws := &tenancyv1alpha1.Workspace{}
					err = client.Get(ctx, ctrlclient.ObjectKey{Name: workspaceName}, ws)
					if err != nil {
						if !apierrors.IsNotFound(err) {
							// Transient lookup failure: return the error so the
							// request is requeued instead of being dropped.
							log.Error(err, "Error checking for existing workspace")
							return reconcile.Result{}, fmt.Errorf("failed to check for existing workspace: %w", err)
						}

						log.Info("Creating child workspace", "name", workspaceName)
						ws = &tenancyv1alpha1.Workspace{
							ObjectMeta: ctrl.ObjectMeta{
								Name: workspaceName,
							},
						}

						// Tolerate AlreadyExists (a concurrent reconcile may have won
						// the race); any other error is returned for a requeue.
						if err := client.Create(ctx, ws); err != nil && !apierrors.IsAlreadyExists(err) {
							log.Error(err, "Failed to create workspace")
							return reconcile.Result{}, fmt.Errorf("failed to create workspace: %w", err)
						}
						log.Info("Workspace created successfully", "name", workspaceName)
					}

					if ws.Status.Phase != corev1alpha1.LogicalClusterPhaseReady {
						log.Info("Workspace not ready yet", "current-phase", ws.Status.Phase)
						return reconcile.Result{Requeue: true}, nil
					}
					log.Info("Workspace is ready, proceeding to create ConfigMap")

					s := &corev1.ConfigMap{
						ObjectMeta: ctrl.ObjectMeta{
							Name:      "kcp-initializer-cm",
							Namespace: "default",
						},
						Data: map[string]string{
							"test-data": "example-value",
						},
					}
					log.Info("Reconciling ConfigMap", "name", s.Name, "uuid", s.UID)
					// Tolerate AlreadyExists so a repeated reconcile (e.g. after a
					// failed status patch below) does not error out forever.
					if err := client.Create(ctx, s); err != nil && !apierrors.IsAlreadyExists(err) {
						return reconcile.Result{}, fmt.Errorf("failed to create configmap: %w", err)
					}
					log.Info("ConfigMap created successfully", "name", s.Name, "uuid", s.UID)
				}
				// Remove the initializer from the logical cluster status
				// so that it won't be processed again.
				if !slices.Contains(lc.Status.Initializers, initializer) {
					log.Info("Initializer already absent, skipping patch")
					return reconcile.Result{}, nil
				}

				patch := ctrlclient.MergeFrom(lc.DeepCopy())
				lc.Status.Initializers = initialization.EnsureInitializerAbsent(initializer, lc.Status.Initializers)
				if err := client.Status().Patch(ctx, lc, patch); err != nil {
					return reconcile.Result{}, err
				}
				log.Info("Removed initializer from LogicalCluster status", "name", lc.Name, "uuid", lc.UID)
				return reconcile.Result{}, nil
			},
		)); err != nil {
		entryLog.Error(err, "failed to build controller")
		os.Exit(1)
	}

	// Run the provider alongside the manager; it feeds clusters into mgr.
	if provider != nil {
		entryLog.Info("Starting provider")
		go func() {
			if err := provider.Run(ctx, mgr); err != nil {
				entryLog.Error(err, "unable to run provider")
				os.Exit(1)
			}
		}()
	}

	entryLog.Info("Starting manager")
	if err := mgr.Start(ctx); err != nil {
		entryLog.Error(err, "unable to run manager")
		os.Exit(1)
	}
}
Loading