From a986023ec7dbb881421524aee1d1882d132f629b Mon Sep 17 00:00:00 2001 From: Adam Jones Date: Wed, 13 Aug 2025 17:54:19 +0100 Subject: [PATCH 1/2] infra: Avoid false positive infra diffs because of (now) unnecessary special case export handling --- deploy/pkg/k8s/ingress.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/deploy/pkg/k8s/ingress.go b/deploy/pkg/k8s/ingress.go index decc6aa..d366346 100644 --- a/deploy/pkg/k8s/ingress.go +++ b/deploy/pkg/k8s/ingress.go @@ -68,10 +68,6 @@ func SetupIngressController(ctx *pulumi.Context, cluster *providers.ProviderInfo // Use the helm chart to get service information after deployment ingressIps := nginxIngress.Resources.ApplyT(func(resources interface{}) interface{} { - if ctx.DryRun() { - return []string{} // Return empty array on error during preview - } - // Look up the service after the chart is ready svc, err := v1.GetService( ctx, From 403092532043b1444a9c643cbd7f93ec1262e2a9 Mon Sep 17 00:00:00 2001 From: Adam Jones Date: Fri, 15 Aug 2025 10:58:20 +0100 Subject: [PATCH 2/2] infra: Improve local development flow --- deploy/Makefile | 5 +++ deploy/README.md | 32 +++++++++--------- deploy/pkg/k8s/cert_manager.go | 8 ++--- deploy/pkg/k8s/ingress.go | 60 ++++++++++++++++------------------ 4 files changed, 53 insertions(+), 52 deletions(-) diff --git a/deploy/Makefile b/deploy/Makefile index 91a5be9..c2bf795 100644 --- a/deploy/Makefile +++ b/deploy/Makefile @@ -12,6 +12,7 @@ build: ## Build the Pulumi Go program # Local stack commands local-login: ## Login to local Pulumi backend pulumi login --local + PULUMI_CONFIG_PASSPHRASE="" pulumi stack select local --create local-preview: build local-login ## Preview local infrastructure changes PULUMI_CONFIG_PASSPHRASE="" pulumi preview --stack local @@ -19,6 +20,10 @@ local-preview: build local-login ## Preview local infrastructure changes local-up: build local-login ## Deploy local infrastructure PULUMI_CONFIG_PASSPHRASE="" pulumi up --yes --stack local +local-destroy: 
local-login ## Destroy local infrastructure
+	pulumi stack rm local --force --yes --preserve-config
+	echo "Make sure to also delete your k8s cluster, e.g. minikube delete"
+
 # Staging stack commands
 staging-login: ## Login to staging Pulumi backend
 	pulumi login gs://mcp-registry-staging-pulumi-state
diff --git a/deploy/README.md b/deploy/README.md
index 685cb11..217c6b7 100644
--- a/deploy/README.md
+++ b/deploy/README.md
@@ -10,22 +10,22 @@ Pre-requisites:
 - [Pulumi CLI installed](https://www.pulumi.com/docs/iac/download-install/)
 - Access to a Kubernetes cluster via kubeconfig. You can run a cluster locally with [minikube](https://minikube.sigs.k8s.io/docs/start/).
 
-1. Set Pulumi's backend to local: `pulumi login --local`
-2. Init the local stack: `pulumi stack init local` (fine to leave `password` blank)
-3. Set your config:
-   ```bash
-   # General environment
-   pulumi config set mcp-registry:environment local
-
-   # To use your local kubeconfig (default)
-   pulumi config set mcp-registry:provider local
-
-   # GitHub OAuth
-   pulumi config set mcp-registry:githubClientId
-   pulumi config set --secret mcp-registry:githubClientSecret
-   ```
-4. Deploy: `make local-up`
-5. Access the repository via the ingress load balancer. You can find its external IP with `kubectl get svc ingress-nginx-controller -n ingress-nginx` (with minikube, if it's 'pending' you might need `minikube tunnel`). Then run `curl -H "Host: local.registry.modelcontextprotocol.io" -k https://<EXTERNAL_IP>/v0/ping` to check that the service is up.
+1. Ensure your kubeconfig is configured at the cluster you want to use. For minikube, run `minikube start && minikube tunnel`.
+2. Run `make local-up` to deploy the stack. Run this again if the first attempt fails.
+3. Access the repository via the ingress load balancer. You can find its external IP with `kubectl get svc ingress-nginx-controller -n ingress-nginx`. 
Then run `curl -H "Host: local.registry.modelcontextprotocol.io" -k https://<EXTERNAL_IP>/v0/ping` to check that the service is up.
+
+#### To change config
+
+The stack is configured out of the box for local development. But if you want to make changes, run commands like:
+
+```bash
+PULUMI_CONFIG_PASSPHRASE="" pulumi config set mcp-registry:environment local
+PULUMI_CONFIG_PASSPHRASE="" pulumi config set mcp-registry:githubClientSecret --secret
+```
+
+#### To delete the stack
+
+`make local-destroy` and deleting the cluster (with minikube: `minikube delete`) will reset you back to a clean state.
 
 ### Production Deployment (GCP)
 
diff --git a/deploy/pkg/k8s/cert_manager.go b/deploy/pkg/k8s/cert_manager.go
index fe37f1d..3572226 100644
--- a/deploy/pkg/k8s/cert_manager.go
+++ b/deploy/pkg/k8s/cert_manager.go
@@ -14,7 +14,7 @@ import (
 // SetupCertManager sets up cert-manager for TLS certificates
 func SetupCertManager(ctx *pulumi.Context, cluster *providers.ProviderInfo) error {
 	// Create namespace for cert-manager
-	_, err := v1.NewNamespace(ctx, "cert-manager", &v1.NamespaceArgs{
+	certManagerNamespace, err := v1.NewNamespace(ctx, "cert-manager", &v1.NamespaceArgs{
 		Metadata: &metav1.ObjectMetaArgs{
 			Name: pulumi.String("cert-manager"),
 		},
@@ -24,13 +24,13 @@ func SetupCertManager(ctx *pulumi.Context, cluster *providers.ProviderInfo) erro
 	}
 
 	// Install cert-manager for TLS certificates
-	_, err = helm.NewChart(ctx, "cert-manager", helm.ChartArgs{
+	certManager, err := helm.NewChart(ctx, "cert-manager", helm.ChartArgs{
 		Chart:   pulumi.String("cert-manager"),
 		Version: pulumi.String("v1.18.2"),
 		FetchArgs: helm.FetchArgs{
 			Repo: pulumi.String("https://charts.jetstack.io"),
 		},
-		Namespace: pulumi.String("cert-manager"),
+		Namespace: certManagerNamespace.Metadata.Name().Elem(),
 		Values: pulumi.Map{
 			"installCRDs": pulumi.Bool(true),
 			"ingressShim": pulumi.Map{
@@ -69,7 +69,7 @@ func SetupCertManager(ctx *pulumi.Context, cluster *providers.ProviderInfo) erro
 				},
 			},
 		},
-	}, 
pulumi.Provider(cluster.Provider)) + }, pulumi.Provider(cluster.Provider), pulumi.DependsOn([]pulumi.Resource{certManager})) if err != nil { return err } diff --git a/deploy/pkg/k8s/ingress.go b/deploy/pkg/k8s/ingress.go index d366346..94d315c 100644 --- a/deploy/pkg/k8s/ingress.go +++ b/deploy/pkg/k8s/ingress.go @@ -1,6 +1,8 @@ package k8s import ( + "strings" + v1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1" "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3" metav1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/meta/v1" @@ -19,7 +21,7 @@ func SetupIngressController(ctx *pulumi.Context, cluster *providers.ProviderInfo } // Create namespace for ingress-nginx - _, err := v1.NewNamespace(ctx, "ingress-nginx", &v1.NamespaceArgs{ + ingressNginxNamespace, err := v1.NewNamespace(ctx, "ingress-nginx", &v1.NamespaceArgs{ Metadata: &metav1.ObjectMetaArgs{ Name: pulumi.String("ingress-nginx"), }, @@ -29,22 +31,17 @@ func SetupIngressController(ctx *pulumi.Context, cluster *providers.ProviderInfo } // Install NGINX Ingress Controller - ingressType := "LoadBalancer" - if provider == "local" { - ingressType = "NodePort" - } - - nginxIngress, err := helm.NewChart(ctx, "ingress-nginx", helm.ChartArgs{ + ingressNginx, err := helm.NewChart(ctx, "ingress-nginx", helm.ChartArgs{ Chart: pulumi.String("ingress-nginx"), Version: pulumi.String("4.13.0"), FetchArgs: helm.FetchArgs{ Repo: pulumi.String("https://kubernetes.github.io/ingress-nginx"), }, - Namespace: pulumi.String("ingress-nginx"), + Namespace: ingressNginxNamespace.Metadata.Name().Elem(), Values: pulumi.Map{ "controller": pulumi.Map{ "service": pulumi.Map{ - "type": pulumi.String(ingressType), + "type": pulumi.String("LoadBalancer"), "annotations": pulumi.Map{ // Add Azure Load Balancer health probe annotation as otherwise it defaults to / which fails "service.beta.kubernetes.io/azure-load-balancer-health-probe-request-path": pulumi.String("/healthz"), @@ -66,31 +63,30 @@ func 
SetupIngressController(ctx *pulumi.Context, cluster *providers.ProviderInfo return err } - // Use the helm chart to get service information after deployment - ingressIps := nginxIngress.Resources.ApplyT(func(resources interface{}) interface{} { - // Look up the service after the chart is ready - svc, err := v1.GetService( - ctx, - "ingress-nginx-controller-lookup", - pulumi.ID("ingress-nginx/ingress-nginx-controller"), - &v1.ServiceState{}, - pulumi.Provider(cluster.Provider), - pulumi.DependsOn([]pulumi.Resource{nginxIngress}), - ) - if err != nil { - return []string{} // Return empty array on error during preview - } - - // Return the LoadBalancer ingress IPs - return svc.Status.LoadBalancer().Ingress().ApplyT(func(ingresses []v1.LoadBalancerIngress) []string { - var ips []string - for _, ingress := range ingresses { - if ip := ingress.Ip; ip != nil && *ip != "" { - ips = append(ips, *ip) + // Extract ingress IPs from the Helm chart's controller service + ingressIps := ingressNginx.Resources.ApplyT(func(resources interface{}) interface{} { + // Look for the ingress-nginx-controller service + resourceMap := resources.(map[string]pulumi.Resource) + for resourceName, resource := range resourceMap { + if strings.Contains(resourceName, "ingress-nginx-controller") && + !strings.Contains(resourceName, "admission") && + strings.Contains(resourceName, "Service") { + if svc, ok := resource.(*v1.Service); ok { + // Return the LoadBalancer ingress IPs + return svc.Status.LoadBalancer().Ingress().ApplyT(func(ingresses []v1.LoadBalancerIngress) []string { + var ips []string + for _, ingress := range ingresses { + if ip := ingress.Ip; ip != nil && *ip != "" { + ips = append(ips, *ip) + } + } + return ips + }) } } - return ips - }) + } + // Return empty array if no matching service found + return []string{} }) ctx.Export("ingressIps", ingressIps)