infra: Improve local development flow and avoid false positive infra diffs #267


Merged (3 commits, Aug 15, 2025)
5 changes: 5 additions & 0 deletions deploy/Makefile
@@ -12,13 +12,18 @@ build: ## Build the Pulumi Go program
# Local stack commands
local-login: ## Login to local Pulumi backend
pulumi login --local
PULUMI_CONFIG_PASSPHRASE="" pulumi stack select local --create

local-preview: build local-login ## Preview local infrastructure changes
PULUMI_CONFIG_PASSPHRASE="" pulumi preview --stack local

local-up: build local-login ## Deploy local infrastructure
PULUMI_CONFIG_PASSPHRASE="" pulumi up --yes --stack local

local-destroy: local-login ## Destroy local infrastructure
pulumi stack rm local --force --yes --preserve-config
echo "Make sure to also delete your k8s cluster, e.g. minikube delete"

# Staging stack commands
staging-login: ## Login to staging Pulumi backend
pulumi login gs://mcp-registry-staging-pulumi-state
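With the targets above, a typical local iteration might look like the following sketch (it assumes minikube and the Pulumi CLI are installed, per the README changes below):

```bash
# Start a local cluster (minikube shown here; any cluster reachable via kubeconfig works)
minikube start

# Preview and then apply the local stack; the Makefile sets PULUMI_CONFIG_PASSPHRASE="" itself
make local-preview
make local-up

# Tear everything back down
make local-destroy
minikube delete
```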
32 changes: 16 additions & 16 deletions deploy/README.md
@@ -10,22 +10,22 @@ Pre-requisites:
- [Pulumi CLI installed](https://www.pulumi.com/docs/iac/download-install/)
- Access to a Kubernetes cluster via kubeconfig. You can run a cluster locally with [minikube](https://minikube.sigs.k8s.io/docs/start/).

1. Set Pulumi's backend to local: `pulumi login --local`
2. Init the local stack: `pulumi stack init local` (fine to leave `password` blank)
3. Set your config:
```bash
# General environment
pulumi config set mcp-registry:environment local

# To use your local kubeconfig (default)
pulumi config set mcp-registry:provider local
# GitHub OAuth
pulumi config set mcp-registry:githubClientId <your-github-client-id>
pulumi config set --secret mcp-registry:githubClientSecret <your-github-client-secret>
```
4. Deploy: `make local-up`
5. Access the repository via the ingress load balancer. You can find its external IP with `kubectl get svc ingress-nginx-controller -n ingress-nginx` (with minikube, if it's 'pending' you might need `minikube tunnel`). Then run `curl -H "Host: local.registry.modelcontextprotocol.io" -k https://<EXTERNAL-IP>/v0/ping` to check that the service is up.
1. Ensure your kubeconfig is configured at the cluster you want to use. For minikube, run `minikube start && minikube tunnel`.
2. Run `make local-up` to deploy the stack. Run this again if the first attempt fails.
3. Access the repository via the ingress load balancer. You can find its external IP with `kubectl get svc ingress-nginx-controller -n ingress-nginx`. Then run `curl -H "Host: local.registry.modelcontextprotocol.io" -k https://<EXTERNAL-IP>/v0/ping` to check that the service is up.

#### To change config

The stack is configured out of the box for local development. But if you want to make changes, run commands like:

```bash
PULUMI_CONFIG_PASSPHRASE="" pulumi config set mcp-registry:environment local
PULUMI_CONFIG_PASSPHRASE="" pulumi config set mcp-registry:githubClientSecret --secret <some-secret-value>
```

#### To delete the stack

`make local-destroy` and deleting the cluster (with minikube: `minikube delete`) will reset you back to a clean state.

### Production Deployment (GCP)

8 changes: 4 additions & 4 deletions deploy/pkg/k8s/cert_manager.go
@@ -14,7 +14,7 @@ import (
// SetupCertManager sets up cert-manager for TLS certificates
func SetupCertManager(ctx *pulumi.Context, cluster *providers.ProviderInfo) error {
// Create namespace for cert-manager
_, err := v1.NewNamespace(ctx, "cert-manager", &v1.NamespaceArgs{
certManagerNamespace, err := v1.NewNamespace(ctx, "cert-manager", &v1.NamespaceArgs{
Metadata: &metav1.ObjectMetaArgs{
Name: pulumi.String("cert-manager"),
},
@@ -24,13 +24,13 @@
}

// Install cert-manager for TLS certificates
_, err = helm.NewChart(ctx, "cert-manager", helm.ChartArgs{
certManager, err := helm.NewChart(ctx, "cert-manager", helm.ChartArgs{
Chart: pulumi.String("cert-manager"),
Version: pulumi.String("v1.18.2"),
FetchArgs: helm.FetchArgs{
Repo: pulumi.String("https://charts.jetstack.io"),
},
Namespace: pulumi.String("cert-manager"),
Namespace: certManagerNamespace.Metadata.Name().Elem(),
Values: pulumi.Map{
"installCRDs": pulumi.Bool(true),
"ingressShim": pulumi.Map{
@@ -69,7 +69,7 @@ func SetupCertManager(ctx *pulumi.Context, cluster *providers.ProviderInfo) erro
},
},
},
}, pulumi.Provider(cluster.Provider))
}, pulumi.Provider(cluster.Provider), pulumi.DependsOn([]pulumi.Resource{certManager}))
if err != nil {
return err
}
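The change above threads the namespace resource's name into the chart's `Namespace` argument and adds an explicit `DependsOn` so the resource created later in `SetupCertManager` waits for the chart, rather than relying on hard-coded strings for ordering. A quick sanity check on a local cluster might look like this (standard kubectl commands, not part of this PR):

```bash
# After `make local-up`, the chart's pods should land in the namespace created above
kubectl get pods -n cert-manager

# installCRDs: true means the cert-manager CRDs should also be present
kubectl get crds | grep cert-manager.io
```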
64 changes: 28 additions & 36 deletions deploy/pkg/k8s/ingress.go
@@ -1,6 +1,8 @@
package k8s

import (
"strings"

v1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1"
"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3"
metav1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/meta/v1"
@@ -19,7 +21,7 @@ func SetupIngressController(ctx *pulumi.Context, cluster *providers.ProviderInfo
}

// Create namespace for ingress-nginx
_, err := v1.NewNamespace(ctx, "ingress-nginx", &v1.NamespaceArgs{
ingressNginxNamespace, err := v1.NewNamespace(ctx, "ingress-nginx", &v1.NamespaceArgs{
Metadata: &metav1.ObjectMetaArgs{
Name: pulumi.String("ingress-nginx"),
},
@@ -29,22 +31,17 @@
}

// Install NGINX Ingress Controller
ingressType := "LoadBalancer"
if provider == "local" {
ingressType = "NodePort"
}

nginxIngress, err := helm.NewChart(ctx, "ingress-nginx", helm.ChartArgs{
ingressNginx, err := helm.NewChart(ctx, "ingress-nginx", helm.ChartArgs{
Chart: pulumi.String("ingress-nginx"),
Version: pulumi.String("4.13.0"),
FetchArgs: helm.FetchArgs{
Repo: pulumi.String("https://kubernetes.github.io/ingress-nginx"),
},
Namespace: pulumi.String("ingress-nginx"),
Namespace: ingressNginxNamespace.Metadata.Name().Elem(),
Values: pulumi.Map{
"controller": pulumi.Map{
"service": pulumi.Map{
"type": pulumi.String(ingressType),
"type": pulumi.String("LoadBalancer"),
"annotations": pulumi.Map{
// Add Azure Load Balancer health probe annotation as otherwise it defaults to / which fails
"service.beta.kubernetes.io/azure-load-balancer-health-probe-request-path": pulumi.String("/healthz"),
@@ -66,35 +63,30 @@
return err
}

// Use the helm chart to get service information after deployment
ingressIps := nginxIngress.Resources.ApplyT(func(resources interface{}) interface{} {
if ctx.DryRun() {
return []string{} // Return empty array on error during preview
}

// Look up the service after the chart is ready
svc, err := v1.GetService(
ctx,
"ingress-nginx-controller-lookup",
pulumi.ID("ingress-nginx/ingress-nginx-controller"),
&v1.ServiceState{},
pulumi.Provider(cluster.Provider),
pulumi.DependsOn([]pulumi.Resource{nginxIngress}),
)
if err != nil {
return []string{} // Return empty array on error during preview
}

// Return the LoadBalancer ingress IPs
return svc.Status.LoadBalancer().Ingress().ApplyT(func(ingresses []v1.LoadBalancerIngress) []string {
var ips []string
for _, ingress := range ingresses {
if ip := ingress.Ip; ip != nil && *ip != "" {
ips = append(ips, *ip)
// Extract ingress IPs from the Helm chart's controller service
ingressIps := ingressNginx.Resources.ApplyT(func(resources interface{}) interface{} {
// Look for the ingress-nginx-controller service
resourceMap := resources.(map[string]pulumi.Resource)
for resourceName, resource := range resourceMap {
if strings.Contains(resourceName, "ingress-nginx-controller") &&
!strings.Contains(resourceName, "admission") &&
strings.Contains(resourceName, "Service") {
if svc, ok := resource.(*v1.Service); ok {
// Return the LoadBalancer ingress IPs
return svc.Status.LoadBalancer().Ingress().ApplyT(func(ingresses []v1.LoadBalancerIngress) []string {
var ips []string
for _, ingress := range ingresses {
if ip := ingress.Ip; ip != nil && *ip != "" {
ips = append(ips, *ip)
}
}
return ips
})
}
}
return ips
})
}
// Return empty array if no matching service found
return []string{}
})
ctx.Export("ingressIps", ingressIps)

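The `ingressIps` export now comes from the controller Service inside the chart's own `Resources` map rather than a separate `GetService` lookup guarded by `ctx.DryRun()`, which is presumably what produced the false-positive preview diffs this PR targets. A rough way to check the export after a deploy (assuming the local stack and empty passphrase used elsewhere in this PR):

```bash
# Print the exported ingress IPs from the local stack
PULUMI_CONFIG_PASSPHRASE="" pulumi stack output ingressIps --stack local

# Or look up the controller Service directly and ping the registry, per the README
kubectl get svc ingress-nginx-controller -n ingress-nginx
curl -H "Host: local.registry.modelcontextprotocol.io" -k https://<EXTERNAL-IP>/v0/ping
```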