import * as pulumi from "@pulumi/pulumi";
import * as awsx from "@pulumi/awsx";
import * as eks from "@pulumi/eks";
import { createArgoRole } from "./iam";
import { GitOpsClusterConfig } from "./github";

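// Required stack configuration values (illustrative examples; adjust per environment):
//   cidrBlock          - VPC CIDR block, e.g. "10.0.0.0/16"
//   clusterType        - "hub" or "spoke"
//   awsAccountId       - ID of the AWS account the cluster is deployed into
//   implementationType - "github" or "secret"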
const stackName = pulumi.getStack();
const config = new pulumi.Config();
const roleMappings: eks.RoleMapping[] = [];

export const outputs: {[key: string]: any} = {
  "stackName": stackName,
};

// VPC Config
const vpc = new awsx.ec2.Vpc("vpc", {
  cidrBlock: config.require("cidrBlock"),
  numberOfAvailabilityZones: 3,
  enableDnsHostnames: true,
  enableDnsSupport: true,
  subnetSpecs: [
    {
      type: awsx.ec2.SubnetType.Private,
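      // Tags that let Karpenter (karpenter.sh/discovery) and Kubernetes internal load
      // balancer provisioning discover these private subnets.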
      tags: {
        "karpenter.sh/discovery": `${stackName}-cluster`,
        [`kubernetes.io/cluster/${stackName}-cluster`]: "owned",
        "kubernetes.io/role/internal-elb": "1",
      },
    },
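    // Small isolated subnets intended for Transit Gateway attachments.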
    {
      type: awsx.ec2.SubnetType.Isolated,
      name: "tgw-attachment-subnet",
      cidrMask: 27,
    },
    {
      type: awsx.ec2.SubnetType.Public,
      tags: {
        [`kubernetes.io/cluster/${stackName}-cluster`]: "owned",
        "kubernetes.io/role/elb": "1",
      },
    },
  ],
});

// If we are creating a spoke cluster, the Argo role must be created first so it can be
// added to the cluster's auth mapping with the correct permissions.
if (config.require("clusterType") === "spoke") {
  const argoRole = createArgoRole(config.require("awsAccountId"), pulumi.output(""), config);
  roleMappings.push({
    roleArn: argoRole.arn,
    username: argoRole.arn,
    groups: ["system:masters"],
  });
  outputs.argoRoleArn = argoRole.arn;
}

// Create EKS Cluster with a default node group
const eksCluster = new eks.Cluster(`${stackName}-cluster`, {
  name: `${stackName}-cluster`,
  vpcId: vpc.vpcId,
  version: "1.29",
  publicSubnetIds: vpc.publicSubnetIds,
  privateSubnetIds: vpc.privateSubnetIds,
  roleMappings: roleMappings,
  nodeSecurityGroupTags: {
    "karpenter.sh/discovery": `${stackName}-cluster`,
  },
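  // Create an IAM OIDC provider for the cluster so Kubernetes service accounts can
  // assume IAM roles (IRSA).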
  createOidcProvider: true,
  clusterTags: {
    "karpenter.sh/discovery": `${stackName}-cluster`,
  },
  nodeGroupOptions: {
    nodeSubnetIds: vpc.privateSubnetIds,
    nodeRootVolumeEncrypted: true,
    nodeRootVolumeType: "gp3",
    minSize: 1,
    maxSize: 50,
    desiredCapacity: 10,
  },
});

outputs.clusterName = eksCluster.eksCluster.name;
outputs.clusterApiEndpoint = eksCluster.core.endpoint;

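// OIDC issuer URL of the new cluster; passed to createArgoRole below when creating the hub cluster.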
const oidcProviderUrl = eksCluster.core.oidcProvider?.url as pulumi.Output<string>;

// If we are creating the hub cluster, pods in the EKS cluster need to be able to assume
// the Argo role via the cluster's OIDC provider, so the cluster must be created first.
if (config.require("clusterType") === "hub") {
  const argoRole = createArgoRole(config.require("awsAccountId"), oidcProviderUrl, config);
  outputs.argoRoleArn = argoRole.arn;
}

// Create the GitOps configuration for the given cluster. One method is to upload a file
// to the GitHub repo for the GitOps controller to manage; alternatively, we can create a
// Secret object and apply it directly to the cluster.
if (config.require("implementationType") === "github") {
  new GitOpsClusterConfig(outputs, config, eksCluster.eksCluster.certificateAuthority.data);
} else if (config.require("implementationType") === "secret") {
  // TODO Implement
}