From f4ea5e98c19708867f4a014b3758b4fce9c2adb5 Mon Sep 17 00:00:00 2001
From: MasterPtato
Date: Sat, 28 Jun 2025 01:25:52 +0000
Subject: [PATCH] fix: api changes

---
 packages/common/api-helper/macros/src/lib.rs | 23 +- packages/common/fdb-util/src/keys.rs | 2 + .../errors/container/failed-to-create.md | 10 + .../container/logs/invalid-container-ids.md | 9 + .../errors/container/logs/no-container-ids.md | 9 + .../container/logs/no-valid-container-ids.md | 9 + .../container/metrics/invalid-interval.md | 9 + .../container/metrics/invalid-metrics.md | 9 + .../errors/container/metrics/no-metrics.md | 9 + .../container/metrics/unsupported-metrics.md | 9 + .../errors/container/not-found.md | 9 + packages/common/pools/src/db/sqlite/mod.rs | 4 +- packages/common/util/id/src/lib.rs | 7 +- packages/core/api/actor/src/assert.rs | 554 +++- .../core/api/actor/src/route/actors/logs.rs | 197 ++ .../api/actor/src/route/actors/metrics.rs | 336 +++ .../core/api/actor/src/route/actors/mod.rs | 866 ++++++ .../actor/src/route/{ => actors/v1}/logs.rs | 109 +- .../src/route/{ => actors/v1}/metrics.rs | 35 +- .../src/route/{actors.rs => actors/v1/mod.rs} | 146 +- packages/core/api/actor/src/route/builds.rs | 10 +- .../api/actor/src/route/containers/logs.rs | 213 ++ .../api/actor/src/route/containers/metrics.rs | 336 +++ .../api/actor/src/route/containers/mod.rs | 788 ++++++ packages/core/api/actor/src/route/mod.rs | 324 ++- packages/core/api/actor/src/route/regions.rs | 4 +- packages/core/api/status/src/route/actor.rs | 26 +- .../files/cadvisor_metric_exporter.sh | 2 +- packages/edge/api/actor/src/assert.rs | 46 + .../src/route/{actors.rs => actors/mod.rs} | 452 +--- .../edge/api/actor/src/route/actors/v1.rs | 690 +++++ .../edge/api/actor/src/route/containers.rs | 668 +++++ packages/edge/api/actor/src/route/mod.rs | 166 +- .../infra/client/container-runner/src/main.rs | 2 +- .../manager/src/image_download_handler.rs | 75 +- .../infra/client/manager/src/runner/mod.rs | 2 +- .../infra/client/manager/src/runner/setup.rs | 3 +- .../infra/guard/server/src/routing/actor.rs | 120 +- .../guard/server/src/routing/actor_routes.rs | 27 +- .../migrations/20200101000000_init.up.sql | 1 - .../edge/services/pegboard/src/keys/env.rs | 114 + packages/edge/services/pegboard/src/lib.rs | 2 +- .../pegboard/src/ops/actor/list_for_env.rs | 58 +- .../pegboard/src/ops/actor/log/read.rs | 2 +- .../services/pegboard/src/ops/actor/mod.rs | 2 +- .../actor/{ => v1}/allocate_ingress_ports.rs | 0 .../services/pegboard/src/ops/actor/v1/get.rs | 360 +++ .../pegboard/src/ops/actor/v1/list_for_env.rs | 98 + .../pegboard/src/ops/actor/v1/log/export.rs | 109 + .../pegboard/src/ops/actor/v1/log/mod.rs | 2 + .../pegboard/src/ops/actor/v1/log/read.rs | 196 ++ .../services/pegboard/src/ops/actor/v1/mod.rs | 4 + .../src/ops/container/list_for_env.rs | 96 + .../pegboard/src/ops/container/mod.rs | 3 + .../edge/services/pegboard/src/ops/mod.rs | 1 + packages/edge/services/pegboard/src/types.rs | 475 +++- .../pegboard/src/workflows/actor/analytics.rs | 14 +- .../pegboard/src/workflows/actor/destroy.rs | 407 ++- .../src/workflows/actor/migrations.rs | 46 +- .../pegboard/src/workflows/actor/mod.rs | 203 +- .../pegboard/src/workflows/actor/runtime.rs | 1149 ++++++-- .../pegboard/src/workflows/actor/setup.rs | 463 +++- .../{actor2 => actor/v1}/analytics.rs | 14 +- .../src/workflows/actor/v1/destroy.rs | 374 +++ .../{actor2 => actor/v1}/migrations.rs | 46 +- .../src/workflows/{actor2 => actor/v1}/mod.rs | 178 +- .../src/workflows/actor/v1/runtime.rs | 951 +++++++
.../workflows/{actor2 => actor/v1}/setup.rs | 453 +--- .../pegboard/src/workflows/actor2/destroy.rs | 536 ---- .../pegboard/src/workflows/actor2/runtime.rs | 1476 ----------- .../pegboard/src/workflows/client/mod.rs | 48 +- .../services/pegboard/src/workflows/mod.rs | 1 - scripts/openapi/gen_rust.ts | 14 +- .../fern/definition/actors/__package__.yml | 2 +- sdks/api/fern/definition/actors/common.yml | 13 - sdks/api/fern/definition/actors/logs.yml | 4 +- sdks/api/fern/definition/actors/metrics.yml | 6 +- .../fern/definition/actors/v1/__package__.yml | 180 ++ sdks/api/fern/definition/actors/v1/common.yml | 98 + sdks/api/fern/definition/actors/v1/logs.yml | 61 + .../api/fern/definition/actors/v1/metrics.yml | 39 + sdks/api/fern/definition/api.yml | 1 + sdks/api/fern/definition/builds/common.yml | 4 +- .../definition/containers/__package__.yml | 179 ++ .../api/fern/definition/containers/common.yml | 98 + sdks/api/fern/definition/containers/logs.yml | 63 + .../fern/definition/containers/metrics.yml | 38 + sdks/api/full/go/actors/actors.go | 1 - sdks/api/full/go/actors/client/client.go | 15 +- sdks/api/full/go/actors/logs.go | 2 +- sdks/api/full/go/actors/logs/client.go | 2 +- sdks/api/full/go/actors/metrics/client.go | 5 +- sdks/api/full/go/actors/types.go | 35 - sdks/api/full/go/actors/v1/client/client.go | 607 +++++ sdks/api/full/go/actors/v1/logs.go | 89 + sdks/api/full/go/actors/v1/logs/client.go | 138 + sdks/api/full/go/actors/v1/metrics.go | 50 + sdks/api/full/go/actors/v1/metrics/client.go | 130 + sdks/api/full/go/actors/v1/types.go | 572 ++++ sdks/api/full/go/actors/v1/v_1.go | 281 ++ sdks/api/full/go/client/client.go | 3 + sdks/api/full/go/containers/client/client.go | 606 +++++ sdks/api/full/go/containers/containers.go | 281 ++ sdks/api/full/go/containers/logs.go | 91 + sdks/api/full/go/containers/logs/client.go | 138 + sdks/api/full/go/containers/metrics.go | 50 + sdks/api/full/go/containers/metrics/client.go | 129 + sdks/api/full/go/containers/types.go | 572 ++++ sdks/api/full/openapi/openapi.yml | 2325 +++++++++++++++-- sdks/api/full/openapi_compat/openapi.yml | 2325 +++++++++++++++-- sdks/api/full/rust/.openapi-generator/FILES | 116 + sdks/api/full/rust/README.md | 79 + sdks/api/full/rust/docs/ActorsActor.md | 1 - sdks/api/full/rust/docs/ActorsApi.md | 9 + .../rust/docs/ActorsCreateActorRequest.md | 1 - sdks/api/full/rust/docs/ActorsLogsApi.md | 4 + sdks/api/full/rust/docs/ActorsMetricsApi.md | 4 +- sdks/api/full/rust/docs/ActorsV1Actor.md | 20 + sdks/api/full/rust/docs/ActorsV1Api.md | 213 ++ .../docs/ActorsV1CreateActorNetworkRequest.md | 13 + .../docs/ActorsV1CreateActorPortRequest.md | 13 + .../rust/docs/ActorsV1CreateActorRequest.md | 18 + .../rust/docs/ActorsV1CreateActorResponse.md | 11 + ...ctorsV1CreateActorRuntimeNetworkRequest.md | 11 + .../docs/ActorsV1CreateActorRuntimeRequest.md | 12 + .../full/rust/docs/ActorsV1EndpointType.md | 10 + .../rust/docs/ActorsV1GetActorLogsResponse.md | 16 + .../docs/ActorsV1GetActorMetricsResponse.md | 15 + .../rust/docs/ActorsV1GetActorResponse.md | 11 + sdks/api/full/rust/docs/ActorsV1Lifecycle.md | 12 + .../rust/docs/ActorsV1ListActorsResponse.md | 12 + sdks/api/full/rust/docs/ActorsV1LogsApi.md | 46 + sdks/api/full/rust/docs/ActorsV1MetricsApi.md | 44 + sdks/api/full/rust/docs/ActorsV1Network.md | 12 + .../api/full/rust/docs/ActorsV1NetworkMode.md | 10 + sdks/api/full/rust/docs/ActorsV1Port.md | 17 + .../full/rust/docs/ActorsV1PortProtocol.md | 10 + .../api/full/rust/docs/ActorsV1PortRouting.md | 12 + 
.../full/rust/docs/ActorsV1QueryLogStream.md | 10 + ...ctorsResources.md => ActorsV1Resources.md} | 2 +- sdks/api/full/rust/docs/ActorsV1Runtime.md | 13 + .../rust/docs/ActorsV1UpgradeActorRequest.md | 12 + .../docs/ActorsV1UpgradeAllActorsRequest.md | 13 + .../docs/ActorsV1UpgradeAllActorsResponse.md | 11 + sdks/api/full/rust/docs/ContainersApi.md | 213 ++ .../api/full/rust/docs/ContainersContainer.md | 20 + ...ContainersCreateContainerNetworkRequest.md | 13 + .../ContainersCreateContainerPortRequest.md | 13 + .../docs/ContainersCreateContainerRequest.md | 18 + .../docs/ContainersCreateContainerResponse.md | 11 + ...ersCreateContainerRuntimeNetworkRequest.md | 11 + ...ContainersCreateContainerRuntimeRequest.md | 12 + .../full/rust/docs/ContainersEndpointType.md | 10 + .../ContainersGetContainerLogsResponse.md | 17 + .../ContainersGetContainerMetricsResponse.md | 15 + .../docs/ContainersGetContainerResponse.md | 11 + .../api/full/rust/docs/ContainersLifecycle.md | 12 + .../docs/ContainersListContainersResponse.md | 12 + sdks/api/full/rust/docs/ContainersLogsApi.md | 46 + .../full/rust/docs/ContainersMetricsApi.md | 44 + sdks/api/full/rust/docs/ContainersNetwork.md | 12 + .../full/rust/docs/ContainersNetworkMode.md | 10 + sdks/api/full/rust/docs/ContainersPort.md | 17 + .../full/rust/docs/ContainersPortProtocol.md | 10 + .../full/rust/docs/ContainersPortRouting.md | 12 + .../rust/docs/ContainersQueryLogStream.md | 10 + .../rust/docs/ContainersResources.md} | 2 +- sdks/api/full/rust/docs/ContainersRuntime.md | 13 + .../ContainersUpgradeAllContainersRequest.md | 13 + .../ContainersUpgradeAllContainersResponse.md | 11 + .../docs/ContainersUpgradeContainerRequest.md | 12 + sdks/api/full/rust/src/apis/actors_api.rs | 12 +- .../api/full/rust/src/apis/actors_logs_api.rs | 2 +- .../full/rust/src/apis/actors_metrics_api.rs | 2 +- sdks/api/full/rust/src/apis/actors_v1_api.rs | 448 ++++ .../full/rust/src/apis/actors_v1_logs_api.rs | 102 + .../rust/src/apis/actors_v1_metrics_api.rs | 88 + sdks/api/full/rust/src/apis/containers_api.rs | 452 ++++ .../full/rust/src/apis/containers_logs_api.rs | 102 + .../rust/src/apis/containers_metrics_api.rs | 89 + sdks/api/full/rust/src/apis/mod.rs | 6 + sdks/api/full/rust/src/models/actors_actor.rs | 3 - .../src/models/actors_create_actor_request.rs | 3 - .../full/rust/src/models/actors_v1_actor.rs | 61 + .../actors_v1_create_actor_network_request.rs | 30 + .../actors_v1_create_actor_port_request.rs | 29 + .../models/actors_v1_create_actor_request.rs | 49 + .../models/actors_v1_create_actor_response.rs | 23 + ...v1_create_actor_runtime_network_request.rs | 23 + .../actors_v1_create_actor_runtime_request.rs | 26 + .../src/models/actors_v1_endpoint_type.rs | 33 + .../actors_v1_get_actor_logs_response.rs | 50 + .../actors_v1_get_actor_metrics_response.rs | 41 + .../models/actors_v1_get_actor_response.rs | 23 + .../rust/src/models/actors_v1_lifecycle.rs | 28 + .../models/actors_v1_list_actors_response.rs | 30 + .../full/rust/src/models/actors_v1_network.rs | 26 + .../rust/src/models/actors_v1_network_mode.rs | 33 + .../full/rust/src/models/actors_v1_port.rs | 45 + .../src/models/actors_v1_port_protocol.rs | 42 + .../rust/src/models/actors_v1_port_routing.rs | 26 + .../src/models/actors_v1_query_log_stream.rs | 36 + ...rs_resources.rs => actors_v1_resources.rs} | 8 +- .../full/rust/src/models/actors_v1_runtime.rs | 29 + .../models/actors_v1_upgrade_actor_request.rs | 31 + .../actors_v1_upgrade_all_actors_request.rs | 34 + .../actors_v1_upgrade_all_actors_response.rs | 21 + 
.../rust/src/models/containers_container.rs | 63 + ...ainers_create_container_network_request.rs | 31 + ...ontainers_create_container_port_request.rs | 31 + .../containers_create_container_request.rs | 52 + .../containers_create_container_response.rs | 23 + ...reate_container_runtime_network_request.rs | 23 + ...ainers_create_container_runtime_request.rs | 26 + .../src/models/containers_endpoint_type.rs | 33 + .../containers_get_container_logs_response.rs | 55 + ...ntainers_get_container_metrics_response.rs | 41 + .../containers_get_container_response.rs | 23 + .../rust/src/models/containers_lifecycle.rs | 28 + .../containers_list_containers_response.rs | 30 + .../rust/src/models/containers_network.rs | 26 + .../src/models/containers_network_mode.rs | 33 + .../full/rust/src/models/containers_port.rs | 45 + .../src/models/containers_port_protocol.rs | 42 + .../src/models/containers_port_routing.rs | 26 + .../src/models/containers_query_log_stream.rs | 36 + .../rust/src/models/containers_resources.rs | 25 + .../rust/src/models/containers_runtime.rs | 29 + ...ntainers_upgrade_all_containers_request.rs | 34 + ...tainers_upgrade_all_containers_response.rs | 21 + .../containers_upgrade_container_request.rs | 31 + sdks/api/full/rust/src/models/mod.rs | 101 + sdks/api/full/typescript/src/Client.ts | 6 + .../src/api/resources/actors/client/Client.ts | 34 +- .../requests/CreateActorRequestQuery.ts | 4 - .../actors/resources/common/types/Actor.ts | 1 - .../actors/resources/common/types/index.ts | 1 - .../api/resources/actors/resources/index.ts | 1 + .../actors/resources/logs/client/Client.ts | 4 +- .../logs/types/GetActorLogsResponse.ts | 2 +- .../actors/resources/metrics/client/Client.ts | 12 +- .../actors/resources/v1/client/Client.ts | 1061 ++++++++ .../actors/resources/v1/client/index.ts | 1 + .../requests/CreateActorRequestQuery.ts | 49 + .../requests/DestroyActorRequestQuery.ts | 20 + .../client/requests/GetActorsRequestQuery.ts | 25 + .../client/requests/ListActorsRequestQuery.ts | 19 + .../requests/UpgradeActorRequestQuery.ts | 24 + .../requests/UpgradeAllActorsRequestQuery.ts | 27 + .../resources/v1/client/requests/index.ts | 6 + .../resources/actors/resources/v1/index.ts | 3 + .../resources/v1/resources/common/index.ts | 1 + .../v1/resources/common/types/Actor.ts | 18 + .../v1/resources/common/types/EndpointType.ts | 9 + .../v1/resources/common/types/GuardRouting.ts | 5 + .../v1/resources/common/types/HostRouting.ts | 5 + .../v1/resources/common/types/Lifecycle.ts | 10 + .../v1/resources/common/types/Network.ts | 10 + .../v1/resources/common/types/NetworkMode.ts | 9 + .../v1/resources/common/types/Port.ts | 16 + .../v1/resources/common/types/PortProtocol.ts | 12 + .../v1/resources/common/types/PortRouting.ts | 10 + .../resources}/common/types/Resources.ts | 0 .../v1/resources/common/types/Runtime.ts | 9 + .../v1/resources/common/types/index.ts | 12 + .../actors/resources/v1/resources/index.ts | 8 + .../v1/resources/logs/client/Client.ts | 234 ++ .../v1/resources/logs/client/index.ts | 1 + .../requests/GetActorLogsRequestQuery.ts | 32 + .../resources/logs/client/requests/index.ts | 1 + .../resources/v1/resources/logs/index.ts | 2 + .../logs/types/GetActorLogsResponse.ts | 24 + .../v1/resources/logs/types/QueryLogStream.ts | 10 + .../v1/resources/logs/types/index.ts | 2 + .../v1/resources/metrics/client/Client.ts | 209 ++ .../v1/resources/metrics/client/index.ts | 1 + .../requests/GetActorMetricsRequestQuery.ts | 21 + .../metrics/client/requests/index.ts | 1 + 
.../resources/v1/resources/metrics/index.ts | 2 + .../metrics/types/GetActorMetricsResponse.ts | 11 + .../v1/resources/metrics/types/index.ts | 1 + .../v1/types/CreateActorNetworkRequest.ts | 11 + .../v1/types/CreateActorPortRequest.ts | 11 + .../resources/v1/types/CreateActorRequest.ts | 16 + .../resources/v1/types/CreateActorResponse.ts | 10 + .../types/CreateActorRuntimeNetworkRequest.ts | 9 + .../v1/types/CreateActorRuntimeRequest.ts | 10 + .../v1/types/DestroyActorResponse.ts | 5 + .../resources/v1/types/GetActorResponse.ts | 9 + .../resources/v1/types/ListActorsResponse.ts | 11 + .../resources/v1/types/UpgradeActorRequest.ts | 8 + .../v1/types/UpgradeActorResponse.ts | 5 + .../v1/types/UpgradeAllActorsRequest.ts | 9 + .../v1/types/UpgradeAllActorsResponse.ts | 7 + .../actors/resources/v1/types/index.ts | 13 + .../actors/types/CreateActorRequest.ts | 1 - .../api/resources/containers/client/Client.ts | 1067 ++++++++ .../api/resources/containers/client/index.ts | 1 + .../requests/CreateContainerRequestQuery.ts | 49 + .../requests/DestroyContainerRequestQuery.ts | 20 + .../requests/GetContainersRequestQuery.ts | 25 + .../requests/ListContainersRequestQuery.ts | 19 + .../UpgradeAllContainersRequestQuery.ts | 27 + .../requests/UpgradeContainerRequestQuery.ts | 24 + .../containers/client/requests/index.ts | 6 + .../src/api/resources/containers/index.ts | 3 + .../containers/resources/common/index.ts | 1 + .../resources/common/types/Container.ts | 18 + .../resources/common/types/EndpointType.ts | 9 + .../resources/common/types/GuardRouting.ts | 5 + .../resources/common/types/HostRouting.ts | 5 + .../resources/common/types/Lifecycle.ts | 10 + .../resources/common/types/Network.ts | 10 + .../resources/common/types/NetworkMode.ts | 9 + .../containers/resources/common/types/Port.ts | 16 + .../resources/common/types/PortProtocol.ts | 12 + .../resources/common/types/PortRouting.ts | 10 + .../resources/common/types/Resources.ts | 0 .../resources/common/types/Runtime.ts | 9 + .../resources/common/types/index.ts | 12 + .../resources/containers/resources/index.ts | 8 + .../resources/logs/client/Client.ts | 234 ++ .../containers/resources/logs/client/index.ts | 1 + .../requests/GetContainerLogsRequestQuery.ts | 32 + .../resources/logs/client/requests/index.ts | 1 + .../containers/resources/logs/index.ts | 2 + .../logs/types/GetContainerLogsResponse.ts | 26 + .../resources/logs/types/QueryLogStream.ts | 10 + .../containers/resources/logs/types/index.ts | 2 + .../resources/metrics/client/Client.ts | 209 ++ .../resources/metrics/client/index.ts | 1 + .../GetContainerMetricsRequestQuery.ts | 21 + .../metrics/client/requests/index.ts | 1 + .../containers/resources/metrics/index.ts | 2 + .../types/GetContainerMetricsResponse.ts | 11 + .../resources/metrics/types/index.ts | 1 + .../types/CreateContainerNetworkRequest.ts | 11 + .../types/CreateContainerPortRequest.ts | 11 + .../types/CreateContainerRequest.ts | 16 + .../types/CreateContainerResponse.ts | 10 + .../CreateContainerRuntimeNetworkRequest.ts | 9 + .../types/CreateContainerRuntimeRequest.ts | 10 + .../types/DestroyContainerResponse.ts | 5 + .../containers/types/GetContainerResponse.ts | 9 + .../types/ListContainersResponse.ts | 11 + .../types/UpgradeAllContainersRequest.ts | 9 + .../types/UpgradeAllContainersResponse.ts | 7 + .../types/UpgradeContainerRequest.ts | 8 + .../types/UpgradeContainerResponse.ts | 5 + .../api/resources/containers/types/index.ts | 13 + .../typescript/src/api/resources/index.ts | 1 + .../actors/resources/common/types/Actor.ts 
| 3 - .../actors/resources/common/types/index.ts | 1 - .../resources/actors/resources/index.ts | 1 + .../logs/types/GetActorLogsResponse.ts | 5 +- .../resources/actors/resources/v1/index.ts | 2 + .../resources/v1/resources/common/index.ts | 1 + .../v1/resources/common/types/Actor.ts | 41 + .../v1/resources/common/types/EndpointType.ts | 16 + .../v1/resources/common/types/GuardRouting.ts | 16 + .../v1/resources/common/types/HostRouting.ts | 16 + .../v1/resources/common/types/Lifecycle.ts | 22 + .../v1/resources/common/types/Network.ts | 22 + .../v1/resources/common/types/NetworkMode.ts | 16 + .../v1/resources/common/types/Port.ts | 32 + .../v1/resources/common/types/PortProtocol.ts | 16 + .../v1/resources/common/types/PortRouting.ts | 24 + .../v1/resources/common/types/Resources.ts | 22 + .../v1/resources/common/types/Runtime.ts | 22 + .../v1/resources/common/types/index.ts | 12 + .../actors/resources/v1/resources/index.ts | 6 + .../resources/v1/resources/logs/index.ts | 1 + .../logs/types/GetActorLogsResponse.ts | 32 + .../v1/resources/logs/types/QueryLogStream.ts | 16 + .../v1/resources/logs/types/index.ts | 2 + .../resources/v1/resources/metrics/index.ts | 1 + .../metrics/types/GetActorMetricsResponse.ts | 34 + .../v1/resources/metrics/types/index.ts | 1 + .../v1/types/CreateActorNetworkRequest.ts | 26 + .../v1/types/CreateActorPortRequest.ts | 26 + .../resources/v1/types/CreateActorRequest.ts | 38 + .../resources/v1/types/CreateActorResponse.ts | 21 + .../types/CreateActorRuntimeNetworkRequest.ts | 21 + .../v1/types/CreateActorRuntimeRequest.ts | 23 + .../v1/types/DestroyActorResponse.ts | 16 + .../resources/v1/types/GetActorResponse.ts | 21 + .../resources/v1/types/ListActorsResponse.ts | 24 + .../resources/v1/types/UpgradeActorRequest.ts | 22 + .../v1/types/UpgradeActorResponse.ts | 16 + .../v1/types/UpgradeAllActorsRequest.ts | 24 + .../v1/types/UpgradeAllActorsResponse.ts | 20 + .../actors/resources/v1/types/index.ts | 13 + .../actors/types/CreateActorRequest.ts | 3 - .../resources/containers/index.ts | 2 + .../containers/resources/common/index.ts | 1 + .../resources/common/types/Container.ts | 44 + .../resources/common/types/EndpointType.ts | 16 + .../resources/common/types/GuardRouting.ts | 16 + .../resources/common/types/HostRouting.ts | 16 + .../resources/common/types/Lifecycle.ts | 22 + .../resources/common/types/Network.ts | 22 + .../resources/common/types/NetworkMode.ts | 16 + .../containers/resources/common/types/Port.ts | 32 + .../resources/common/types/PortProtocol.ts | 16 + .../resources/common/types/PortRouting.ts | 24 + .../resources/common/types/Resources.ts | 12 +- .../resources/common/types/Runtime.ts | 22 + .../resources/common/types/index.ts | 12 + .../resources/containers/resources/index.ts | 6 + .../containers/resources/logs/index.ts | 1 + .../logs/types/GetContainerLogsResponse.ts | 38 + .../resources/logs/types/QueryLogStream.ts | 16 + .../containers/resources/logs/types/index.ts | 2 + .../containers/resources/metrics/index.ts | 1 + .../types/GetContainerMetricsResponse.ts | 34 + .../resources/metrics/types/index.ts | 1 + .../types/CreateContainerNetworkRequest.ts | 26 + .../types/CreateContainerPortRequest.ts | 26 + .../types/CreateContainerRequest.ts | 38 + .../types/CreateContainerResponse.ts | 21 + .../CreateContainerRuntimeNetworkRequest.ts | 21 + .../types/CreateContainerRuntimeRequest.ts | 23 + .../types/DestroyContainerResponse.ts | 16 + .../containers/types/GetContainerResponse.ts | 21 + .../types/ListContainersResponse.ts | 24 + 
.../types/UpgradeAllContainersRequest.ts | 24 + .../types/UpgradeAllContainersResponse.ts | 20 + .../types/UpgradeContainerRequest.ts | 22 + .../types/UpgradeContainerResponse.ts | 16 + .../resources/containers/types/index.ts | 13 + .../src/serialization/resources/index.ts | 1 + sdks/api/runtime/go/actors/actors.go | 1 - sdks/api/runtime/go/actors/client/client.go | 17 +- sdks/api/runtime/go/actors/logs.go | 2 +- sdks/api/runtime/go/actors/logs/client.go | 2 +- sdks/api/runtime/go/actors/metrics.go | 50 + sdks/api/runtime/go/actors/metrics/client.go | 129 + sdks/api/runtime/go/actors/types.go | 35 - sdks/api/runtime/go/client/client.go | 25 +- .../runtime/go/containers/client/client.go | 606 +++++ sdks/api/runtime/go/containers/containers.go | 281 ++ sdks/api/runtime/go/containers/logs.go | 91 + sdks/api/runtime/go/containers/logs/client.go | 138 + sdks/api/runtime/go/containers/metrics.go | 50 + .../runtime/go/containers/metrics/client.go | 129 + sdks/api/runtime/go/containers/types.go | 572 ++++ sdks/api/runtime/openapi/openapi.yml | 1347 ++++++++-- sdks/api/runtime/openapi_compat/openapi.yml | 1347 ++++++++-- .../api/runtime/rust/.openapi-generator/FILES | 69 + sdks/api/runtime/rust/README.md | 48 + sdks/api/runtime/rust/docs/ActorsActor.md | 1 - sdks/api/runtime/rust/docs/ActorsApi.md | 9 + .../rust/docs/ActorsCreateActorRequest.md | 1 - .../docs/ActorsGetActorMetricsResponse.md | 15 + sdks/api/runtime/rust/docs/ActorsLogsApi.md | 4 + .../api/runtime/rust/docs/ActorsMetricsApi.md | 44 + sdks/api/runtime/rust/docs/ContainersApi.md | 213 ++ .../runtime/rust/docs/ContainersContainer.md | 20 + ...ContainersCreateContainerNetworkRequest.md | 13 + .../ContainersCreateContainerPortRequest.md | 13 + .../docs/ContainersCreateContainerRequest.md | 18 + .../docs/ContainersCreateContainerResponse.md | 11 + ...ersCreateContainerRuntimeNetworkRequest.md | 11 + ...ContainersCreateContainerRuntimeRequest.md | 12 + .../rust/docs/ContainersEndpointType.md | 10 + .../ContainersGetContainerLogsResponse.md | 17 + .../ContainersGetContainerMetricsResponse.md | 15 + .../docs/ContainersGetContainerResponse.md | 11 + .../runtime/rust/docs/ContainersLifecycle.md | 12 + .../docs/ContainersListContainersResponse.md | 12 + .../runtime/rust/docs/ContainersLogsApi.md | 46 + .../runtime/rust/docs/ContainersMetricsApi.md | 44 + .../runtime/rust/docs/ContainersNetwork.md | 12 + .../rust/docs/ContainersNetworkMode.md | 10 + sdks/api/runtime/rust/docs/ContainersPort.md | 17 + .../rust/docs/ContainersPortProtocol.md | 10 + .../rust/docs/ContainersPortRouting.md | 12 + .../rust/docs/ContainersQueryLogStream.md | 10 + .../runtime/rust/docs/ContainersResources.md | 12 + .../runtime/rust/docs/ContainersRuntime.md | 13 + .../ContainersUpgradeAllContainersRequest.md | 13 + .../ContainersUpgradeAllContainersResponse.md | 11 + .../docs/ContainersUpgradeContainerRequest.md | 12 + sdks/api/runtime/rust/src/apis/actors_api.rs | 12 +- .../runtime/rust/src/apis/actors_logs_api.rs | 2 +- .../rust/src/apis/actors_metrics_api.rs | 71 + .../runtime/rust/src/apis/containers_api.rs | 342 +++ .../rust/src/apis/containers_logs_api.rs | 82 + .../rust/src/apis/containers_metrics_api.rs | 71 + sdks/api/runtime/rust/src/apis/mod.rs | 4 + sdks/api/runtime/rust/src/apis/mod.rs.orig | 4 + .../runtime/rust/src/models/actors_actor.rs | 3 - .../src/models/actors_create_actor_request.rs | 3 - .../actors_get_actor_metrics_response.rs | 40 + .../rust/src/models/containers_container.rs | 59 + ...ainers_create_container_network_request.rs | 34 + 
...ontainers_create_container_port_request.rs | 34 + .../containers_create_container_request.rs | 49 + .../containers_create_container_response.rs | 28 + ...reate_container_runtime_network_request.rs | 28 + ...ainers_create_container_runtime_request.rs | 31 + .../src/models/containers_endpoint_type.rs | 39 + .../containers_get_container_logs_response.rs | 52 + ...ntainers_get_container_metrics_response.rs | 40 + .../containers_get_container_response.rs | 28 + .../rust/src/models/containers_lifecycle.rs | 33 + .../containers_list_containers_response.rs | 32 + .../rust/src/models/containers_network.rs | 31 + .../src/models/containers_network_mode.rs | 39 + .../rust/src/models/containers_port.rs | 47 + .../src/models/containers_port_protocol.rs | 48 + .../src/models/containers_port_routing.rs | 31 + .../src/models/containers_query_log_stream.rs | 42 + ...s_resources.rs => containers_resources.rs} | 8 +- .../rust/src/models/containers_runtime.rs | 34 + ...ntainers_upgrade_all_containers_request.rs | 34 + ...tainers_upgrade_all_containers_response.rs | 28 + .../containers_upgrade_container_request.rs | 31 + sdks/api/runtime/rust/src/models/mod.rs | 55 + sdks/api/runtime/typescript/src/Client.ts | 6 + .../src/api/resources/actors/client/Client.ts | 34 +- .../requests/CreateActorRequestQuery.ts | 4 - .../actors/resources/common/types/Actor.ts | 1 - .../actors/resources/common/types/index.ts | 1 - .../api/resources/actors/resources/index.ts | 3 + .../actors/resources/logs/client/Client.ts | 4 +- .../logs/types/GetActorLogsResponse.ts | 2 +- .../actors/resources/metrics/client/Client.ts | 209 ++ .../actors/resources/metrics/client/index.ts | 1 + .../requests/GetActorMetricsRequestQuery.ts | 21 + .../metrics/client/requests/index.ts | 1 + .../actors/resources/metrics/index.ts | 2 + .../metrics/types/GetActorMetricsResponse.ts | 11 + .../actors/resources/metrics/types/index.ts | 1 + .../actors/types/CreateActorRequest.ts | 1 - .../api/resources/containers/client/Client.ts | 1067 ++++++++ .../api/resources/containers/client/index.ts | 1 + .../requests/CreateContainerRequestQuery.ts | 49 + .../requests/DestroyContainerRequestQuery.ts | 20 + .../requests/GetContainersRequestQuery.ts | 25 + .../requests/ListContainersRequestQuery.ts | 19 + .../UpgradeAllContainersRequestQuery.ts | 27 + .../requests/UpgradeContainerRequestQuery.ts | 24 + .../containers/client/requests/index.ts | 6 + .../src/api/resources/containers/index.ts | 3 + .../containers/resources/common/index.ts | 1 + .../resources/common/types/Container.ts | 18 + .../resources/common/types/EndpointType.ts | 9 + .../resources/common/types/GuardRouting.ts | 5 + .../resources/common/types/HostRouting.ts | 5 + .../resources/common/types/Lifecycle.ts | 10 + .../resources/common/types/Network.ts | 10 + .../resources/common/types/NetworkMode.ts | 9 + .../containers/resources/common/types/Port.ts | 16 + .../resources/common/types/PortProtocol.ts | 12 + .../resources/common/types/PortRouting.ts | 10 + .../resources/common/types/Resources.ts | 14 + .../resources/common/types/Runtime.ts | 9 + .../resources/common/types/index.ts | 12 + .../resources/containers/resources/index.ts | 8 + .../resources/logs/client/Client.ts | 234 ++ .../containers/resources/logs/client/index.ts | 1 + .../requests/GetContainerLogsRequestQuery.ts | 32 + .../resources/logs/client/requests/index.ts | 1 + .../containers/resources/logs/index.ts | 2 + .../logs/types/GetContainerLogsResponse.ts | 26 + .../resources/logs/types/QueryLogStream.ts | 10 + 
.../containers/resources/logs/types/index.ts | 2 + .../resources/metrics/client/Client.ts | 209 ++ .../resources/metrics/client/index.ts | 1 + .../GetContainerMetricsRequestQuery.ts | 21 + .../metrics/client/requests/index.ts | 1 + .../containers/resources/metrics/index.ts | 2 + .../types/GetContainerMetricsResponse.ts | 11 + .../resources/metrics/types/index.ts | 1 + .../types/CreateContainerNetworkRequest.ts | 11 + .../types/CreateContainerPortRequest.ts | 11 + .../types/CreateContainerRequest.ts | 16 + .../types/CreateContainerResponse.ts | 10 + .../CreateContainerRuntimeNetworkRequest.ts | 9 + .../types/CreateContainerRuntimeRequest.ts | 10 + .../types/DestroyContainerResponse.ts | 5 + .../containers/types/GetContainerResponse.ts | 9 + .../types/ListContainersResponse.ts | 11 + .../types/UpgradeAllContainersRequest.ts | 9 + .../types/UpgradeAllContainersResponse.ts | 7 + .../types/UpgradeContainerRequest.ts | 8 + .../types/UpgradeContainerResponse.ts | 5 + .../api/resources/containers/types/index.ts | 13 + .../typescript/src/api/resources/index.ts | 1 + .../actors/resources/common/types/Actor.ts | 3 - .../actors/resources/common/types/index.ts | 1 - .../resources/actors/resources/index.ts | 5 + .../logs/types/GetActorLogsResponse.ts | 5 +- .../actors/resources/metrics/index.ts | 1 + .../metrics/types/GetActorMetricsResponse.ts | 34 + .../actors/resources/metrics/types/index.ts | 1 + .../actors/types/CreateActorRequest.ts | 3 - .../resources/containers/index.ts | 2 + .../containers/resources/common/index.ts | 1 + .../resources/common/types/Container.ts | 44 + .../resources/common/types/EndpointType.ts | 16 + .../resources/common/types/GuardRouting.ts | 16 + .../resources/common/types/HostRouting.ts | 16 + .../resources/common/types/Lifecycle.ts | 22 + .../resources/common/types/Network.ts | 22 + .../resources/common/types/NetworkMode.ts | 16 + .../containers/resources/common/types/Port.ts | 32 + .../resources/common/types/PortProtocol.ts | 16 + .../resources/common/types/PortRouting.ts | 24 + .../resources/common/types/Resources.ts | 12 +- .../resources/common/types/Runtime.ts | 22 + .../resources/common/types/index.ts | 12 + .../resources/containers/resources/index.ts | 6 + .../containers/resources/logs/index.ts | 1 + .../logs/types/GetContainerLogsResponse.ts | 38 + .../resources/logs/types/QueryLogStream.ts | 16 + .../containers/resources/logs/types/index.ts | 2 + .../containers/resources/metrics/index.ts | 1 + .../types/GetContainerMetricsResponse.ts | 34 + .../resources/metrics/types/index.ts | 1 + .../types/CreateContainerNetworkRequest.ts | 26 + .../types/CreateContainerPortRequest.ts | 26 + .../types/CreateContainerRequest.ts | 38 + .../types/CreateContainerResponse.ts | 21 + .../CreateContainerRuntimeNetworkRequest.ts | 21 + .../types/CreateContainerRuntimeRequest.ts | 23 + .../types/DestroyContainerResponse.ts | 16 + .../containers/types/GetContainerResponse.ts | 21 + .../types/ListContainersResponse.ts | 24 + .../types/UpgradeAllContainersRequest.ts | 24 + .../types/UpgradeAllContainersResponse.ts | 20 + .../types/UpgradeContainerRequest.ts | 22 + .../types/UpgradeContainerResponse.ts | 16 + .../resources/containers/types/index.ts | 13 + .../src/serialization/resources/index.ts | 1 + .../src/content/docs/api/actors/v1/create.mdx | 41 + .../content/docs/api/actors/v1/destroy.mdx | 37 + site/src/content/docs/api/actors/v1/get.mdx | 37 + site/src/content/docs/api/actors/v1/list.mdx | 37 + .../content/docs/api/actors/v1/logs/get.mdx | 37 + .../docs/api/actors/v1/metrics/get.mdx | 
37 + .../docs/api/actors/v1/upgrade-all.mdx | 41 + .../content/docs/api/actors/v1/upgrade.mdx | 41 + .../content/docs/cloud/api/actors/create.mdx | 4 +- .../content/docs/cloud/api/actors/destroy.mdx | 4 +- .../src/content/docs/cloud/api/actors/get.mdx | 4 +- .../content/docs/cloud/api/actors/list.mdx | 4 +- .../docs/cloud/api/actors/logs/get.mdx | 4 +- .../docs/cloud/api/actors/metrics/get.mdx | 6 +- .../docs/cloud/api/actors/upgrade-all.mdx | 4 +- .../content/docs/cloud/api/actors/upgrade.mdx | 4 +- site/src/content/docs/cloud/api/errors.mdx | 59 + site/src/generated/apiPages.json | 72 +-
 654 files changed, 39227 insertions(+), 4952 deletions(-)

 create mode 100644 packages/common/formatted-error/errors/container/failed-to-create.md create mode 100644 packages/common/formatted-error/errors/container/logs/invalid-container-ids.md create mode 100644 packages/common/formatted-error/errors/container/logs/no-container-ids.md create mode 100644 packages/common/formatted-error/errors/container/logs/no-valid-container-ids.md create mode 100644 packages/common/formatted-error/errors/container/metrics/invalid-interval.md create mode 100644 packages/common/formatted-error/errors/container/metrics/invalid-metrics.md create mode 100644 packages/common/formatted-error/errors/container/metrics/no-metrics.md create mode 100644 packages/common/formatted-error/errors/container/metrics/unsupported-metrics.md create mode 100644 packages/common/formatted-error/errors/container/not-found.md create mode 100644 packages/core/api/actor/src/route/actors/logs.rs create mode 100644 packages/core/api/actor/src/route/actors/metrics.rs create mode 100644 packages/core/api/actor/src/route/actors/mod.rs rename packages/core/api/actor/src/route/{ => actors/v1}/logs.rs (76%) rename packages/core/api/actor/src/route/{ => actors/v1}/metrics.rs (93%) rename packages/core/api/actor/src/route/{actors.rs => actors/v1/mod.rs} (93%) create mode 100644 packages/core/api/actor/src/route/containers/logs.rs create mode 100644 packages/core/api/actor/src/route/containers/metrics.rs create mode 100644 packages/core/api/actor/src/route/containers/mod.rs rename packages/edge/api/actor/src/route/{actors.rs => actors/mod.rs} (56%) create mode 100644 packages/edge/api/actor/src/route/actors/v1.rs create mode 100644 packages/edge/api/actor/src/route/containers.rs rename packages/edge/services/pegboard/src/ops/actor/{ => v1}/allocate_ingress_ports.rs (100%) create mode 100644 packages/edge/services/pegboard/src/ops/actor/v1/get.rs create mode 100644 packages/edge/services/pegboard/src/ops/actor/v1/list_for_env.rs create mode 100644 packages/edge/services/pegboard/src/ops/actor/v1/log/export.rs create mode 100644 packages/edge/services/pegboard/src/ops/actor/v1/log/mod.rs create mode 100644 packages/edge/services/pegboard/src/ops/actor/v1/log/read.rs create mode 100644 packages/edge/services/pegboard/src/ops/actor/v1/mod.rs create mode 100644 packages/edge/services/pegboard/src/ops/container/list_for_env.rs create mode 100644 packages/edge/services/pegboard/src/ops/container/mod.rs rename packages/edge/services/pegboard/src/workflows/{actor2 => actor/v1}/analytics.rs (95%) create mode 100644 packages/edge/services/pegboard/src/workflows/actor/v1/destroy.rs rename packages/edge/services/pegboard/src/workflows/{actor2 => actor/v1}/migrations.rs (64%) rename packages/edge/services/pegboard/src/workflows/{actor2 => actor/v1}/mod.rs (76%) create mode 100644 packages/edge/services/pegboard/src/workflows/actor/v1/runtime.rs rename
packages/edge/services/pegboard/src/workflows/{actor2 => actor/v1}/setup.rs (61%) delete mode 100644 packages/edge/services/pegboard/src/workflows/actor2/destroy.rs delete mode 100644 packages/edge/services/pegboard/src/workflows/actor2/runtime.rs create mode 100644 sdks/api/fern/definition/actors/v1/__package__.yml create mode 100644 sdks/api/fern/definition/actors/v1/common.yml create mode 100644 sdks/api/fern/definition/actors/v1/logs.yml create mode 100644 sdks/api/fern/definition/actors/v1/metrics.yml create mode 100644 sdks/api/fern/definition/containers/__package__.yml create mode 100644 sdks/api/fern/definition/containers/common.yml create mode 100644 sdks/api/fern/definition/containers/logs.yml create mode 100644 sdks/api/fern/definition/containers/metrics.yml create mode 100644 sdks/api/full/go/actors/v1/client/client.go create mode 100644 sdks/api/full/go/actors/v1/logs.go create mode 100644 sdks/api/full/go/actors/v1/logs/client.go create mode 100644 sdks/api/full/go/actors/v1/metrics.go create mode 100644 sdks/api/full/go/actors/v1/metrics/client.go create mode 100644 sdks/api/full/go/actors/v1/types.go create mode 100644 sdks/api/full/go/actors/v1/v_1.go create mode 100644 sdks/api/full/go/containers/client/client.go create mode 100644 sdks/api/full/go/containers/containers.go create mode 100644 sdks/api/full/go/containers/logs.go create mode 100644 sdks/api/full/go/containers/logs/client.go create mode 100644 sdks/api/full/go/containers/metrics.go create mode 100644 sdks/api/full/go/containers/metrics/client.go create mode 100644 sdks/api/full/go/containers/types.go create mode 100644 sdks/api/full/rust/docs/ActorsV1Actor.md create mode 100644 sdks/api/full/rust/docs/ActorsV1Api.md create mode 100644 sdks/api/full/rust/docs/ActorsV1CreateActorNetworkRequest.md create mode 100644 sdks/api/full/rust/docs/ActorsV1CreateActorPortRequest.md create mode 100644 sdks/api/full/rust/docs/ActorsV1CreateActorRequest.md create mode 100644 sdks/api/full/rust/docs/ActorsV1CreateActorResponse.md create mode 100644 sdks/api/full/rust/docs/ActorsV1CreateActorRuntimeNetworkRequest.md create mode 100644 sdks/api/full/rust/docs/ActorsV1CreateActorRuntimeRequest.md create mode 100644 sdks/api/full/rust/docs/ActorsV1EndpointType.md create mode 100644 sdks/api/full/rust/docs/ActorsV1GetActorLogsResponse.md create mode 100644 sdks/api/full/rust/docs/ActorsV1GetActorMetricsResponse.md create mode 100644 sdks/api/full/rust/docs/ActorsV1GetActorResponse.md create mode 100644 sdks/api/full/rust/docs/ActorsV1Lifecycle.md create mode 100644 sdks/api/full/rust/docs/ActorsV1ListActorsResponse.md create mode 100644 sdks/api/full/rust/docs/ActorsV1LogsApi.md create mode 100644 sdks/api/full/rust/docs/ActorsV1MetricsApi.md create mode 100644 sdks/api/full/rust/docs/ActorsV1Network.md create mode 100644 sdks/api/full/rust/docs/ActorsV1NetworkMode.md create mode 100644 sdks/api/full/rust/docs/ActorsV1Port.md create mode 100644 sdks/api/full/rust/docs/ActorsV1PortProtocol.md create mode 100644 sdks/api/full/rust/docs/ActorsV1PortRouting.md create mode 100644 sdks/api/full/rust/docs/ActorsV1QueryLogStream.md rename sdks/api/full/rust/docs/{ActorsResources.md => ActorsV1Resources.md} (96%) create mode 100644 sdks/api/full/rust/docs/ActorsV1Runtime.md create mode 100644 sdks/api/full/rust/docs/ActorsV1UpgradeActorRequest.md create mode 100644 sdks/api/full/rust/docs/ActorsV1UpgradeAllActorsRequest.md create mode 100644 sdks/api/full/rust/docs/ActorsV1UpgradeAllActorsResponse.md create mode 100644 
sdks/api/full/rust/docs/ContainersApi.md create mode 100644 sdks/api/full/rust/docs/ContainersContainer.md create mode 100644 sdks/api/full/rust/docs/ContainersCreateContainerNetworkRequest.md create mode 100644 sdks/api/full/rust/docs/ContainersCreateContainerPortRequest.md create mode 100644 sdks/api/full/rust/docs/ContainersCreateContainerRequest.md create mode 100644 sdks/api/full/rust/docs/ContainersCreateContainerResponse.md create mode 100644 sdks/api/full/rust/docs/ContainersCreateContainerRuntimeNetworkRequest.md create mode 100644 sdks/api/full/rust/docs/ContainersCreateContainerRuntimeRequest.md create mode 100644 sdks/api/full/rust/docs/ContainersEndpointType.md create mode 100644 sdks/api/full/rust/docs/ContainersGetContainerLogsResponse.md create mode 100644 sdks/api/full/rust/docs/ContainersGetContainerMetricsResponse.md create mode 100644 sdks/api/full/rust/docs/ContainersGetContainerResponse.md create mode 100644 sdks/api/full/rust/docs/ContainersLifecycle.md create mode 100644 sdks/api/full/rust/docs/ContainersListContainersResponse.md create mode 100644 sdks/api/full/rust/docs/ContainersLogsApi.md create mode 100644 sdks/api/full/rust/docs/ContainersMetricsApi.md create mode 100644 sdks/api/full/rust/docs/ContainersNetwork.md create mode 100644 sdks/api/full/rust/docs/ContainersNetworkMode.md create mode 100644 sdks/api/full/rust/docs/ContainersPort.md create mode 100644 sdks/api/full/rust/docs/ContainersPortProtocol.md create mode 100644 sdks/api/full/rust/docs/ContainersPortRouting.md create mode 100644 sdks/api/full/rust/docs/ContainersQueryLogStream.md rename sdks/api/{runtime/rust/docs/ActorsResources.md => full/rust/docs/ContainersResources.md} (95%) create mode 100644 sdks/api/full/rust/docs/ContainersRuntime.md create mode 100644 sdks/api/full/rust/docs/ContainersUpgradeAllContainersRequest.md create mode 100644 sdks/api/full/rust/docs/ContainersUpgradeAllContainersResponse.md create mode 100644 sdks/api/full/rust/docs/ContainersUpgradeContainerRequest.md create mode 100644 sdks/api/full/rust/src/apis/actors_v1_api.rs create mode 100644 sdks/api/full/rust/src/apis/actors_v1_logs_api.rs create mode 100644 sdks/api/full/rust/src/apis/actors_v1_metrics_api.rs create mode 100644 sdks/api/full/rust/src/apis/containers_api.rs create mode 100644 sdks/api/full/rust/src/apis/containers_logs_api.rs create mode 100644 sdks/api/full/rust/src/apis/containers_metrics_api.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_actor.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_create_actor_network_request.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_create_actor_port_request.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_create_actor_request.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_create_actor_response.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_create_actor_runtime_network_request.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_create_actor_runtime_request.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_endpoint_type.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_get_actor_logs_response.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_get_actor_metrics_response.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_get_actor_response.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_lifecycle.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_list_actors_response.rs create mode 100644 
sdks/api/full/rust/src/models/actors_v1_network.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_network_mode.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_port.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_port_protocol.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_port_routing.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_query_log_stream.rs rename sdks/api/full/rust/src/models/{actors_resources.rs => actors_v1_resources.rs} (79%) create mode 100644 sdks/api/full/rust/src/models/actors_v1_runtime.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_upgrade_actor_request.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_upgrade_all_actors_request.rs create mode 100644 sdks/api/full/rust/src/models/actors_v1_upgrade_all_actors_response.rs create mode 100644 sdks/api/full/rust/src/models/containers_container.rs create mode 100644 sdks/api/full/rust/src/models/containers_create_container_network_request.rs create mode 100644 sdks/api/full/rust/src/models/containers_create_container_port_request.rs create mode 100644 sdks/api/full/rust/src/models/containers_create_container_request.rs create mode 100644 sdks/api/full/rust/src/models/containers_create_container_response.rs create mode 100644 sdks/api/full/rust/src/models/containers_create_container_runtime_network_request.rs create mode 100644 sdks/api/full/rust/src/models/containers_create_container_runtime_request.rs create mode 100644 sdks/api/full/rust/src/models/containers_endpoint_type.rs create mode 100644 sdks/api/full/rust/src/models/containers_get_container_logs_response.rs create mode 100644 sdks/api/full/rust/src/models/containers_get_container_metrics_response.rs create mode 100644 sdks/api/full/rust/src/models/containers_get_container_response.rs create mode 100644 sdks/api/full/rust/src/models/containers_lifecycle.rs create mode 100644 sdks/api/full/rust/src/models/containers_list_containers_response.rs create mode 100644 sdks/api/full/rust/src/models/containers_network.rs create mode 100644 sdks/api/full/rust/src/models/containers_network_mode.rs create mode 100644 sdks/api/full/rust/src/models/containers_port.rs create mode 100644 sdks/api/full/rust/src/models/containers_port_protocol.rs create mode 100644 sdks/api/full/rust/src/models/containers_port_routing.rs create mode 100644 sdks/api/full/rust/src/models/containers_query_log_stream.rs create mode 100644 sdks/api/full/rust/src/models/containers_resources.rs create mode 100644 sdks/api/full/rust/src/models/containers_runtime.rs create mode 100644 sdks/api/full/rust/src/models/containers_upgrade_all_containers_request.rs create mode 100644 sdks/api/full/rust/src/models/containers_upgrade_all_containers_response.rs create mode 100644 sdks/api/full/rust/src/models/containers_upgrade_container_request.rs create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/Client.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/CreateActorRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/DestroyActorRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/GetActorsRequestQuery.ts create mode 100644 
sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/ListActorsRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/UpgradeActorRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/UpgradeAllActorsRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Actor.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/EndpointType.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/GuardRouting.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/HostRouting.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Lifecycle.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Network.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/NetworkMode.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Port.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/PortProtocol.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/PortRouting.ts rename sdks/api/full/typescript/src/api/resources/actors/resources/{ => v1/resources}/common/types/Resources.ts (100%) create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Runtime.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/Client.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/requests/GetActorLogsRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/requests/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/types/GetActorLogsResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/types/QueryLogStream.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/types/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/Client.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/index.ts create mode 100644 
sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/requests/GetActorMetricsRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/requests/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/types/GetActorMetricsResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/types/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorNetworkRequest.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorPortRequest.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorRequest.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorRuntimeNetworkRequest.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorRuntimeRequest.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/DestroyActorResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/GetActorResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/ListActorsResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeActorRequest.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeActorResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeAllActorsRequest.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeAllActorsResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/client/Client.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/client/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/client/requests/CreateContainerRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/client/requests/DestroyContainerRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/client/requests/GetContainersRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/client/requests/ListContainersRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/client/requests/UpgradeAllContainersRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/client/requests/UpgradeContainerRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/client/requests/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/common/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Container.ts create mode 100644 
sdks/api/full/typescript/src/api/resources/containers/resources/common/types/EndpointType.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/common/types/GuardRouting.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/common/types/HostRouting.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Lifecycle.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Network.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/common/types/NetworkMode.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Port.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/common/types/PortProtocol.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/common/types/PortRouting.ts rename sdks/api/{runtime/typescript/src/api/resources/actors => full/typescript/src/api/resources/containers}/resources/common/types/Resources.ts (100%) create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Runtime.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/common/types/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/Client.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/requests/GetContainerLogsRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/requests/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/logs/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/logs/types/GetContainerLogsResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/logs/types/QueryLogStream.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/logs/types/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/Client.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/requests/GetContainerMetricsRequestQuery.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/requests/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/metrics/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/resources/metrics/types/index.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerNetworkRequest.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerPortRequest.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerRequest.ts create mode 100644 
sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerRuntimeRequest.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/types/DestroyContainerResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/types/GetContainerResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/types/ListContainersResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/types/UpgradeAllContainersRequest.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/types/UpgradeAllContainersResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/types/UpgradeContainerRequest.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/types/UpgradeContainerResponse.ts create mode 100644 sdks/api/full/typescript/src/api/resources/containers/types/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Actor.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/EndpointType.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/GuardRouting.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/HostRouting.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Lifecycle.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Network.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/NetworkMode.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Port.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/PortProtocol.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/PortRouting.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Resources.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Runtime.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/types/GetActorLogsResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/types/QueryLogStream.ts create mode 100644 
sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/types/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/metrics/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/metrics/types/GetActorMetricsResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/metrics/types/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorNetworkRequest.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorPortRequest.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorRequest.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorRuntimeNetworkRequest.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorRuntimeRequest.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/DestroyActorResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/GetActorResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/ListActorsResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeActorRequest.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeActorResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeAllActorsRequest.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeAllActorsResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/common/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Container.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/EndpointType.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/GuardRouting.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/HostRouting.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Lifecycle.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Network.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/NetworkMode.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Port.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/PortProtocol.ts create mode 100644 
sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/PortRouting.ts rename sdks/api/full/typescript/src/serialization/resources/{actors => containers}/resources/common/types/Resources.ts (58%) create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Runtime.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/types/GetContainerLogsResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/types/QueryLogStream.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/types/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/metrics/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/resources/metrics/types/index.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerNetworkRequest.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerPortRequest.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerRequest.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeRequest.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/types/DestroyContainerResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/types/GetContainerResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/types/ListContainersResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeAllContainersRequest.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeAllContainersResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeContainerRequest.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeContainerResponse.ts create mode 100644 sdks/api/full/typescript/src/serialization/resources/containers/types/index.ts create mode 100644 sdks/api/runtime/go/actors/metrics.go create mode 100644 sdks/api/runtime/go/actors/metrics/client.go create mode 100644 sdks/api/runtime/go/containers/client/client.go create mode 100644 sdks/api/runtime/go/containers/containers.go create mode 100644 sdks/api/runtime/go/containers/logs.go create mode 100644 sdks/api/runtime/go/containers/logs/client.go create mode 100644 sdks/api/runtime/go/containers/metrics.go create mode 100644 sdks/api/runtime/go/containers/metrics/client.go create mode 
100644 sdks/api/runtime/go/containers/types.go create mode 100644 sdks/api/runtime/rust/docs/ActorsGetActorMetricsResponse.md create mode 100644 sdks/api/runtime/rust/docs/ActorsMetricsApi.md create mode 100644 sdks/api/runtime/rust/docs/ContainersApi.md create mode 100644 sdks/api/runtime/rust/docs/ContainersContainer.md create mode 100644 sdks/api/runtime/rust/docs/ContainersCreateContainerNetworkRequest.md create mode 100644 sdks/api/runtime/rust/docs/ContainersCreateContainerPortRequest.md create mode 100644 sdks/api/runtime/rust/docs/ContainersCreateContainerRequest.md create mode 100644 sdks/api/runtime/rust/docs/ContainersCreateContainerResponse.md create mode 100644 sdks/api/runtime/rust/docs/ContainersCreateContainerRuntimeNetworkRequest.md create mode 100644 sdks/api/runtime/rust/docs/ContainersCreateContainerRuntimeRequest.md create mode 100644 sdks/api/runtime/rust/docs/ContainersEndpointType.md create mode 100644 sdks/api/runtime/rust/docs/ContainersGetContainerLogsResponse.md create mode 100644 sdks/api/runtime/rust/docs/ContainersGetContainerMetricsResponse.md create mode 100644 sdks/api/runtime/rust/docs/ContainersGetContainerResponse.md create mode 100644 sdks/api/runtime/rust/docs/ContainersLifecycle.md create mode 100644 sdks/api/runtime/rust/docs/ContainersListContainersResponse.md create mode 100644 sdks/api/runtime/rust/docs/ContainersLogsApi.md create mode 100644 sdks/api/runtime/rust/docs/ContainersMetricsApi.md create mode 100644 sdks/api/runtime/rust/docs/ContainersNetwork.md create mode 100644 sdks/api/runtime/rust/docs/ContainersNetworkMode.md create mode 100644 sdks/api/runtime/rust/docs/ContainersPort.md create mode 100644 sdks/api/runtime/rust/docs/ContainersPortProtocol.md create mode 100644 sdks/api/runtime/rust/docs/ContainersPortRouting.md create mode 100644 sdks/api/runtime/rust/docs/ContainersQueryLogStream.md create mode 100644 sdks/api/runtime/rust/docs/ContainersResources.md create mode 100644 sdks/api/runtime/rust/docs/ContainersRuntime.md create mode 100644 sdks/api/runtime/rust/docs/ContainersUpgradeAllContainersRequest.md create mode 100644 sdks/api/runtime/rust/docs/ContainersUpgradeAllContainersResponse.md create mode 100644 sdks/api/runtime/rust/docs/ContainersUpgradeContainerRequest.md create mode 100644 sdks/api/runtime/rust/src/apis/actors_metrics_api.rs create mode 100644 sdks/api/runtime/rust/src/apis/containers_api.rs create mode 100644 sdks/api/runtime/rust/src/apis/containers_logs_api.rs create mode 100644 sdks/api/runtime/rust/src/apis/containers_metrics_api.rs create mode 100644 sdks/api/runtime/rust/src/models/actors_get_actor_metrics_response.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_container.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_create_container_network_request.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_create_container_port_request.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_create_container_request.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_create_container_response.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_create_container_runtime_network_request.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_create_container_runtime_request.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_endpoint_type.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_get_container_logs_response.rs create mode 100644 
sdks/api/runtime/rust/src/models/containers_get_container_metrics_response.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_get_container_response.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_lifecycle.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_list_containers_response.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_network.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_network_mode.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_port.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_port_protocol.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_port_routing.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_query_log_stream.rs rename sdks/api/runtime/rust/src/models/{actors_resources.rs => containers_resources.rs} (81%) create mode 100644 sdks/api/runtime/rust/src/models/containers_runtime.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_upgrade_all_containers_request.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_upgrade_all_containers_response.rs create mode 100644 sdks/api/runtime/rust/src/models/containers_upgrade_container_request.rs create mode 100644 sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/Client.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/requests/GetActorMetricsRequestQuery.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/requests/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/types/GetActorMetricsResponse.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/types/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/client/Client.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/client/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/client/requests/CreateContainerRequestQuery.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/client/requests/DestroyContainerRequestQuery.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/client/requests/GetContainersRequestQuery.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/client/requests/ListContainersRequestQuery.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/client/requests/UpgradeAllContainersRequestQuery.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/client/requests/UpgradeContainerRequestQuery.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/client/requests/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/common/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Container.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/EndpointType.ts create mode 100644 
sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/GuardRouting.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/HostRouting.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Lifecycle.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Network.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/NetworkMode.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Port.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/PortProtocol.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/PortRouting.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Resources.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Runtime.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/Client.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/requests/GetContainerLogsRequestQuery.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/requests/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/types/GetContainerLogsResponse.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/types/QueryLogStream.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/types/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/Client.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/requests/GetContainerMetricsRequestQuery.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/requests/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/types/index.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerNetworkRequest.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerPortRequest.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerRequest.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerResponse.ts create mode 100644 
sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerRuntimeRequest.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/types/DestroyContainerResponse.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/types/GetContainerResponse.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/types/ListContainersResponse.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeAllContainersRequest.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeAllContainersResponse.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeContainerRequest.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeContainerResponse.ts create mode 100644 sdks/api/runtime/typescript/src/api/resources/containers/types/index.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/actors/resources/metrics/index.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/actors/resources/metrics/types/GetActorMetricsResponse.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/actors/resources/metrics/types/index.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/index.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/index.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Container.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/EndpointType.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/GuardRouting.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/HostRouting.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Lifecycle.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Network.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/NetworkMode.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Port.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/PortProtocol.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/PortRouting.ts rename sdks/api/runtime/typescript/src/serialization/resources/{actors => containers}/resources/common/types/Resources.ts (58%) create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Runtime.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/index.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/index.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/index.ts create mode 100644 
sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/types/GetContainerLogsResponse.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/types/QueryLogStream.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/types/index.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/metrics/index.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/resources/metrics/types/index.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerNetworkRequest.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerPortRequest.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerRequest.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerResponse.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeRequest.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/types/DestroyContainerResponse.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/types/GetContainerResponse.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/types/ListContainersResponse.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeAllContainersRequest.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeAllContainersResponse.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeContainerRequest.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeContainerResponse.ts create mode 100644 sdks/api/runtime/typescript/src/serialization/resources/containers/types/index.ts create mode 100644 site/src/content/docs/api/actors/v1/create.mdx create mode 100644 site/src/content/docs/api/actors/v1/destroy.mdx create mode 100644 site/src/content/docs/api/actors/v1/get.mdx create mode 100644 site/src/content/docs/api/actors/v1/list.mdx create mode 100644 site/src/content/docs/api/actors/v1/logs/get.mdx create mode 100644 site/src/content/docs/api/actors/v1/metrics/get.mdx create mode 100644 site/src/content/docs/api/actors/v1/upgrade-all.mdx create mode 100644 site/src/content/docs/api/actors/v1/upgrade.mdx diff --git a/packages/common/api-helper/macros/src/lib.rs b/packages/common/api-helper/macros/src/lib.rs index c067f5c707..b2b93838f5 100644 --- a/packages/common/api-helper/macros/src/lib.rs +++ b/packages/common/api-helper/macros/src/lib.rs @@ -36,6 +36,7 @@ const ENDPOINT_ARGUMENTS: &[&str] = &[ ]; struct EndpointRouter { + name: Option, routes: Punctuated, cors_config: Option, mounts: Punctuated, @@ -44,6 +45,7 @@ struct EndpointRouter { impl Parse for EndpointRouter { fn parse(input: ParseStream) -> syn::Result { + let mut name = None; let mut routes = None; let mut cors_config = None; let mut 
mounts = None; @@ -60,6 +62,16 @@ impl Parse for EndpointRouter { // Parse various keys match key.to_string().as_str() { + "name" => { + if name.is_none() { + name = Some(input.parse()?); + } else { + return Err(syn::Error::new( + key.span(), + format!("Duplicate key `{}`.", key), + )); + } + } "routes" => { if routes.is_none() { let routes_content; @@ -134,6 +146,7 @@ impl Parse for EndpointRouter { let mounts = mounts.unwrap_or_default(); Ok(EndpointRouter { + name, routes, cors_config, mounts, @@ -144,6 +157,12 @@ impl Parse for EndpointRouter { impl EndpointRouter { fn render(self) -> syn::Result { + let name = if let Some(name) = self.name { + name.to_token_stream() + } else { + quote! { Router } + }; + let endpoints = self .routes .into_iter() @@ -186,8 +205,8 @@ impl EndpointRouter { .collect::>(); Ok(quote! { - pub struct Router; - impl Router { + pub struct #name; + impl #name { #[doc(hidden)] #[tracing::instrument(level="debug", name = "router_matcher", skip_all)] pub async fn __inner( diff --git a/packages/common/fdb-util/src/keys.rs b/packages/common/fdb-util/src/keys.rs index fadcafa55d..e8f8c95b46 100644 --- a/packages/common/fdb-util/src/keys.rs +++ b/packages/common/fdb-util/src/keys.rs @@ -54,6 +54,7 @@ pub const IMAGE_ID: usize = 52; pub const ACTOR2: usize = 53; pub const PENDING_ACTOR: usize = 54; pub const PENDING_ACTOR_BY_IMAGE_ID: usize = 55; +pub const CONTAINER: usize = 56; // Directories with fdbrs must use string paths instead of tuples pub mod dir { @@ -120,6 +121,7 @@ pub fn key_from_str(key: &str) -> Option { "actor2" => Some(ACTOR2), "pending_actor" => Some(PENDING_ACTOR), "pending_actor_by_image_id" => Some(PENDING_ACTOR_BY_IMAGE_ID), + "container" => Some(CONTAINER), _ => None, } } diff --git a/packages/common/formatted-error/errors/container/failed-to-create.md b/packages/common/formatted-error/errors/container/failed-to-create.md new file mode 100644 index 0000000000..4683a0b848 --- /dev/null +++ b/packages/common/formatted-error/errors/container/failed-to-create.md @@ -0,0 +1,10 @@ +--- +name = "CONTAINER_FAILED_TO_CREATE" +description = "Container failed to create: {error}" +description_basic = "Container failed to create." +http_status = 400 +--- + +# Container Failed To Create + +Container failed to create. diff --git a/packages/common/formatted-error/errors/container/logs/invalid-container-ids.md b/packages/common/formatted-error/errors/container/logs/invalid-container-ids.md new file mode 100644 index 0000000000..09c9577d6c --- /dev/null +++ b/packages/common/formatted-error/errors/container/logs/invalid-container-ids.md @@ -0,0 +1,9 @@ +--- +name = "CONTAINER_LOGS_INVALID_CONTAINER_IDS" +description = "Invalid container IDs format." +http_status = 400 +--- + +# Invalid Container Ids + +The provided list of container IDs is not in a valid JSON format. Please provide a valid JSON array of UUIDs. diff --git a/packages/common/formatted-error/errors/container/logs/no-container-ids.md b/packages/common/formatted-error/errors/container/logs/no-container-ids.md new file mode 100644 index 0000000000..9ab5468d77 --- /dev/null +++ b/packages/common/formatted-error/errors/container/logs/no-container-ids.md @@ -0,0 +1,9 @@ +--- +name = "CONTAINER_LOGS_NO_CONTAINER_IDS" +description = "No container IDs provided." +http_status = 400 +--- + +# No Container Ids + +No container IDs were provided in the request. Please provide at least one valid container ID. 
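For reference, the api-helper macros change earlier in this patch adds an optional `name` key to the router definition: when the key is present the generated struct takes that identifier, and when it is absent the macro keeps emitting the previous hard-coded `Router`. The sketch below is a hypothetical invocation under that assumption; the macro's actual identifier (`define_router!` here) and the route entry syntax are assumptions, since the hunk only shows the parser and render changes for `name`, `routes`, and `mounts`.

// Hypothetical invocation; `define_router!` and the route body are assumptions.
// Only the fallback to a struct named `Router` when `name` is omitted is
// established by the hunk above.
define_router! {
    name: ActorsV1Router,
    routes: {
        // route definitions elided
    },
    mounts: [],
}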
diff --git a/packages/common/formatted-error/errors/container/logs/no-valid-container-ids.md b/packages/common/formatted-error/errors/container/logs/no-valid-container-ids.md new file mode 100644 index 0000000000..06195fdbb4 --- /dev/null +++ b/packages/common/formatted-error/errors/container/logs/no-valid-container-ids.md @@ -0,0 +1,9 @@ +--- +name = "CONTAINER_LOGS_NO_VALID_CONTAINER_IDS" +description = "No valid container IDs found." +http_status = 400 +--- + +# No Valid Container Ids + +None of the provided container IDs are valid for this game/environment. Please provide valid container IDs. diff --git a/packages/common/formatted-error/errors/container/metrics/invalid-interval.md b/packages/common/formatted-error/errors/container/metrics/invalid-interval.md new file mode 100644 index 0000000000..d42499cd6e --- /dev/null +++ b/packages/common/formatted-error/errors/container/metrics/invalid-interval.md @@ -0,0 +1,9 @@ +--- +name = "CONTAINER_METRICS_INVALID_INTERVAL" +description = "Invalid interval provided." +http_status = 400 +--- + +# Invalid Interval + +The provided interval must be greater than 0. Please provide a valid interval value in milliseconds. \ No newline at end of file diff --git a/packages/common/formatted-error/errors/container/metrics/invalid-metrics.md b/packages/common/formatted-error/errors/container/metrics/invalid-metrics.md new file mode 100644 index 0000000000..673a7d4632 --- /dev/null +++ b/packages/common/formatted-error/errors/container/metrics/invalid-metrics.md @@ -0,0 +1,9 @@ +--- +name = "CONTAINER_METRICS_INVALID_METRICS" +description = "Invalid metrics format." +http_status = 400 +--- + +# Invalid Metrics + +The provided list of metrics is not in a valid JSON format. Please provide a valid JSON array of metric names. \ No newline at end of file diff --git a/packages/common/formatted-error/errors/container/metrics/no-metrics.md b/packages/common/formatted-error/errors/container/metrics/no-metrics.md new file mode 100644 index 0000000000..268e0ad4eb --- /dev/null +++ b/packages/common/formatted-error/errors/container/metrics/no-metrics.md @@ -0,0 +1,9 @@ +--- +name = "CONTAINER_METRICS_NO_METRICS" +description = "No metrics specified." +http_status = 400 +--- + +# No Metrics + +No metrics were specified in the request. Please provide at least one metric name to query. \ No newline at end of file diff --git a/packages/common/formatted-error/errors/container/metrics/unsupported-metrics.md b/packages/common/formatted-error/errors/container/metrics/unsupported-metrics.md new file mode 100644 index 0000000000..6b248e92b5 --- /dev/null +++ b/packages/common/formatted-error/errors/container/metrics/unsupported-metrics.md @@ -0,0 +1,9 @@ +--- +name = "CONTAINER_METRICS_UNSUPPORTED_METRICS" +description = "Unsupported metrics requested." +http_status = 400 +--- + +# Unsupported Metrics + +The requested metrics are not supported. Supported metrics include: cpu, memory, memory_limit, network_rx_bytes, network_tx_bytes. \ No newline at end of file diff --git a/packages/common/formatted-error/errors/container/not-found.md b/packages/common/formatted-error/errors/container/not-found.md new file mode 100644 index 0000000000..ea16414450 --- /dev/null +++ b/packages/common/formatted-error/errors/container/not-found.md @@ -0,0 +1,9 @@ +--- +name = "CONTAINER_NOT_FOUND" +description = "Container not found." +http_status = 400 +--- + +# Container Not Found + +Container not found for the given ID. 
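The container error definitions above follow the existing formatted-error convention, where the `name` field is expected to map to the constant raised from Rust. Below is a minimal sketch of how a handler might surface one of them, mirroring the `bail_with!(ACTOR_METRICS_INVALID_INTERVAL)` call that appears later in this patch; the helper function itself is illustrative and not part of the change.

use rivet_operation::prelude::*;

// Illustrative helper, not part of the patch: rejects a non-positive metrics
// interval with the CONTAINER_METRICS_INVALID_INTERVAL error defined above (HTTP 400).
fn validate_container_metrics_interval(interval_ms: i64) -> GlobalResult<()> {
    if interval_ms <= 0 {
        bail_with!(CONTAINER_METRICS_INVALID_INTERVAL);
    }
    Ok(())
}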
diff --git a/packages/common/pools/src/db/sqlite/mod.rs b/packages/common/pools/src/db/sqlite/mod.rs index 2c6d5e7ba2..4239e2349f 100644 --- a/packages/common/pools/src/db/sqlite/mod.rs +++ b/packages/common/pools/src/db/sqlite/mod.rs @@ -508,9 +508,7 @@ impl SqlitePoolManager { .sum::() as f64; // Update state if write was successful - for (key_packed, data) in db_data_to_snapshot { - let hex_key = hex::encode(&**key_packed); - + for (_, data) in db_data_to_snapshot { // Because this was batch processed we don't know the rate for each individual key, just estimate // by calculating the size ratio let ratio = data.len() as f64 / total_data_size; diff --git a/packages/common/util/id/src/lib.rs b/packages/common/util/id/src/lib.rs index 5e120437c8..c77990d5bd 100644 --- a/packages/common/util/id/src/lib.rs +++ b/packages/common/util/id/src/lib.rs @@ -32,6 +32,7 @@ pub enum IdError { #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum Id { + // TODO: Once all old actors are gone, delete the v0 variant V0(Uuid), V1([u8; 18]), } @@ -237,7 +238,7 @@ impl TuplePack for Id { fn pack( &self, w: &mut W, - tuple_depth: TupleDepth, + _tuple_depth: TupleDepth, ) -> std::io::Result { let mut size = 1; @@ -263,7 +264,7 @@ impl TuplePack for Id { } impl<'de> TupleUnpack<'de> for Id { - fn unpack(input: &[u8], tuple_depth: TupleDepth) -> PackResult<(&[u8], Self)> { + fn unpack(input: &[u8], _tuple_depth: TupleDepth) -> PackResult<(&[u8], Self)> { let input = fdb_util::parse_code(input, fdb_util::codes::ID)?; let (input2, version) = fdb_util::parse_byte(input)?; @@ -336,7 +337,7 @@ impl sqlx::postgres::PgHasArrayType for Id { impl Default for Id { fn default() -> Self { - Id::V0(Uuid::new_v4()) + Id::V1(Default::default()) } } diff --git a/packages/core/api/actor/src/assert.rs b/packages/core/api/actor/src/assert.rs index de9d5891cd..d23859d931 100644 --- a/packages/core/api/actor/src/assert.rs +++ b/packages/core/api/actor/src/assert.rs @@ -4,8 +4,8 @@ use futures_util::{ FutureExt, StreamExt, }; use redis::{FromRedisValue, RedisResult, ToRedisArgs, Value}; -use rivet_api::apis::actors_api::actors_get; use rivet_api::apis::configuration::Configuration; +use rivet_api::apis::{actors_api, actors_v1_api, containers_api}; use rivet_cache::CacheKey; use rivet_operation::prelude::*; use std::collections::HashMap; @@ -107,13 +107,13 @@ impl CacheKey for ActorValidationCacheKey { /// c. Validate each actor against its datacenter /// d. Stores validation results in cache /// 5. Returns only the actor IDs that were successfully validated -pub async fn actor_for_env( +pub async fn actor_for_env_v1( ctx: &Ctx, - actor_ids: &[util::Id], + actor_ids: &[Uuid], game_id: Uuid, env_id: Uuid, _error_code: Option<&'static str>, -) -> GlobalResult> { +) -> GlobalResult> { if actor_ids.is_empty() { return Ok(Vec::new()); } @@ -124,7 +124,7 @@ pub async fn actor_for_env( .map(|&actor_id| ActorValidationCacheKey { game_id, env_id, - actor_id, + actor_id: actor_id.into(), }) .collect::>(); @@ -274,6 +274,270 @@ pub async fn actor_for_env( // from the stream so that we can skip validation tasks. 
let validation_results = Arc::new(Mutex::new(HashMap::::new())); + // Create a stream of all datacenter + actor_id combinations + let mut validation_tasks = + stream::iter(actor_ids_to_validate.into_iter().flat_map(|actor_id| { + filtered_datacenters + .iter() + .map(|dc| (dc.name_id.clone(), actor_id)) + .collect::>() + })) + .map(|(dc_name_id, actor_id)| { + let validation_results = validation_results.clone(); + let game_name_id = game_name_id.clone(); + let env_name_id = env_name_id.clone(); + + async move { + // Skip this task if actor already validated + { + let map = validation_results.lock().await; + if map.get(&actor_id).map_or(false, |&v| v) { + return GlobalResult::Ok(()); + } + } + + let config = Configuration { + client: rivet_pools::reqwest::client().await?, + base_path: ctx + .config() + .server()? + .rivet + .edge_api_url_str(&dc_name_id)?, + bearer_access_token: ctx.auth().api_token.clone(), + ..Default::default() + }; + + // Pass the request to the edge api with project and environment name_ids + match actors_v1_api::actors_v1_get( + &config, + &actor_id.to_string(), + Some(&game_name_id), + Some(&env_name_id), + None, // endpoint_type + ) + .await + { + Ok(_) => { + // Actor exists and belongs to this game/env + let mut map = validation_results.lock().await; + map.insert(actor_id, true); + } + Err(err) => { + tracing::debug!(?err, ?actor_id, "Actor validation failed"); + // Only mark as invalid if not already validated + let mut map = validation_results.lock().await; + map.entry(actor_id).or_insert(false); + } + }; + + GlobalResult::Ok(()) + } + .boxed() + }) + .buffer_unordered(16); // Process up to 16 concurrent validation requests + + // Process results (just consume the stream) + while let Some(_) = validation_tasks.next().await {} + + // Get the validation results + let validation_results = validation_results.lock().await.clone(); + + // Resolve cache entries + for key in keys_to_fetch { + let is_valid = validation_results + .get(&key.actor_id) + .copied() + .unwrap_or(false); + + // Add to cache + cache.resolve( + &key, + ActorValidationData { + is_valid, + game_name_id: game_name_id.clone(), + env_name_id: env_name_id.clone(), + }, + ); + } + + Ok(cache) + } + } + }) + .await?; + + // Filter valid actor IDs + let valid_actors = actor_ids + .iter() + .filter_map(|&actor_id| { + let cache_key = ActorValidationCacheKey { + game_id, + env_id, + actor_id: actor_id.into(), + }; + + // Check if the actor is valid in the cache results + actor_validation_results + .iter() + .find(|(k, _)| *k == cache_key) + .and_then( + |(_, data)| { + if data.is_valid { + Some(actor_id) + } else { + None + } + }, + ) + }) + .collect::>(); + + Ok(valid_actors) +} + +/// Returns a list of valid actor IDs that belong to the given environment and game. +/// Filters out any invalid actor IDs silently. +/// +/// Process steps: +/// 1. Creates cache keys for each actor ID to check +/// 2. Retrieves game and environment metadata +/// 3. Uses a batch caching mechanism to efficiently validate multiple actors +/// 4. For actors not in cache: +/// a. Retrieves cluster and datacenter information +/// b. Filters for valid datacenters with worker/guard pools +/// c. Validate each actor against its datacenter +/// d. Stores validation results in cache +/// 5. 
Returns only the actor IDs that were successfully validated +pub async fn actor_for_env( + ctx: &Ctx, + actor_ids: &[util::Id], + game_id: Uuid, + env_id: Uuid, + _error_code: Option<&'static str>, +) -> GlobalResult> { + if actor_ids.is_empty() { + return Ok(Vec::new()); + } + + // Create cache keys for each actor ID + let cache_keys = actor_ids + .iter() + .map(|&actor_id| ActorValidationCacheKey { + game_id, + env_id, + actor_id, + }) + .collect::>(); + + // Get game and environment information + let game_res = match op!([ctx] game_get { + game_ids: vec![game_id.into()], + }) + .await + { + Ok(res) => res, + Err(err) => { + tracing::warn!(?err, ?game_id, "Failed to get game"); + return Ok(Vec::new()); + } + }; + + let game = match game_res.games.first() { + Some(game) => game, + None => { + tracing::warn!(?game_id, "Game not found"); + return Ok(Vec::new()); + } + }; + + let env_res = match op!([ctx] game_namespace_get { + namespace_ids: vec![env_id.into()], + }) + .await + { + Ok(res) => res, + Err(err) => { + tracing::warn!(?err, ?env_id, "Failed to get environment"); + return Ok(Vec::new()); + } + }; + + let env = match env_res.namespaces.first() { + Some(env) => env, + None => { + tracing::warn!(?env_id, "Environment not found"); + return Ok(Vec::new()); + } + }; + + // Setup shared context for batch validation + let game_name_id = game.name_id.clone(); + let env_name_id = env.name_id.clone(); + + // Use batch caching for actor validation + let actor_validation_results = ctx + .cache() + .fetch_all("actor_validation", cache_keys, { + let game_name_id = game_name_id.clone(); + let env_name_id = env_name_id.clone(); + + move |mut cache, keys_to_fetch| { + let game_name_id = game_name_id.clone(); + let env_name_id = env_name_id.clone(); + + async move { + // We don't need to track game/env pairs since they should all be the same + // in a given call, but we could verify it if needed + let _game_env_pairs = keys_to_fetch + .iter() + .map(|key| (key.game_id, key.env_id)) + .collect::>(); + + // Get actor IDs to validate + let actor_ids_to_validate = keys_to_fetch + .iter() + .map(|key| key.actor_id) + .collect::>(); + + if actor_ids_to_validate.is_empty() { + return Ok(cache); + } + + let labels = actor_ids_to_validate + .iter() + .flat_map(|id| id.label()) + .collect::>(); + let dcs_res = match ctx + .op(cluster::ops::datacenter::get_for_label::Input { + labels: labels.clone(), + }) + .await + { + Ok(res) => res, + Err(err) => { + tracing::warn!(?err, ?labels, "Failed to get datacenters for labels"); + return Ok(cache); + } + }; + + // Filter valid datacenters + let filtered_datacenters = dcs_res + .datacenters + .into_iter() + .filter(|dc| { + crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false) + }) + .collect::>(); + + if filtered_datacenters.is_empty() { + tracing::warn!("No valid datacenters with worker and guard pools"); + return Ok(cache); + } + + // Track validation results for each actor. This is done instead of collecting the results + // from the stream so that we can skip validation tasks. 
+ let validation_results = Arc::new(Mutex::new(HashMap::::new())); + // Create a stream of all datacenter + actor_id combinations let mut validation_tasks = stream::iter(actor_ids_to_validate.into_iter().flat_map(|actor_id| { @@ -318,7 +582,7 @@ pub async fn actor_for_env( }; // Pass the request to the edge api with project and environment name_ids - match actors_get( + match actors_api::actors_get( &config, &actor_id.to_string(), Some(&game_name_id), @@ -404,3 +668,281 @@ pub async fn actor_for_env( Ok(valid_actors) } + +/// Returns a list of valid container IDs that belong to the given environment and game. +/// Filters out any invalid container IDs silently. +/// +/// Process steps: +/// 1. Creates cache keys for each container ID to check +/// 2. Retrieves game and environment metadata +/// 3. Uses a batch caching mechanism to efficiently validate multiple containers +/// 4. For containers not in cache: +/// a. Retrieves cluster and datacenter information +/// b. Filters for valid datacenters with worker/guard pools +/// c. Validate each container against its datacenter +/// d. Stores validation results in cache +/// 5. Returns only the container IDs that were successfully validated +pub async fn container_for_env( + ctx: &Ctx, + container_ids: &[util::Id], + game_id: Uuid, + env_id: Uuid, + _error_code: Option<&'static str>, +) -> GlobalResult> { + if container_ids.is_empty() { + return Ok(Vec::new()); + } + + // Create cache keys for each container ID + let cache_keys = container_ids + .iter() + .map(|&container_id| ActorValidationCacheKey { + game_id, + env_id, + actor_id: container_id, + }) + .collect::>(); + + // Get game and environment information + let game_res = match op!([ctx] game_get { + game_ids: vec![game_id.into()], + }) + .await + { + Ok(res) => res, + Err(err) => { + tracing::warn!(?err, ?game_id, "Failed to get game"); + return Ok(Vec::new()); + } + }; + + let game = match game_res.games.first() { + Some(game) => game, + None => { + tracing::warn!(?game_id, "Game not found"); + return Ok(Vec::new()); + } + }; + + let env_res = match op!([ctx] game_namespace_get { + namespace_ids: vec![env_id.into()], + }) + .await + { + Ok(res) => res, + Err(err) => { + tracing::warn!(?err, ?env_id, "Failed to get environment"); + return Ok(Vec::new()); + } + }; + + let env = match env_res.namespaces.first() { + Some(env) => env, + None => { + tracing::warn!(?env_id, "Environment not found"); + return Ok(Vec::new()); + } + }; + + // Setup shared context for batch validation + let game_name_id = game.name_id.clone(); + let env_name_id = env.name_id.clone(); + + // Use batch caching for container validation + let container_validation_results = ctx + .cache() + .fetch_all("container_validation", cache_keys, { + let game_name_id = game_name_id.clone(); + let env_name_id = env_name_id.clone(); + + move |mut cache, keys_to_fetch| { + let game_name_id = game_name_id.clone(); + let env_name_id = env_name_id.clone(); + + async move { + // We don't need to track game/env pairs since they should all be the same + // in a given call, but we could verify it if needed + let _game_env_pairs = keys_to_fetch + .iter() + .map(|key| (key.game_id, key.env_id)) + .collect::>(); + + // Get container IDs to validate + let container_ids_to_validate = keys_to_fetch + .iter() + .map(|key| key.actor_id) + .collect::>(); + + if container_ids_to_validate.is_empty() { + return Ok(cache); + } + + let labels = container_ids_to_validate + .iter() + .flat_map(|id| id.label()) + .collect::>(); + let dcs_res = match ctx 
+ .op(cluster::ops::datacenter::get_for_label::Input { + labels: labels.clone(), + }) + .await + { + Ok(res) => res, + Err(err) => { + tracing::warn!(?err, ?labels, "Failed to get datacenters for labels"); + return Ok(cache); + } + }; + + // Filter valid datacenters + let filtered_datacenters = dcs_res + .datacenters + .into_iter() + .filter(|dc| { + crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false) + }) + .collect::>(); + + if filtered_datacenters.is_empty() { + tracing::warn!("No valid datacenters with worker and guard pools"); + return Ok(cache); + } + + // Track validation results for each container. This is done instead of collecting the results + // from the stream so that we can skip validation tasks. + let validation_results = Arc::new(Mutex::new(HashMap::::new())); + + // Create a stream of all datacenter + container_id combinations + let mut validation_tasks = + stream::iter(container_ids_to_validate.into_iter().flat_map( + |container_id| { + // If the container has the datacenter label in its id, use that instead of all dcs + if let Some(label) = container_id.label() { + filtered_datacenters + .iter() + .find(|dc| dc.label() == label) + .iter() + .map(|dc| (dc.name_id.clone(), container_id)) + .collect::>() + } else { + filtered_datacenters + .iter() + .map(|dc| (dc.name_id.clone(), container_id)) + .collect::>() + } + }, + )) + .map(|(dc_name_id, container_id)| { + let validation_results = validation_results.clone(); + let game_name_id = game_name_id.clone(); + let env_name_id = env_name_id.clone(); + + async move { + // Skip this task if container already validated + { + let map = validation_results.lock().await; + if map.get(&container_id).map_or(false, |&v| v) { + return GlobalResult::Ok(()); + } + } + + let config = Configuration { + client: rivet_pools::reqwest::client().await?, + base_path: ctx + .config() + .server()? 
+ .rivet + .edge_api_url_str(&dc_name_id)?, + bearer_access_token: ctx.auth().api_token.clone(), + ..Default::default() + }; + + // Pass the request to the edge api with project and environment name_ids + match containers_api::containers_get( + &config, + &container_id.to_string(), + Some(&game_name_id), + Some(&env_name_id), + None, // endpoint_type + ) + .await + { + Ok(_) => { + // container exists and belongs to this game/env + let mut map = validation_results.lock().await; + map.insert(container_id, true); + } + Err(err) => { + tracing::debug!( + ?err, + ?container_id, + "Container validation failed" + ); + // Only mark as invalid if not already validated + let mut map = validation_results.lock().await; + map.entry(container_id).or_insert(false); + } + }; + + GlobalResult::Ok(()) + } + .boxed() + }) + .buffer_unordered(16); // Process up to 16 concurrent validation requests + + // Process results (just consume the stream) + while let Some(_) = validation_tasks.next().await {} + + // Get the validation results + let validation_results = validation_results.lock().await.clone(); + + // Resolve cache entries + for key in keys_to_fetch { + let is_valid = validation_results + .get(&key.actor_id) + .copied() + .unwrap_or(false); + + // Add to cache + cache.resolve( + &key, + ActorValidationData { + is_valid, + game_name_id: game_name_id.clone(), + env_name_id: env_name_id.clone(), + }, + ); + } + + Ok(cache) + } + } + }) + .await?; + + // Filter valid container IDs + let valid_containers = container_ids + .iter() + .filter_map(|&container_id| { + let cache_key = ActorValidationCacheKey { + game_id, + env_id, + actor_id: container_id, + }; + + // Check if the container is valid in the cache results + container_validation_results + .iter() + .find(|(k, _)| *k == cache_key) + .and_then(|(_, data)| { + if data.is_valid { + Some(container_id) + } else { + None + } + }) + }) + .collect::>(); + + Ok(valid_containers) +} diff --git a/packages/core/api/actor/src/route/actors/logs.rs b/packages/core/api/actor/src/route/actors/logs.rs new file mode 100644 index 0000000000..596625b7b7 --- /dev/null +++ b/packages/core/api/actor/src/route/actors/logs.rs @@ -0,0 +1,197 @@ +use api_helper::{ + anchor::{WatchIndexQuery, WatchResponse}, + ctx::Ctx, +}; +use proto::backend; +use rivet_api::models; +use rivet_operation::prelude::*; +use serde::Deserialize; +use std::time::Duration; + +use crate::{ + assert, + auth::{Auth, CheckOpts, CheckOutput}, +}; + +use super::GlobalQuery; + +// MARK: GET /v2/actors/{}/logs +#[derive(Debug, Deserialize)] +pub struct GetActorLogsQuery { + #[serde(flatten)] + pub global: GlobalQuery, + /// JSON-encoded user query expression for filtering logs + pub query_json: Option, +} + +#[tracing::instrument(skip_all)] +pub async fn get_logs( + ctx: Ctx, + watch_index: WatchIndexQuery, + query: GetActorLogsQuery, +) -> GlobalResult { + let CheckOutput { game_id, env_id } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: false, + opt_auth: false, + }, + ) + .await?; + + // Parse user query expression if provided + let user_query_expr = if let Some(query_json) = &query.query_json { + let expr = match serde_json::from_str::(query_json) { + Ok(expr) => expr, + Err(e) => { + bail_with!(API_BAD_QUERY, error = e.to_string()); + } + }; + Some(expr) + } else { + // No query provided, return empty result + None + }; + + // Timestamp to start the query at + let before_nts = util::timestamp::now() * 1_000_000; + + // Handle anchor + let logs_res = 
if let Some(anchor) = watch_index.as_i64()? { + let query_start = tokio::time::Instant::now(); + let user_query_expr_clone = user_query_expr.clone(); + + // Poll for new logs + let logs_res = loop { + // Read logs after the timestamp + // + // We read descending in order to get at most 256 of the most recent logs. If we used + // asc, we would be paginating through all the logs which would likely fall behind + // actual stream and strain the database. + // + // We return fewer logs than the non-anchor request since this will be called + // frequently and should not return a significant amount of logs. + let logs_res = ctx + .op(pegboard::ops::actor::log::read_with_query::Input { + env_id, + count: 64, + order_by: pegboard::ops::actor::log::read_with_query::Order::Desc, + query: pegboard::ops::actor::log::read_with_query::Query::AfterNts(anchor), + user_query_expr: user_query_expr_clone.clone(), + }) + .await?; + + // Return logs + if !logs_res.entries.is_empty() { + break logs_res; + } + + // Timeout cleanly + if query_start.elapsed().as_millis() > util::watch::DEFAULT_TIMEOUT as u128 { + break pegboard::ops::actor::log::read_with_query::Output { + entries: Vec::new(), + }; + } + + // Throttle request + // + // We don't use `tokio::time::interval` because if the request takes longer than 500 + // ms, we'll enter a tight loop of requests. + tokio::time::sleep(Duration::from_millis(1000)).await; + }; + + // Since we're using watch, we don't want this request to return immediately if there's new + // results. Add an artificial timeout in order to prevent a tight loop if there's a high + // log frequency. + tokio::time::sleep_until(query_start + Duration::from_secs(1)).await; + + logs_res + } else { + // Read most recent logs + ctx.op(pegboard::ops::actor::log::read_with_query::Input { + env_id, + count: 256, + order_by: pegboard::ops::actor::log::read_with_query::Order::Desc, + query: pegboard::ops::actor::log::read_with_query::Query::BeforeNts(before_nts), + user_query_expr: user_query_expr.clone(), + }) + .await? 
+ }; + + // Convert to old Output format for compatibility + let logs_res = pegboard::ops::actor::log::read::Output { + entries: logs_res + .entries + .into_iter() + .map(|e| pegboard::ops::actor::log::read::LogEntry { + ts: e.ts, + message: e.message, + stream_type: e.stream_type, + actor_id: e.actor_id, + }) + .collect(), + }; + + // Build actor_ids map for lookup + let mut actor_id_to_index: std::collections::HashMap = + std::collections::HashMap::new(); + let mut unique_actor_ids: Vec = Vec::new(); + + // Collect unique actor IDs and map them to indices + for entry in &logs_res.entries { + if !actor_id_to_index.contains_key(&entry.actor_id) { + actor_id_to_index.insert(entry.actor_id.clone(), unique_actor_ids.len() as i32); + unique_actor_ids.push(entry.actor_id.to_string()); + } + } + + // Convert logs + let mut lines = logs_res + .entries + .iter() + .map(|entry| base64::encode(&entry.message)) + .collect::>(); + let mut timestamps = logs_res + .entries + .iter() + // Is nanoseconds + .map(|x| x.ts / 1_000_000) + .map(util::timestamp::to_string) + .collect::, _>>()?; + let mut streams = logs_res + .entries + .iter() + .map(|x| x.stream_type as i32) + .collect::>(); + let mut foreigns = logs_res + .entries + .iter() + .map(|x| x.foreign) + .collect::>(); + let mut actor_indices = logs_res + .entries + .iter() + .map(|x| *actor_id_to_index.get(&x.actor_id).unwrap_or(&0)) + .collect::>(); + + // Order desc + lines.reverse(); + timestamps.reverse(); + streams.reverse(); + foreigns.reverse(); + actor_indices.reverse(); + + let watch_nts = logs_res.entries.first().map_or(before_nts, |x| x.ts); + Ok(models::ActorsGetActorLogsResponse { + actor_ids: unique_actor_ids, + lines, + timestamps, + streams, + foreigns, + actor_indices, + watch: WatchResponse::new_as_model(watch_nts), + }) +} diff --git a/packages/core/api/actor/src/route/actors/metrics.rs b/packages/core/api/actor/src/route/actors/metrics.rs new file mode 100644 index 0000000000..b409381f17 --- /dev/null +++ b/packages/core/api/actor/src/route/actors/metrics.rs @@ -0,0 +1,336 @@ +use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; +use rivet_api::models; +use rivet_operation::prelude::*; +use serde::Deserialize; +use std::collections::{BTreeMap, HashMap}; + +use crate::{ + assert, + auth::{Auth, CheckOpts, CheckOutput}, +}; + +use super::GlobalQuery; + +#[derive(Debug, Deserialize)] +pub struct GetActorMetricsQuery { + #[serde(flatten)] + pub global: GlobalQuery, + pub start: i64, + pub end: i64, + pub interval: i64, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum MetricType { + Counter, + Gauge, +} + +impl MetricType { + pub fn as_str(&self) -> &'static str { + match self { + MetricType::Counter => "counter", + MetricType::Gauge => "gauge", + } + } +} + +#[derive(Debug, clickhouse::Row, serde::Deserialize)] +pub struct MetricRow { + pub time_bucket_index: u32, + pub metric_name: String, + pub value: f64, + pub tcp_state: String, + pub udp_state: String, + pub device: String, + pub failure_type: String, + pub scope: String, + pub task_state: String, + pub interface: String, +} + +#[derive(Debug)] +pub struct ProcessedMetricRow { + pub row: MetricRow, + pub metric_type: MetricType, +} + +// TODO: Move contents of this to an op +#[tracing::instrument(skip_all)] +pub async fn get_metrics( + ctx: Ctx, + actor_id: util::Id, + _watch_index: WatchIndexQuery, + query: GetActorMetricsQuery, +) -> GlobalResult { + let CheckOutput { game_id, env_id } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: 
&query.global, + allow_service_token: false, + opt_auth: false, + }, + ) + .await?; + + // Validate the actor belongs to this game/env + assert::actor_for_env(&ctx, &[actor_id], game_id, env_id, None).await?; + + let clickhouse = ctx.clickhouse().await?; + + // Convert milliseconds to seconds for ClickHouse + let start_seconds = query.start / 1000; + let end_seconds = query.end / 1000; + let interval_seconds = query.interval / 1000; + + if interval_seconds == 0 { + bail_with!(ACTOR_METRICS_INVALID_INTERVAL); + } + + let prefix = "/system.slice/pegboard-runner-{}-"; + + // Query gauge metrics (current values) + let gauge_query = indoc!( + " + WITH runner_data AS ( + SELECT runner_id + FROM db_pegboard_runner.actor_runners + WHERE actor_id = ? + LIMIT 1 + ) + SELECT + toUInt32(floor((toUnixTimestamp(TimeUnix) - ?) / ?)) as time_bucket_index, + MetricName as metric_name, + max(Value) as value, + COALESCE(Attributes['tcp_state'], '') as tcp_state, + COALESCE(Attributes['udp_state'], '') as udp_state, + COALESCE(Attributes['device'], '') as device, + COALESCE(Attributes['failure_type'], '') as failure_type, + COALESCE(Attributes['scope'], '') as scope, + COALESCE(Attributes['state'], '') as task_state, + COALESCE(Attributes['interface'], '') as interface + FROM otel.otel_metrics_gauge + WHERE + TimeUnix >= fromUnixTimestamp(?) + AND TimeUnix <= fromUnixTimestamp(?) + AND MetricName IN [ + 'container_cpu_load_average_10s', + 'container_file_descriptors', + 'container_last_seen', + 'container_memory_usage_bytes', + 'container_memory_working_set_bytes', + 'container_memory_cache', + 'container_memory_rss', + 'container_memory_swap', + 'container_memory_mapped_file', + 'container_memory_max_usage_bytes', + 'container_network_tcp_usage_total', + 'container_network_tcp6_usage_total', + 'container_network_udp_usage_total', + 'container_network_udp6_usage_total', + 'container_sockets', + 'container_spec_cpu_period', + 'container_spec_cpu_shares', + 'container_spec_memory_limit_bytes', + 'container_spec_memory_reservation_limit_bytes', + 'container_spec_memory_swap_limit_bytes', + 'container_start_time_seconds', + 'container_tasks_state', + 'container_threads', + 'container_threads_max', + 'container_processes' + ] + AND has(Attributes, 'id') + AND startsWith(Attributes['id'], concat(?, runner_data.runner_id, '-')) + GROUP BY time_bucket_index, metric_name, tcp_state, udp_state, device, failure_type, scope, task_state, interface + ORDER BY time_bucket_index ASC, metric_name + " + ); + + let gauge_future = clickhouse + .query(&gauge_query) + .bind(&actor_id) + .bind(start_seconds) + .bind(interval_seconds) + .bind(start_seconds) + .bind(end_seconds) + .bind(&prefix) + .fetch_all::(); + + // Query sum metrics (rates/counters) + let sum_query = indoc!( + " + WITH runner_data AS ( + SELECT runner_id + FROM db_pegboard_runner.actor_runners + WHERE actor_id = ? + LIMIT 1 + ) + SELECT + toUInt32(floor((toUnixTimestamp(TimeUnix) - ?) / ?)) as time_bucket_index, + MetricName as metric_name, + max(Value) as value, + COALESCE(Attributes['tcp_state'], '') as tcp_state, + COALESCE(Attributes['udp_state'], '') as udp_state, + COALESCE(Attributes['device'], '') as device, + COALESCE(Attributes['failure_type'], '') as failure_type, + COALESCE(Attributes['scope'], '') as scope, + COALESCE(Attributes['state'], '') as task_state, + COALESCE(Attributes['interface'], '') as interface + FROM otel.otel_metrics_sum + WHERE + TimeUnix >= fromUnixTimestamp(?) + AND TimeUnix <= fromUnixTimestamp(?) 
+ AND MetricName IN [ + 'container_cpu_schedstat_run_periods_total', + 'container_cpu_schedstat_run_seconds_total', + 'container_cpu_schedstat_runqueue_seconds_total', + 'container_cpu_system_seconds_total', + 'container_cpu_user_seconds_total', + 'container_cpu_usage_seconds_total', + 'container_memory_failcnt', + 'container_memory_failures_total', + 'container_fs_reads_bytes_total', + 'container_fs_writes_bytes_total', + 'container_network_receive_bytes_total', + 'container_network_receive_errors_total', + 'container_network_receive_packets_dropped_total', + 'container_network_receive_packets_total', + 'container_network_transmit_bytes_total', + 'container_network_transmit_errors_total', + 'container_network_transmit_packets_dropped_total', + 'container_network_transmit_packets_total' + ] + AND has(Attributes, 'id') + AND startsWith(Attributes['id'], concat(?, runner_data.runner_id, '-')) + GROUP BY time_bucket_index, metric_name, tcp_state, udp_state, device, failure_type, scope, task_state, interface + ORDER BY time_bucket_index ASC, metric_name + " + ); + + let sum_future = clickhouse + .query(&sum_query) + .bind(&actor_id) + .bind(start_seconds) + .bind(interval_seconds) + .bind(start_seconds) + .bind(end_seconds) + .bind(&prefix) + .fetch_all::(); + + let (gauge_rows, sum_rows) = + tokio::try_join!(gauge_future, sum_future).map_err(|err| GlobalError::from(err))?; + + // Map metric types based on query source + let gauge_rows: Vec = gauge_rows + .into_iter() + .map(|row| ProcessedMetricRow { + row, + metric_type: MetricType::Gauge, + }) + .collect(); + + let sum_rows: Vec = sum_rows + .into_iter() + .map(|row| ProcessedMetricRow { + row, + metric_type: MetricType::Counter, + }) + .collect(); + + // Combine both result sets + let mut rows = gauge_rows; + rows.extend(sum_rows); + + // Calculate the number of time buckets we expect + let num_buckets = ((end_seconds - start_seconds) / interval_seconds + 1) as usize; + + // Use HashMap to store metrics with their attributes and types + let mut metrics: HashMap<(String, BTreeMap), (String, Vec)> = + HashMap::new(); + + // Process rows and organize by metric name + attributes + for processed_row in rows { + let row = &processed_row.row; + if row.time_bucket_index >= num_buckets as u32 { + continue; + } + let bucket_idx = row.time_bucket_index as usize; + + // Build attributes map for this row + let mut attributes = BTreeMap::new(); + + // Add non-empty attributes + if !row.tcp_state.is_empty() { + attributes.insert("tcp_state".to_string(), row.tcp_state.clone()); + } + if !row.udp_state.is_empty() { + attributes.insert("udp_state".to_string(), row.udp_state.clone()); + } + if !row.device.is_empty() { + attributes.insert("device".to_string(), row.device.clone()); + } + if !row.failure_type.is_empty() { + attributes.insert("failure_type".to_string(), row.failure_type.clone()); + } + if !row.scope.is_empty() { + attributes.insert("scope".to_string(), row.scope.clone()); + } + if !row.task_state.is_empty() { + attributes.insert("state".to_string(), row.task_state.clone()); + } + if !row.interface.is_empty() { + attributes.insert("interface".to_string(), row.interface.clone()); + } + + // Create metric key (metric name + attributes) + let metric_key = (row.metric_name.clone(), attributes); + + // Initialize metric entry if it doesn't exist + let (_existing_type, metric_values) = metrics.entry(metric_key).or_insert_with(|| { + ( + processed_row.metric_type.as_str().to_string(), + vec![0.0; num_buckets], + ) + }); + + // Add or set the value based 
on metric type + match processed_row.metric_type { + MetricType::Counter => { + metric_values[bucket_idx] += row.value; + } + MetricType::Gauge => { + metric_values[bucket_idx] = row.value; + } + } + } + + // Convert HashMap to ordered vectors for response + let mut metric_names = Vec::new(); + let mut metric_attributes = Vec::new(); + let mut metric_types = Vec::new(); + let mut metric_values = Vec::new(); + + // Sort metrics by name for consistent ordering + let mut sorted_metrics: Vec<_> = metrics.into_iter().collect(); + sorted_metrics.sort_by(|a, b| a.0 .0.cmp(&b.0 .0)); + + for ((name, attributes), (metric_type, values)) in sorted_metrics { + metric_names.push(name); + // Convert BTreeMap back to HashMap for the API response + let attributes_hashmap: HashMap = attributes.into_iter().collect(); + metric_attributes.push(attributes_hashmap); + metric_types.push(metric_type); + metric_values.push(values); + } + + Ok(models::ActorsGetActorMetricsResponse { + actor_ids: vec![actor_id.to_string()], + metric_names, + metric_attributes, + metric_types, + metric_values, + }) +} diff --git a/packages/core/api/actor/src/route/actors/mod.rs b/packages/core/api/actor/src/route/actors/mod.rs new file mode 100644 index 0000000000..626c071107 --- /dev/null +++ b/packages/core/api/actor/src/route/actors/mod.rs @@ -0,0 +1,866 @@ +use std::{collections::HashMap, time::Duration}; + +use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; +use futures_util::{StreamExt, TryStreamExt}; +use proto::backend; +use rivet_api::{ + apis::{actors_api, configuration::Configuration}, + models, +}; +use rivet_operation::prelude::*; +use serde::Deserialize; +use tracing::Instrument; + +use crate::auth::{Auth, CheckOpts, CheckOutput}; + +use super::GlobalQuery; + +pub mod logs; +pub mod metrics; +pub mod v1; + +#[derive(Debug, Clone, Deserialize)] +pub struct GlobalEndpointTypeQuery { + #[serde(flatten)] + global: GlobalQuery, + endpoint_type: Option, +} + +// MARK: GET /v2/actors/{} +#[tracing::instrument(skip_all)] +pub async fn get( + ctx: Ctx, + actor_id: util::Id, + _watch_index: WatchIndexQuery, + query: GlobalEndpointTypeQuery, +) -> GlobalResult { + let CheckOutput { game_id, .. } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let dcs = if let Some(label) = actor_id.label() { + ctx.op(cluster::ops::datacenter::get_for_label::Input { + labels: vec![label], + }) + .await? + .datacenters + } else { + // Fetch all datacenters + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], + }) + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_list_res = ctx + .op(cluster::ops::datacenter::list::Input { + cluster_ids: vec![cluster_id], + }) + .await?; + let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); + ctx.op(cluster::ops::datacenter::get::Input { + datacenter_ids: cluster.datacenter_ids, + }) + .await? 
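Aside: both metrics handlers in this patch fold ClickHouse rows into fixed-length time-bucket vectors keyed by metric name plus attribute set, summing counter samples and overwriting gauge samples within a bucket. Below is a self-contained sketch of that folding step with simplified stand-in types (no ClickHouse involved); `num_buckets` corresponds to `(end - start) / interval + 1` in the handlers.

```rust
use std::collections::{BTreeMap, HashMap};

#[derive(Clone, Copy)]
enum MetricType {
    Counter,
    Gauge,
}

struct Row {
    bucket: usize,                   // time_bucket_index from the query
    name: String,                    // MetricName
    attrs: BTreeMap<String, String>, // non-empty attribute columns
    value: f64,
    ty: MetricType,
}

/// Fold rows into per-metric vectors with one slot per time bucket.
fn aggregate(
    rows: Vec<Row>,
    num_buckets: usize,
) -> HashMap<(String, BTreeMap<String, String>), Vec<f64>> {
    let mut metrics: HashMap<(String, BTreeMap<String, String>), Vec<f64>> = HashMap::new();

    for row in rows {
        // Rows outside the requested window are dropped, mirroring the bounds check above
        if row.bucket >= num_buckets {
            continue;
        }

        let values = metrics
            .entry((row.name, row.attrs))
            .or_insert_with(|| vec![0.0; num_buckets]);

        match row.ty {
            // Counters accumulate within a bucket
            MetricType::Counter => values[row.bucket] += row.value,
            // Gauges take the row's value for the bucket (the SQL already reduces them with max)
            MetricType::Gauge => values[row.bucket] = row.value,
        }
    }

    metrics
}

fn main() {
    let rows = vec![
        Row { bucket: 0, name: "container_cpu_usage_seconds_total".into(), attrs: BTreeMap::new(), value: 1.5, ty: MetricType::Counter },
        Row { bucket: 0, name: "container_cpu_usage_seconds_total".into(), attrs: BTreeMap::new(), value: 0.5, ty: MetricType::Counter },
        Row { bucket: 1, name: "container_memory_usage_bytes".into(), attrs: BTreeMap::new(), value: 42.0, ty: MetricType::Gauge },
    ];
    // With the query parameters above, num_buckets = (end - start) / interval + 1
    let out = aggregate(rows, 2);
    let key = ("container_cpu_usage_seconds_total".to_string(), BTreeMap::<String, String>::new());
    assert_eq!(out[&key][0], 2.0);
    println!("{} metric series", out.len());
}
```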
+ .datacenters + }; + + // Filter the datacenters that can be contacted + let filtered_datacenters = dcs + .into_iter() + .filter(|dc| crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false)) + .collect::>(); + + if filtered_datacenters.is_empty() { + bail!("no valid datacenters with worker and guard pools"); + } + + // Query every datacenter for the given actor + let mut futures = filtered_datacenters + .into_iter() + .map(|dc| async { + let dc = dc; + + let config = Configuration { + client: rivet_pools::reqwest::client().await?, + base_path: ctx.config().server()?.rivet.edge_api_url_str(&dc.name_id)?, + bearer_access_token: ctx.auth().api_token.clone(), + ..Default::default() + }; + + // Pass the request to the edge api + use actors_api::ActorsGetError::*; + match actors_api::actors_get( + &config, + &actor_id.to_string(), + query.global.project.as_deref(), + query.global.environment.as_deref(), + query.endpoint_type, + ) + .await + { + Ok(res) => Ok(res), + Err(rivet_api::apis::Error::ResponseError(content)) => match content.entity { + Some(Status400(body)) + | Some(Status403(body)) + | Some(Status404(body)) + | Some(Status408(body)) + | Some(Status429(body)) + | Some(Status500(body)) => Err(GlobalError::bad_request_builder(&body.code) + .http_status(content.status) + .message(body.message) + .build()), + _ => bail!("unknown error: {:?} {:?}", content.status, content.content), + }, + Err(err) => bail!("request error: {err:?}"), + } + }) + .collect::>(); + let mut last_error = None; + + // Return first api response that succeeds + while let Some(result) = futures.next().await { + match result { + Ok(value) => return Ok(value), + Err(err) => last_error = Some(err), + } + } + + // Otherwise return the last error + Err(unwrap!(last_error)) +} + +// MARK: POST /v2/actors +#[tracing::instrument(skip_all)] +pub async fn create( + ctx: Ctx, + body: models::ActorsCreateActorRequest, + query: GlobalEndpointTypeQuery, +) -> GlobalResult { + let CheckOutput { game_id, .. 
} = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], + }) + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_name_id = resolve_dc(&ctx, cluster_id, body.region.clone()).await?; + + let config = Configuration { + client: rivet_pools::reqwest::client().await?, + base_path: ctx.config().server()?.rivet.edge_api_url_str(&dc_name_id)?, + bearer_access_token: ctx.auth().api_token.clone(), + ..Default::default() + }; + + // Pass the request to the edge api + use actors_api::ActorsCreateError::*; + match actors_api::actors_create( + &config, + body, + query.global.project.as_deref(), + query.global.environment.as_deref(), + query.endpoint_type, + ) + .instrument(tracing::info_span!("proxy_request", base_path=%config.base_path)) + .await + { + Ok(res) => Ok(res), + Err(rivet_api::apis::Error::ResponseError(content)) => match content.entity { + Some(Status400(body)) + | Some(Status403(body)) + | Some(Status404(body)) + | Some(Status408(body)) + | Some(Status429(body)) + | Some(Status500(body)) => Err(GlobalError::bad_request_builder(&body.code) + .http_status(content.status) + .message(body.message) + .build()), + _ => bail!("unknown error: {:?} {:?}", content.status, content.content), + }, + Err(err) => bail!("request error: {err:?}"), + } +} + +// MARK: DELETE /v2/actors/{} +#[derive(Debug, Clone, Deserialize)] +pub struct DeleteQuery { + #[serde(flatten)] + global: GlobalQuery, + override_kill_timeout: Option, +} + +#[tracing::instrument(skip_all)] +pub async fn destroy( + ctx: Ctx, + actor_id: util::Id, + query: DeleteQuery, +) -> GlobalResult { + let CheckOutput { game_id, .. } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let dcs = if let Some(label) = actor_id.label() { + ctx.op(cluster::ops::datacenter::get_for_label::Input { + labels: vec![label], + }) + .await? + .datacenters + } else { + // Fetch all datacenters + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], + }) + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_list_res = ctx + .op(cluster::ops::datacenter::list::Input { + cluster_ids: vec![cluster_id], + }) + .await?; + let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); + ctx.op(cluster::ops::datacenter::get::Input { + datacenter_ids: cluster.datacenter_ids, + }) + .await? 
+ .datacenters + }; + + // Filter the datacenters that can be contacted + let filtered_datacenters = dcs + .into_iter() + .filter(|dc| crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false)) + .collect::>(); + + if filtered_datacenters.is_empty() { + bail!("no valid datacenters with worker and guard pools"); + } + + // Query every datacenter + let mut futures = filtered_datacenters + .into_iter() + .map(|dc| async { + let dc = dc; + + let config = Configuration { + client: rivet_pools::reqwest::client().await?, + base_path: ctx.config().server()?.rivet.edge_api_url_str(&dc.name_id)?, + bearer_access_token: ctx.auth().api_token.clone(), + ..Default::default() + }; + + // Pass the request to the edge api + use actors_api::ActorsDestroyError::*; + match actors_api::actors_destroy( + &config, + &actor_id.to_string(), + query.global.project.as_deref(), + query.global.environment.as_deref(), + query.override_kill_timeout, + ) + .instrument(tracing::info_span!("proxy_request", base_path=%config.base_path)) + .await + { + Ok(res) => Ok(res), + Err(rivet_api::apis::Error::ResponseError(content)) => match content.entity { + Some(Status400(body)) + | Some(Status403(body)) + | Some(Status404(body)) + | Some(Status408(body)) + | Some(Status429(body)) + | Some(Status500(body)) => Err(GlobalError::bad_request_builder(&body.code) + .http_status(content.status) + .message(body.message) + .build()), + _ => bail!("unknown error: {:?} {:?}", content.status, content.content), + }, + Err(err) => bail!("request error: {err:?}"), + } + }) + .collect::>(); + let mut error: Option = None; + + // Return first api response that succeeds + while let Some(result) = futures.next().await { + match result { + Ok(value) => return Ok(value), + Err(err) => { + // Overwrite error if its currently an ACTOR_NOT_FOUND error or None + if error + .as_ref() + .map(|err| err.is(formatted_error::code::ACTOR_NOT_FOUND)) + .unwrap_or(true) + { + error = Some(err); + } + } + } + } + + // Otherwise return error + Err(unwrap!(error)) +} + +// MARK: POST /v2/actors/{}/upgrade +#[tracing::instrument(skip_all)] +pub async fn upgrade( + ctx: Ctx, + actor_id: util::Id, + body: models::ActorsUpgradeActorRequest, + query: GlobalQuery, +) -> GlobalResult { + let CheckOutput { game_id, .. } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let dcs = if let Some(label) = actor_id.label() { + ctx.op(cluster::ops::datacenter::get_for_label::Input { + labels: vec![label], + }) + .await? + .datacenters + } else { + // Fetch all datacenters + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], + }) + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_list_res = ctx + .op(cluster::ops::datacenter::list::Input { + cluster_ids: vec![cluster_id], + }) + .await?; + let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); + ctx.op(cluster::ops::datacenter::get::Input { + datacenter_ids: cluster.datacenter_ids, + }) + .await? 
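Aside: the destroy handler above fans out to every reachable datacenter, returns the first success, and only replaces a stored error while that error is `ACTOR_NOT_FOUND` (or nothing has failed yet), so a more specific failure is not masked by not-found responses from the other datacenters. A minimal sketch of that error-priority fold; the error enum here is a stand-in, not the patch's `GlobalError`/`formatted_error` machinery.

```rust
#[derive(Debug, PartialEq)]
enum ApiError {
    ActorNotFound,
    Other(String),
}

/// Return the first Ok; otherwise keep the first error that is not ActorNotFound,
/// falling back to ActorNotFound if that is all any datacenter reported.
fn first_success<T>(results: impl IntoIterator<Item = Result<T, ApiError>>) -> Result<T, ApiError> {
    let mut error: Option<ApiError> = None;

    for result in results {
        match result {
            Ok(value) => return Ok(value),
            Err(err) => {
                // Overwrite only if the stored error is ActorNotFound or absent
                if error
                    .as_ref()
                    .map(|e| *e == ApiError::ActorNotFound)
                    .unwrap_or(true)
                {
                    error = Some(err);
                }
            }
        }
    }

    // The handler itself unwraps here, since at least one datacenter was queried
    Err(error.unwrap_or(ApiError::ActorNotFound))
}

fn main() {
    // Two datacenters don't know the actor, one fails for a different reason
    let results: Vec<Result<(), ApiError>> = vec![
        Err(ApiError::ActorNotFound),
        Err(ApiError::Other("edge timeout".into())),
        Err(ApiError::ActorNotFound),
    ];
    assert_eq!(first_success(results), Err(ApiError::Other("edge timeout".into())));
}
```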
+ .datacenters + }; + + // Filter the datacenters that can be contacted + let filtered_datacenters = dcs + .into_iter() + .filter(|dc| crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false)) + .collect::>(); + + if filtered_datacenters.is_empty() { + bail!("no valid datacenters with worker and guard pools"); + } + + // Query every datacenter + let mut futures = filtered_datacenters + .into_iter() + .map(|dc| async { + let dc = dc; + + let config = Configuration { + client: rivet_pools::reqwest::client().await?, + base_path: ctx.config().server()?.rivet.edge_api_url_str(&dc.name_id)?, + bearer_access_token: ctx.auth().api_token.clone(), + ..Default::default() + }; + + // Pass the request to the edge api + use actors_api::ActorsUpgradeError::*; + match actors_api::actors_upgrade( + &config, + &actor_id.to_string(), + body.clone(), + query.project.as_deref(), + query.environment.as_deref(), + ) + .instrument(tracing::info_span!("proxy_request", base_path=%config.base_path)) + .await + { + Ok(res) => Ok(res), + Err(rivet_api::apis::Error::ResponseError(content)) => match content.entity { + Some(Status400(body)) + | Some(Status403(body)) + | Some(Status404(body)) + | Some(Status408(body)) + | Some(Status429(body)) + | Some(Status500(body)) => Err(GlobalError::bad_request_builder(&body.code) + .http_status(content.status) + .message(body.message) + .build()), + _ => bail!("unknown error: {:?} {:?}", content.status, content.content), + }, + Err(err) => bail!("request error: {err:?}"), + } + }) + .collect::>(); + let mut last_error = None; + + // Return first api response that succeeds + while let Some(result) = futures.next().await { + match result { + Ok(value) => return Ok(value), + Err(err) => last_error = Some(err), + } + } + + // Otherwise return the last error + Err(unwrap!(last_error)) +} + +// MARK: POST /v2/actors/upgrade +#[tracing::instrument(skip_all)] +pub async fn upgrade_all( + ctx: Ctx, + body: models::ActorsUpgradeAllActorsRequest, + query: GlobalQuery, +) -> GlobalResult { + let CheckOutput { game_id, .. } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let tags = unwrap_with!(&body.tags, API_BAD_BODY, error = "missing property `tags`"); + + ensure_with!( + tags.as_object().map(|x| x.len()).unwrap_or_default() <= 8, + API_BAD_BODY, + error = "Too many tags (max 8)." + ); + + let tags = unwrap_with!( + serde_json::from_value::>(tags.clone()).ok(), + API_BAD_BODY, + error = "`tags` must be `Map`" + ); + + for (k, v) in &tags { + ensure_with!( + !k.is_empty(), + API_BAD_BODY, + error = "tags[]: Tag label cannot be empty." 
+ ); + ensure_with!( + k.len() <= 32, + API_BAD_BODY, + error = format!( + "tags[{:?}]: Tag label too large (max 32 bytes).", + util::safe_slice(k, 0, 32), + ), + ); + ensure_with!( + !v.is_empty(), + API_BAD_BODY, + error = format!("tags[{k:?}]: Tag value cannot be empty.") + ); + ensure_with!( + v.len() <= 1024, + API_BAD_BODY, + error = format!("tags[{k:?}]: Tag value too large (max 1024 bytes)."), + ); + } + + // Fetch all datacenters + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], + }) + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_list_res = ctx + .op(cluster::ops::datacenter::list::Input { + cluster_ids: vec![cluster_id], + }) + .await?; + let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); + let dcs_res = ctx + .op(cluster::ops::datacenter::get::Input { + datacenter_ids: cluster.datacenter_ids, + }) + .await?; + + // Filter the datacenters that can be contacted + let filtered_datacenters = dcs_res + .datacenters + .into_iter() + .filter(|dc| crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false)) + .collect::>(); + + if filtered_datacenters.is_empty() { + bail!("no valid datacenters with worker and guard pools"); + } + + // Query every datacenter + let futures = filtered_datacenters + .into_iter() + .map(|dc| async { + let dc = dc; + + let config = Configuration { + client: rivet_pools::reqwest::client().await?, + base_path: ctx.config().server()?.rivet.edge_api_url_str(&dc.name_id)?, + bearer_access_token: ctx.auth().api_token.clone(), + ..Default::default() + }; + + // Pass the request to the edge api + use actors_api::ActorsUpgradeAllError::*; + match actors_api::actors_upgrade_all( + &config, + body.clone(), + query.project.as_deref(), + query.environment.as_deref(), + ) + .instrument(tracing::info_span!("proxy_request", base_path=%config.base_path)) + .await + { + Ok(res) => Ok(res), + Err(rivet_api::apis::Error::ResponseError(content)) => match content.entity { + Some(Status400(body)) + | Some(Status403(body)) + | Some(Status404(body)) + | Some(Status408(body)) + | Some(Status429(body)) + | Some(Status500(body)) => Err(GlobalError::bad_request_builder(&body.code) + .http_status(content.status) + .message(body.message) + .build()), + _ => bail!("unknown error: {:?} {:?}", content.status, content.content), + }, + Err(err) => bail!("request error: {err:?}"), + } + }) + .collect::>(); + + // Aggregate results + let count = futures_util::stream::iter(futures) + .buffer_unordered(16) + .try_fold(0, |a, res| std::future::ready(Ok(a + res.count))) + .await?; + + Ok(models::ActorsUpgradeAllActorsResponse { count }) +} + +// MARK: GET /v2/actors +#[derive(Debug, Clone, Deserialize)] +pub struct ListQuery { + #[serde(flatten)] + global_endpoint_type: GlobalEndpointTypeQuery, + tags_json: Option, + include_destroyed: Option, + cursor: Option, +} + +#[tracing::instrument(skip_all)] +pub async fn list_actors( + ctx: Ctx, + _watch_index: WatchIndexQuery, + query: ListQuery, +) -> GlobalResult { + let CheckOutput { game_id, .. 
} = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global_endpoint_type.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + // Validate tags + if let Some(tags) = &query.tags_json { + let tags = unwrap_with!( + serde_json::from_str::>(tags).ok(), + API_BAD_QUERY_PARAMETER, + parameter = "tags_json", + error = "`tags` must be `Map`" + ); + + ensure_with!( + tags.len() <= 8, + API_BAD_QUERY_PARAMETER, + parameter = "tags_json", + error = "Too many tags (max 8)." + ); + + for (k, v) in &tags { + ensure_with!( + !k.is_empty(), + API_BAD_QUERY_PARAMETER, + parameter = "tags_json", + error = "tags_json[]: Tag label cannot be empty." + ); + ensure_with!( + k.len() <= 32, + API_BAD_QUERY_PARAMETER, + parameter = "tags_json", + error = format!( + "tags_json[{:?}]: Tag label too large (max 32 bytes).", + util::safe_slice(k, 0, 32), + ), + ); + ensure_with!( + !v.is_empty(), + API_BAD_QUERY_PARAMETER, + parameter = "tags_json", + error = format!("tags_json[{k:?}]: Tag value cannot be empty.") + ); + ensure_with!( + v.len() <= 1024, + API_BAD_QUERY_PARAMETER, + parameter = "tags_json", + error = format!("tags_json[{k:?}]: Tag value too large (max 1024 bytes)."), + ); + } + } + + // Fetch all datacenters + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], + }) + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_list_res = ctx + .op(cluster::ops::datacenter::list::Input { + cluster_ids: vec![cluster_id], + }) + .await?; + let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); + let dcs_res = ctx + .op(cluster::ops::datacenter::get::Input { + datacenter_ids: cluster.datacenter_ids, + }) + .await?; + + // Filter the datacenters that can be contacted + let filtered_datacenters = dcs_res + .datacenters + .into_iter() + .filter(|dc| crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false)) + .collect::>(); + + if filtered_datacenters.is_empty() { + bail!("no valid datacenters with worker and guard pools"); + } + + // Query every datacenter + let futures = filtered_datacenters + .into_iter() + .map(|dc| async { + let dc = dc; + + let config = Configuration { + client: rivet_pools::reqwest::client().await?, + base_path: ctx.config().server()?.rivet.edge_api_url_str(&dc.name_id)?, + bearer_access_token: ctx.auth().api_token.clone(), + ..Default::default() + }; + + // Pass the request to the edge api + let timeout_res = tokio::time::timeout( + Duration::from_secs(30), + actors_api::actors_list( + &config, + query.global_endpoint_type.global.project.as_deref(), + query.global_endpoint_type.global.environment.as_deref(), + query.global_endpoint_type.endpoint_type, + query.tags_json.as_deref(), + query.include_destroyed, + query.cursor.as_deref(), + ) + .instrument(tracing::info_span!("proxy_request", base_path=%config.base_path)), + ) + .await; + + use actors_api::ActorsListError::*; + match timeout_res { + Ok(timeout_res) => match timeout_res { + Ok(res) => Ok(res), + Err(rivet_api::apis::Error::ResponseError(content)) => match content.entity { + Some(Status400(body)) + | Some(Status403(body)) + | Some(Status404(body)) + | Some(Status408(body)) + | Some(Status429(body)) + | Some(Status500(body)) => { + return Err(GlobalError::bad_request_builder(&body.code) + .http_status(content.status) + .message(body.message) + .build()) + } + _ => bail!("unknown error: {:?} {:?}", content.status, content.content), + }, + Err(err) => bail!("request error: {err:?}"), + }, + Err(_) => { 
+ tracing::error!(dc=?dc.name_id, "timed out requesting dc"); + bail_with!(API_REQUEST_TIMEOUT); + } + } + }) + .collect::>(); + + let mut results = futures_util::stream::iter(futures) + .buffer_unordered(16) + .collect::>() + .await; + + // Aggregate results + let mut actors = Vec::new(); + for res in &mut results { + match res { + Ok(res) => actors.extend(std::mem::take(&mut res.actors)), + Err(err) => tracing::error!(?err, "failed to request edge dc"), + } + } + + // Error only if all requests failed + if !results.is_empty() && results.iter().all(|res| res.is_err()) { + return Err(unwrap!(unwrap!(results.into_iter().next()).err())); + } + + // Sort by create ts desc + // + // This is an ISO 8601 string and is safely sortable + actors.sort_by_cached_key(|x| std::cmp::Reverse(x.created_at.clone())); + + // Shorten array since returning all actors from all regions could end up returning `regions * + // 32` results, which is a lot. + actors.truncate(32); + + // TODO: Subtracting a ms might skip an actor in a rare edge case, need to build compound + // cursor of [created_at, actor_id] that we pass to the fdb range + let cursor = actors.last().map(|x| { + let datetime = x + .created_at + .parse::>() + .unwrap_or_default(); + let unix_ts = datetime.timestamp_millis() - 1; + unix_ts.to_string() + }); + + Ok(models::ActorsListActorsResponse { + actors, + pagination: Box::new(models::Pagination { cursor }), + }) +} + +#[tracing::instrument(skip_all)] +pub(crate) async fn resolve_dc( + ctx: &Ctx, + cluster_id: Uuid, + region: Option, +) -> GlobalResult { + if let Some(region) = region { + let dcs_res = ctx + .op(cluster::ops::datacenter::resolve_for_name_id::Input { + cluster_id, + name_ids: vec![region], + }) + .await?; + let dc = unwrap_with!( + dcs_res.datacenters.first(), + ACTOR_FAILED_TO_CREATE, + error = "Region not found." + ); + + Ok(dc.name_id.clone()) + } + // Auto-select the closest region + else { + let clusters_res = ctx + .op(cluster::ops::datacenter::list::Input { + cluster_ids: vec![cluster_id], + }) + .await?; + let cluster = unwrap!(clusters_res.clusters.first()); + + let datacenter_id = if let Some((lat, long)) = ctx.coords() { + let recommend_res = op!([ctx] region_recommend { + region_ids: cluster + .datacenter_ids + .iter() + .cloned() + .map(Into::into) + .collect(), + coords: Some(backend::net::Coordinates { + latitude: lat, + longitude: long, + }), + ..Default::default() + }) + .await?; + let primary_region = unwrap!(recommend_res.regions.first()); + let primary_region_id = unwrap_ref!(primary_region.region_id).as_uuid(); + + primary_region_id + } else { + tracing::warn!("coords not provided to select region"); + + let datacenter_id = *unwrap_with!( + cluster.datacenter_ids.first(), + ACTOR_FAILED_TO_CREATE, + error = "No regions found." + ); + + datacenter_id + }; + + let dc_res = ctx + .op(cluster::ops::datacenter::get::Input { + datacenter_ids: vec![datacenter_id], + }) + .await?; + let dc = unwrap_with!( + dc_res.datacenters.first(), + ACTOR_FAILED_TO_CREATE, + error = "Region not found." 
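Aside: `list_actors` above merges pages from every datacenter, sorts them by `created_at` descending (RFC 3339 strings with the same offset sort chronologically as plain strings), truncates to 32 entries, and derives the next cursor from the last entry's timestamp minus one millisecond, which, as the TODO notes, can skip an actor created within the same millisecond. A standalone sketch of that paging step, assuming `chrono` as the handler does and a reduced actor type:

```rust
use chrono::{DateTime, Utc};

struct ActorSummary {
    id: String,
    created_at: String, // RFC 3339, as returned by the edge API
}

/// Sort newest-first, cap the page, and derive the next-page cursor.
fn paginate(mut actors: Vec<ActorSummary>, page_size: usize) -> (Vec<ActorSummary>, Option<String>) {
    // RFC 3339 strings with the same offset sort chronologically as plain strings
    actors.sort_by_cached_key(|a| std::cmp::Reverse(a.created_at.clone()));
    actors.truncate(page_size);

    // Cursor = last entry's timestamp in ms minus 1, so the next request starts strictly before it.
    // As noted in the handler, this can skip an actor created within the same millisecond.
    let cursor = actors.last().map(|a| {
        let ts = a
            .created_at
            .parse::<DateTime<Utc>>()
            .unwrap_or_default()
            .timestamp_millis()
            - 1;
        ts.to_string()
    });

    (actors, cursor)
}

fn main() {
    let actors = vec![
        ActorSummary { id: "older".into(), created_at: "2025-06-27T10:00:00.000Z".into() },
        ActorSummary { id: "newer".into(), created_at: "2025-06-28T09:30:00.000Z".into() },
    ];
    let (page, cursor) = paginate(actors, 32);
    assert_eq!(page[0].id, "newer");
    println!("next cursor: {cursor:?}");
}
```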
+ ); + + Ok(dc.name_id.clone()) + } +} diff --git a/packages/core/api/actor/src/route/logs.rs b/packages/core/api/actor/src/route/actors/v1/logs.rs similarity index 76% rename from packages/core/api/actor/src/route/logs.rs rename to packages/core/api/actor/src/route/actors/v1/logs.rs index d68178f378..403a30d63f 100644 --- a/packages/core/api/actor/src/route/logs.rs +++ b/packages/core/api/actor/src/route/actors/v1/logs.rs @@ -2,7 +2,6 @@ use api_helper::{ anchor::{WatchIndexQuery, WatchResponse}, ctx::Ctx, }; -use proto::backend; use rivet_api::models; use rivet_operation::prelude::*; use serde::Deserialize; @@ -21,8 +20,14 @@ use super::GlobalQuery; pub struct GetActorLogsQuery { #[serde(flatten)] pub global: GlobalQuery, - /// JSON-encoded user query expression for filtering logs - pub query_json: Option, + pub stream: models::ActorsV1QueryLogStream, + pub actor_ids_json: String, + #[serde(default)] + pub search_text: Option, + #[serde(default)] + pub search_case_sensitive: Option, + #[serde(default)] + pub search_enable_regex: Option, } #[tracing::instrument(skip_all)] @@ -30,7 +35,7 @@ pub async fn get_logs( ctx: Ctx, watch_index: WatchIndexQuery, query: GetActorLogsQuery, -) -> GlobalResult { +) -> GlobalResult { let CheckOutput { game_id, env_id } = ctx .auth() .check( @@ -43,18 +48,31 @@ pub async fn get_logs( ) .await?; - // Parse user query expression if provided - let user_query_expr = if let Some(query_json) = &query.query_json { - let expr = match serde_json::from_str::(query_json) { - Ok(expr) => expr, - Err(e) => { - bail_with!(API_BAD_QUERY, error = e.to_string()); - } - }; - Some(expr) - } else { - // No query provided, return empty result - None + // Parse actor IDs from the JSON string + let actor_ids: Vec = unwrap_with!( + serde_json::from_str(&query.actor_ids_json).ok(), + ACTOR_LOGS_INVALID_ACTOR_IDS + ); + + ensure_with!(!actor_ids.is_empty(), ACTOR_LOGS_NO_ACTOR_IDS); + + // Filter to only valid actors for this game/env + let valid_actor_ids = assert::actor_for_env_v1(&ctx, &actor_ids, game_id, env_id, None).await?; + + // Exit early if no valid actors + ensure_with!(!valid_actor_ids.is_empty(), ACTOR_LOGS_NO_VALID_ACTOR_IDS); + + // Use only the valid actor IDs from now on + let actor_ids = valid_actor_ids; + + // Determine stream type(s) + let stream_types = match query.stream { + models::ActorsV1QueryLogStream::StdOut => vec![pegboard::types::LogsStreamType::StdOut], + models::ActorsV1QueryLogStream::StdErr => vec![pegboard::types::LogsStreamType::StdErr], + models::ActorsV1QueryLogStream::All => vec![ + pegboard::types::LogsStreamType::StdOut, + pegboard::types::LogsStreamType::StdErr, + ], }; // Timestamp to start the query at @@ -63,7 +81,8 @@ pub async fn get_logs( // Handle anchor let logs_res = if let Some(anchor) = watch_index.as_i64()? { let query_start = tokio::time::Instant::now(); - let user_query_expr_clone = user_query_expr.clone(); + let stream_types_clone = stream_types.clone(); + let actor_ids_clone = actor_ids.clone(); // Poll for new logs let logs_res = loop { @@ -76,12 +95,15 @@ pub async fn get_logs( // We return fewer logs than the non-anchor request since this will be called // frequently and should not return a significant amount of logs. 
let logs_res = ctx - .op(pegboard::ops::actor::log::read_with_query::Input { - env_id, + .op(pegboard::ops::actor::v1::log::read::Input { + actor_ids: actor_ids_clone.clone(), + stream_types: stream_types_clone.clone(), count: 64, - order_by: pegboard::ops::actor::log::read_with_query::Order::Desc, - query: pegboard::ops::actor::log::read_with_query::Query::AfterNts(anchor), - user_query_expr: user_query_expr_clone.clone(), + order_by: pegboard::ops::actor::v1::log::read::Order::Desc, + query: pegboard::ops::actor::v1::log::read::Query::AfterNts(anchor), + search_text: query.search_text.clone(), + search_case_sensitive: query.search_case_sensitive, + search_enable_regex: query.search_enable_regex, }) .await?; @@ -92,7 +114,7 @@ pub async fn get_logs( // Timeout cleanly if query_start.elapsed().as_millis() > util::watch::DEFAULT_TIMEOUT as u128 { - break pegboard::ops::actor::log::read_with_query::Output { + break pegboard::ops::actor::v1::log::read::Output { entries: Vec::new(), }; } @@ -112,32 +134,22 @@ pub async fn get_logs( logs_res } else { // Read most recent logs - ctx.op(pegboard::ops::actor::log::read_with_query::Input { - env_id, + + ctx.op(pegboard::ops::actor::v1::log::read::Input { + actor_ids: actor_ids.clone(), + stream_types: stream_types.clone(), count: 256, - order_by: pegboard::ops::actor::log::read_with_query::Order::Desc, - query: pegboard::ops::actor::log::read_with_query::Query::BeforeNts(before_nts), - user_query_expr: user_query_expr.clone(), + order_by: pegboard::ops::actor::v1::log::read::Order::Desc, + query: pegboard::ops::actor::v1::log::read::Query::BeforeNts(before_nts), + search_text: query.search_text.clone(), + search_case_sensitive: query.search_case_sensitive, + search_enable_regex: query.search_enable_regex, }) .await? 
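Aside: when a watch anchor is present, the log handlers poll for entries written after the anchor, reading descending with a small page size, returning as soon as anything arrives, timing out cleanly with an empty page, and sleeping between attempts to avoid a tight loop. A condensed sketch of that polling shape, assuming `tokio`; the `read_after` closure stands in for the `pegboard::ops::actor::v1::log::read` operation.

```rust
use std::time::Duration;
use tokio::time::Instant;

/// Poll `read_after(anchor)` until it yields entries or the watch timeout elapses.
async fn poll_logs<T, F, Fut>(anchor: i64, timeout: Duration, mut read_after: F) -> Vec<T>
where
    F: FnMut(i64) -> Fut,
    Fut: std::future::Future<Output = Vec<T>>,
{
    let query_start = Instant::now();

    loop {
        // Read a small page of the most recent entries after the anchor (descending)
        let entries = read_after(anchor).await;
        if !entries.is_empty() {
            return entries;
        }

        // Timeout cleanly with an empty result, like the watch path in the handler
        if query_start.elapsed() >= timeout {
            return Vec::new();
        }

        // Throttle between polls; a plain sleep avoids a tight loop when reads are slow
        tokio::time::sleep(Duration::from_secs(1)).await;
    }
}

#[tokio::main]
async fn main() {
    // Pretend the first two reads find nothing and the third finds one line
    let mut calls = 0;
    let entries = poll_logs(0, Duration::from_secs(40), |_anchor| {
        calls += 1;
        let found = calls >= 3;
        async move { if found { vec!["hello".to_string()] } else { Vec::new() } }
    })
    .await;
    assert_eq!(entries, vec!["hello".to_string()]);
}
```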
}; - // Convert to old Output format for compatibility - let logs_res = pegboard::ops::actor::log::read::Output { - entries: logs_res - .entries - .into_iter() - .map(|e| pegboard::ops::actor::log::read::LogEntry { - ts: e.ts, - message: e.message, - stream_type: e.stream_type, - actor_id: e.actor_id, - }) - .collect(), - }; - // Build actor_ids map for lookup - let mut actor_id_to_index: std::collections::HashMap = + let mut actor_id_to_index: std::collections::HashMap = std::collections::HashMap::new(); let mut unique_actor_ids: Vec = Vec::new(); @@ -145,7 +157,7 @@ pub async fn get_logs( for entry in &logs_res.entries { if !actor_id_to_index.contains_key(&entry.actor_id) { actor_id_to_index.insert(entry.actor_id.clone(), unique_actor_ids.len() as i32); - unique_actor_ids.push(entry.actor_id.clone()); + unique_actor_ids.push(entry.actor_id.to_string()); } } @@ -167,11 +179,6 @@ pub async fn get_logs( .iter() .map(|x| x.stream_type as i32) .collect::>(); - let mut foreigns = logs_res - .entries - .iter() - .map(|x| x.foreign) - .collect::>(); let mut actor_indices = logs_res .entries .iter() @@ -182,16 +189,14 @@ pub async fn get_logs( lines.reverse(); timestamps.reverse(); streams.reverse(); - foreigns.reverse(); actor_indices.reverse(); let watch_nts = logs_res.entries.first().map_or(before_nts, |x| x.ts); - Ok(models::ActorsGetActorLogsResponse { + Ok(models::ActorsV1GetActorLogsResponse { actor_ids: unique_actor_ids, lines, timestamps, streams, - foreigns, actor_indices, watch: WatchResponse::new_as_model(watch_nts), }) diff --git a/packages/core/api/actor/src/route/metrics.rs b/packages/core/api/actor/src/route/actors/v1/metrics.rs similarity index 93% rename from packages/core/api/actor/src/route/metrics.rs rename to packages/core/api/actor/src/route/actors/v1/metrics.rs index 4795347e67..84c5190a5e 100644 --- a/packages/core/api/actor/src/route/metrics.rs +++ b/packages/core/api/actor/src/route/actors/v1/metrics.rs @@ -1,7 +1,4 @@ -use api_helper::{ - anchor::{WatchIndexQuery, WatchResponse}, - ctx::Ctx, -}; +use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; use rivet_api::models; use rivet_operation::prelude::*; use serde::Deserialize; @@ -10,7 +7,6 @@ use std::collections::{BTreeMap, HashMap}; use crate::{ assert, auth::{Auth, CheckOpts, CheckOutput}, - utils::build_global_query_compat, }; use super::GlobalQuery; @@ -59,13 +55,14 @@ pub struct ProcessedMetricRow { pub metric_type: MetricType, } +// TODO: Move contents of this to an op #[tracing::instrument(skip_all)] pub async fn get_metrics( ctx: Ctx, actor_id: Uuid, - watch_index: WatchIndexQuery, + _watch_index: WatchIndexQuery, query: GetActorMetricsQuery, -) -> GlobalResult { +) -> GlobalResult { let CheckOutput { game_id, env_id } = ctx .auth() .check( @@ -79,7 +76,7 @@ pub async fn get_metrics( .await?; // Validate the actor belongs to this game/env - assert::actor_for_env(&ctx, &[actor_id], game_id, env_id, None).await?; + assert::actor_for_env_v1(&ctx, &[actor_id], game_id, env_id, None).await?; let clickhouse = ctx.clickhouse().await?; @@ -92,7 +89,7 @@ pub async fn get_metrics( bail_with!(ACTOR_METRICS_INVALID_INTERVAL); } - let actor_prefix = format!("/system.slice/pegboard-actor-{}-", actor_id); + let prefix = format!("/system.slice/pegboard-actor-{}-", actor_id); // Available gauge metrics: // - container_cpu_load_average_10s @@ -122,7 +119,8 @@ pub async fn get_metrics( // - container_processes // Query gauge metrics (current values) - let gauge_query = indoc! 
{" + let gauge_query = indoc!( + " SELECT toUInt32((toUnixTimestamp(toStartOfInterval(TimeUnix, INTERVAL ? second)) - ?) / ?) as time_bucket_index, MetricName as metric_name, @@ -145,7 +143,8 @@ pub async fn get_metrics( AND startsWith(Attributes['id'], ?) GROUP BY time_bucket_index, metric_name, tcp_state, udp_state, device, failure_type, scope, task_state, interface ORDER BY time_bucket_index ASC, metric_name - "}; + " + ); let gauge_future = clickhouse .query(&gauge_query) @@ -154,7 +153,7 @@ pub async fn get_metrics( .bind(interval_seconds) .bind(start_seconds) .bind(end_seconds) - .bind(&actor_prefix) + .bind(&prefix) .fetch_all::(); // Available sum metrics: @@ -178,7 +177,8 @@ pub async fn get_metrics( // - container_network_transmit_packets_total // Query sum metrics (rates/counters) - let sum_query = indoc! {" + let sum_query = indoc!( + " SELECT toUInt32((toUnixTimestamp(toStartOfInterval(TimeUnix, INTERVAL ? second)) - ?) / ?) as time_bucket_index, MetricName as metric_name, @@ -201,7 +201,8 @@ pub async fn get_metrics( AND startsWith(Attributes['id'], ?) GROUP BY time_bucket_index, metric_name, tcp_state, udp_state, device, failure_type, scope, task_state, interface ORDER BY time_bucket_index ASC, metric_name - "}; + " + ); let sum_future = clickhouse .query(&sum_query) @@ -210,7 +211,7 @@ pub async fn get_metrics( .bind(interval_seconds) .bind(start_seconds) .bind(end_seconds) - .bind(&actor_prefix) + .bind(&prefix) .fetch_all::(); let (gauge_rows, sum_rows) = @@ -282,7 +283,7 @@ pub async fn get_metrics( let metric_key = (row.metric_name.clone(), attributes); // Initialize metric entry if it doesn't exist - let (existing_type, metric_values) = metrics.entry(metric_key).or_insert_with(|| { + let (_existing_type, metric_values) = metrics.entry(metric_key).or_insert_with(|| { ( processed_row.metric_type.as_str().to_string(), vec![0.0; num_buckets], @@ -319,7 +320,7 @@ pub async fn get_metrics( metric_values.push(values); } - Ok(models::ActorsGetActorMetricsResponse { + Ok(models::ActorsV1GetActorMetricsResponse { actor_ids: vec![actor_id.to_string()], metric_names, metric_attributes, diff --git a/packages/core/api/actor/src/route/actors.rs b/packages/core/api/actor/src/route/actors/v1/mod.rs similarity index 93% rename from packages/core/api/actor/src/route/actors.rs rename to packages/core/api/actor/src/route/actors/v1/mod.rs index de38c1b417..081fda4d48 100644 --- a/packages/core/api/actor/src/route/actors.rs +++ b/packages/core/api/actor/src/route/actors/v1/mod.rs @@ -22,6 +22,9 @@ use crate::{ use super::GlobalQuery; +pub mod logs; +pub mod metrics; + #[derive(Debug, Clone, Deserialize)] pub struct GlobalEndpointTypeQuery { #[serde(flatten)] @@ -33,7 +36,7 @@ pub struct GlobalEndpointTypeQuery { #[tracing::instrument(skip_all)] pub async fn get( ctx: Ctx, - actor_id: util::Id, + actor_id: Uuid, watch_index: WatchIndexQuery, query: GlobalEndpointTypeQuery, ) -> GlobalResult { @@ -42,7 +45,7 @@ pub async fn get( async fn get_inner( ctx: &Ctx, - actor_id: util::Id, + actor_id: Uuid, _watch_index: WatchIndexQuery, query: GlobalEndpointTypeQuery, ) -> GlobalResult { @@ -58,35 +61,28 @@ async fn get_inner( ) .await?; - let dcs = if let Some(label) = actor_id.label() { - ctx.op(cluster::ops::datacenter::get_for_label::Input { - labels: vec![label], + // Fetch all datacenters + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], }) - .await? 
- .datacenters - } else { - // Fetch all datacenters - let clusters_res = ctx - .op(cluster::ops::get_for_game::Input { - game_ids: vec![game_id], - }) - .await?; - let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; - let dc_list_res = ctx - .op(cluster::ops::datacenter::list::Input { - cluster_ids: vec![cluster_id], - }) - .await?; - let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); - ctx.op(cluster::ops::datacenter::get::Input { + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_list_res = ctx + .op(cluster::ops::datacenter::list::Input { + cluster_ids: vec![cluster_id], + }) + .await?; + let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); + let dcs_res = ctx + .op(cluster::ops::datacenter::get::Input { datacenter_ids: cluster.datacenter_ids, }) - .await? - .datacenters - }; + .await?; // Filter the datacenters that can be contacted - let filtered_datacenters = dcs + let filtered_datacenters = dcs_res + .datacenters .into_iter() .filter(|dc| crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false)) .collect::>(); @@ -332,10 +328,6 @@ pub async fn create_deprecated( ), wait_ready: None, })), - resources: Some(Box::new(models::ActorsResources { - cpu: body.resources.cpu, - memory: body.resources.memory, - })), runtime: Some(Box::new(models::ActorsCreateActorRuntimeRequest { environment: body.runtime.environment, network: None, @@ -367,7 +359,7 @@ pub struct DeleteQuery { #[tracing::instrument(skip_all)] pub async fn destroy( ctx: Ctx, - actor_id: util::Id, + actor_id: Uuid, query: DeleteQuery, ) -> GlobalResult { let CheckOutput { game_id, .. } = ctx @@ -382,35 +374,28 @@ pub async fn destroy( ) .await?; - let dcs = if let Some(label) = actor_id.label() { - ctx.op(cluster::ops::datacenter::get_for_label::Input { - labels: vec![label], + // Fetch all datacenters + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], }) - .await? - .datacenters - } else { - // Fetch all datacenters - let clusters_res = ctx - .op(cluster::ops::get_for_game::Input { - game_ids: vec![game_id], - }) - .await?; - let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; - let dc_list_res = ctx - .op(cluster::ops::datacenter::list::Input { - cluster_ids: vec![cluster_id], - }) - .await?; - let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); - ctx.op(cluster::ops::datacenter::get::Input { + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_list_res = ctx + .op(cluster::ops::datacenter::list::Input { + cluster_ids: vec![cluster_id], + }) + .await?; + let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); + let dcs_res = ctx + .op(cluster::ops::datacenter::get::Input { datacenter_ids: cluster.datacenter_ids, }) - .await? 
- .datacenters - }; + .await?; // Filter the datacenters that can be contacted - let filtered_datacenters = dcs + let filtered_datacenters = dcs_res + .datacenters .into_iter() .filter(|dc| crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false)) .collect::>(); @@ -507,7 +492,7 @@ pub async fn destroy_deprecated( #[tracing::instrument(skip_all)] pub async fn upgrade( ctx: Ctx, - actor_id: util::Id, + actor_id: Uuid, body: models::ActorsUpgradeActorRequest, query: GlobalQuery, ) -> GlobalResult { @@ -523,35 +508,28 @@ pub async fn upgrade( ) .await?; - let dcs = if let Some(label) = actor_id.label() { - ctx.op(cluster::ops::datacenter::get_for_label::Input { - labels: vec![label], + // Fetch all datacenters + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], }) - .await? - .datacenters - } else { - // Fetch all datacenters - let clusters_res = ctx - .op(cluster::ops::get_for_game::Input { - game_ids: vec![game_id], - }) - .await?; - let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; - let dc_list_res = ctx - .op(cluster::ops::datacenter::list::Input { - cluster_ids: vec![cluster_id], - }) - .await?; - let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); - ctx.op(cluster::ops::datacenter::get::Input { + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_list_res = ctx + .op(cluster::ops::datacenter::list::Input { + cluster_ids: vec![cluster_id], + }) + .await?; + let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); + let dcs_res = ctx + .op(cluster::ops::datacenter::get::Input { datacenter_ids: cluster.datacenter_ids, }) - .await? - .datacenters - }; + .await?; // Filter the datacenters that can be contacted - let filtered_datacenters = dcs + let filtered_datacenters = dcs_res + .datacenters .into_iter() .filter(|dc| crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false)) .collect::>(); @@ -1055,8 +1033,7 @@ fn legacy_convert_actor_to_server( }) .transpose()?, environment: Uuid::nil(), - // New ids are not supported by old servers - id: util::Id::parse(&a.id)?.as_v0().unwrap_or_else(Uuid::nil), + id: util::uuid::parse(&a.id)?, lifecycle: Box::new(models::ServersLifecycle { kill_timeout: a.lifecycle.kill_timeout, }), @@ -1098,10 +1075,7 @@ fn legacy_convert_actor_to_server( }) .collect(), }), - resources: Box::new(models::ServersResources { - cpu: a.resources.as_ref().map(|x| x.cpu).unwrap_or(0), - memory: a.resources.as_ref().map(|x| x.memory).unwrap_or(0), - }), + resources: Box::new(models::ServersResources { cpu: 0, memory: 0 }), runtime: Box::new(models::ServersRuntime { arguments: a.runtime.arguments, build: a.runtime.build, diff --git a/packages/core/api/actor/src/route/builds.rs b/packages/core/api/actor/src/route/builds.rs index 72a5fad540..4eb0a01274 100644 --- a/packages/core/api/actor/src/route/builds.rs +++ b/packages/core/api/actor/src/route/builds.rs @@ -23,7 +23,7 @@ use crate::{ use super::GlobalQuery; -// MARK: GET /builds/{} +// MARK: GET /v1/builds/{} #[tracing::instrument(skip_all)] pub async fn get( ctx: Ctx, @@ -112,7 +112,7 @@ pub async fn get_deprecated( }) } -// MARK: GET /builds +// MARK: GET /v1/builds #[derive(Debug, Clone, Deserialize)] pub struct ListQuery { #[serde(flatten)] @@ -251,7 +251,7 @@ pub async fn list_deprecated( }) } -// MARK: PATCH /builds/{}/tags +// MARK: PATCH /v1/builds/{}/tags #[tracing::instrument(skip_all)] pub async fn patch_tags( ctx: Ctx, @@ -351,7 +351,7 @@ pub async fn patch_tags_deprecated( .await } -// MARK: 
POST /builds/prepare +// MARK: POST /v1/builds/prepare #[tracing::instrument(skip_all)] pub async fn create_build( ctx: Ctx, @@ -491,7 +491,7 @@ pub async fn create_build_deprecated( }) } -// MARK: POST /builds/{}/complete +// MARK: POST /v1/builds/{}/complete #[tracing::instrument(skip_all)] pub async fn complete_build( ctx: Ctx, diff --git a/packages/core/api/actor/src/route/containers/logs.rs b/packages/core/api/actor/src/route/containers/logs.rs new file mode 100644 index 0000000000..7f514fdc02 --- /dev/null +++ b/packages/core/api/actor/src/route/containers/logs.rs @@ -0,0 +1,213 @@ +use api_helper::{ + anchor::{WatchIndexQuery, WatchResponse}, + ctx::Ctx, +}; +use rivet_api::models; +use rivet_operation::prelude::*; +use serde::Deserialize; +use std::time::Duration; + +use crate::{ + assert, + auth::{Auth, CheckOpts, CheckOutput}, +}; + +use super::GlobalQuery; + +// MARK: GET /v1/container/{}/logs +#[derive(Debug, Deserialize)] +pub struct GetContainerLogsQuery { + #[serde(flatten)] + pub global: GlobalQuery, + pub stream: models::ContainersQueryLogStream, + pub container_ids_json: String, + #[serde(default)] + pub search_text: Option, + #[serde(default)] + pub search_case_sensitive: Option, + #[serde(default)] + pub search_enable_regex: Option, +} + +#[tracing::instrument(skip_all)] +pub async fn get_logs( + ctx: Ctx, + watch_index: WatchIndexQuery, + query: GetContainerLogsQuery, +) -> GlobalResult { + let CheckOutput { game_id, env_id } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: false, + opt_auth: false, + }, + ) + .await?; + + // Parse container IDs from the JSON string + let container_ids: Vec = unwrap_with!( + serde_json::from_str(&query.container_ids_json).ok(), + CONTAINER_LOGS_INVALID_CONTAINER_IDS + ); + + ensure_with!(!container_ids.is_empty(), CONTAINER_LOGS_NO_CONTAINER_IDS); + + // Filter to only valid containers for this game/env + let valid_container_ids = + assert::actor_for_env(&ctx, &container_ids, game_id, env_id, None).await?; + + // Exit early if no valid containers + ensure_with!( + !valid_container_ids.is_empty(), + CONTAINER_LOGS_NO_VALID_CONTAINER_IDS + ); + + // Use only the valid container IDs from now on + let container_ids = valid_container_ids; + + // Determine stream type(s) + let stream_types = match query.stream { + models::ContainersQueryLogStream::StdOut => vec![pegboard::types::LogsStreamType::StdOut], + models::ContainersQueryLogStream::StdErr => vec![pegboard::types::LogsStreamType::StdErr], + models::ContainersQueryLogStream::All => vec![ + pegboard::types::LogsStreamType::StdOut, + pegboard::types::LogsStreamType::StdErr, + ], + }; + + // Timestamp to start the query at + let before_nts = util::timestamp::now() * 1_000_000; + + // Handle anchor + let logs_res = if let Some(anchor) = watch_index.as_i64()? { + let query_start = tokio::time::Instant::now(); + let stream_types_clone = stream_types.clone(); + let container_ids_clone = container_ids.clone(); + + // Poll for new logs + let logs_res = loop { + // Read logs after the timestamp + // + // We read descending in order to get at most 256 of the most recent logs. If we used + // asc, we would be paginating through all the logs which would likely fall behind + // actual stream and strain the database. + // + // We return fewer logs than the non-anchor request since this will be called + // frequently and should not return a significant amount of logs. 
+ let logs_res = ctx + .op(pegboard::ops::actor::log::read::Input { + actor_ids: container_ids_clone.clone(), + stream_types: stream_types_clone.clone(), + count: 64, + order_by: pegboard::ops::actor::log::read::Order::Desc, + query: pegboard::ops::actor::log::read::Query::AfterNts(anchor), + search_text: query.search_text.clone(), + search_case_sensitive: query.search_case_sensitive, + search_enable_regex: query.search_enable_regex, + }) + .await?; + + // Return logs + if !logs_res.entries.is_empty() { + break logs_res; + } + + // Timeout cleanly + if query_start.elapsed().as_millis() > util::watch::DEFAULT_TIMEOUT as u128 { + break pegboard::ops::actor::log::read::Output { + entries: Vec::new(), + }; + } + + // Throttle request + // + // We don't use `tokio::time::interval` because if the request takes longer than 500 + // ms, we'll enter a tight loop of requests. + tokio::time::sleep(Duration::from_millis(1000)).await; + }; + + // Since we're using watch, we don't want this request to return immediately if there's new + // results. Add an artificial timeout in order to prevent a tight loop if there's a high + // log frequency. + tokio::time::sleep_until(query_start + Duration::from_secs(1)).await; + + logs_res + } else { + // Read most recent logs + + ctx.op(pegboard::ops::actor::log::read::Input { + actor_ids: container_ids.clone(), + stream_types: stream_types.clone(), + count: 256, + order_by: pegboard::ops::actor::log::read::Order::Desc, + query: pegboard::ops::actor::log::read::Query::BeforeNts(before_nts), + search_text: query.search_text.clone(), + search_case_sensitive: query.search_case_sensitive, + search_enable_regex: query.search_enable_regex, + }) + .await? + }; + + // Build container_ids map for lookup + let mut container_id_to_index: std::collections::HashMap = + std::collections::HashMap::new(); + let mut unique_container_ids: Vec = Vec::new(); + + // Collect unique container IDs and map them to indices + for entry in &logs_res.entries { + if !container_id_to_index.contains_key(&entry.actor_id) { + container_id_to_index.insert(entry.actor_id.clone(), unique_container_ids.len() as i32); + unique_container_ids.push(entry.actor_id.to_string()); + } + } + + // Convert logs + let mut lines = logs_res + .entries + .iter() + .map(|entry| base64::encode(&entry.message)) + .collect::>(); + let mut timestamps = logs_res + .entries + .iter() + // Is nanoseconds + .map(|x| x.ts / 1_000_000) + .map(util::timestamp::to_string) + .collect::, _>>()?; + let mut streams = logs_res + .entries + .iter() + .map(|x| x.stream_type as i32) + .collect::>(); + let mut foreigns = logs_res + .entries + .iter() + .map(|x| x.foreign) + .collect::>(); + let mut container_indices = logs_res + .entries + .iter() + .map(|x| *container_id_to_index.get(&x.actor_id).unwrap_or(&0)) + .collect::>(); + + // Order desc + lines.reverse(); + timestamps.reverse(); + streams.reverse(); + foreigns.reverse(); + container_indices.reverse(); + + let watch_nts = logs_res.entries.first().map_or(before_nts, |x| x.ts); + Ok(models::ContainersGetContainerLogsResponse { + container_ids: unique_container_ids, + lines, + timestamps, + streams, + foreigns, + container_indices, + watch: WatchResponse::new_as_model(watch_nts), + }) +} diff --git a/packages/core/api/actor/src/route/containers/metrics.rs b/packages/core/api/actor/src/route/containers/metrics.rs new file mode 100644 index 0000000000..7d63c7b49f --- /dev/null +++ b/packages/core/api/actor/src/route/containers/metrics.rs @@ -0,0 +1,336 @@ +use 
api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; +use rivet_api::models; +use rivet_operation::prelude::*; +use serde::Deserialize; +use std::collections::{BTreeMap, HashMap}; + +use crate::{ + assert, + auth::{Auth, CheckOpts, CheckOutput}, +}; + +use super::GlobalQuery; + +#[derive(Debug, Deserialize)] +pub struct GetContainerMetricsQuery { + #[serde(flatten)] + pub global: GlobalQuery, + pub start: i64, + pub end: i64, + pub interval: i64, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum MetricType { + Counter, + Gauge, +} + +impl MetricType { + pub fn as_str(&self) -> &'static str { + match self { + MetricType::Counter => "counter", + MetricType::Gauge => "gauge", + } + } +} + +#[derive(Debug, clickhouse::Row, serde::Deserialize)] +pub struct MetricRow { + pub time_bucket_index: u32, + pub metric_name: String, + pub value: f64, + pub tcp_state: String, + pub udp_state: String, + pub device: String, + pub failure_type: String, + pub scope: String, + pub task_state: String, + pub interface: String, +} + +#[derive(Debug)] +pub struct ProcessedMetricRow { + pub row: MetricRow, + pub metric_type: MetricType, +} + +// TODO: Move contents of this to an op +#[tracing::instrument(skip_all)] +pub async fn get_metrics( + ctx: Ctx, + container_id: util::Id, + _watch_index: WatchIndexQuery, + query: GetContainerMetricsQuery, +) -> GlobalResult { + let CheckOutput { game_id, env_id } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: false, + opt_auth: false, + }, + ) + .await?; + + // Validate the container belongs to this game/env + assert::actor_for_env(&ctx, &[container_id], game_id, env_id, None).await?; + + let clickhouse = ctx.clickhouse().await?; + + // Convert milliseconds to seconds for ClickHouse + let start_seconds = query.start / 1000; + let end_seconds = query.end / 1000; + let interval_seconds = query.interval / 1000; + + if interval_seconds == 0 { + bail_with!(CONTAINER_METRICS_INVALID_INTERVAL); + } + + let prefix = "/system.slice/pegboard-runner-"; + + // Query gauge metrics (current values) + let gauge_query = indoc!( + " + WITH runner_data AS ( + SELECT runner_id + FROM db_pegboard_runner.actor_runners + WHERE actor_id = ? + LIMIT 1 + ) + SELECT + toUInt32(floor((toUnixTimestamp(TimeUnix) - ?) / ?)) as time_bucket_index, + MetricName as metric_name, + max(Value) as value, + COALESCE(Attributes['tcp_state'], '') as tcp_state, + COALESCE(Attributes['udp_state'], '') as udp_state, + COALESCE(Attributes['device'], '') as device, + COALESCE(Attributes['failure_type'], '') as failure_type, + COALESCE(Attributes['scope'], '') as scope, + COALESCE(Attributes['state'], '') as task_state, + COALESCE(Attributes['interface'], '') as interface + FROM otel.otel_metrics_gauge + WHERE + TimeUnix >= fromUnixTimestamp(?) + AND TimeUnix <= fromUnixTimestamp(?) 
+ AND MetricName IN [ + 'container_cpu_load_average_10s', + 'container_file_descriptors', + 'container_last_seen', + 'container_memory_usage_bytes', + 'container_memory_working_set_bytes', + 'container_memory_cache', + 'container_memory_rss', + 'container_memory_swap', + 'container_memory_mapped_file', + 'container_memory_max_usage_bytes', + 'container_network_tcp_usage_total', + 'container_network_tcp6_usage_total', + 'container_network_udp_usage_total', + 'container_network_udp6_usage_total', + 'container_sockets', + 'container_spec_cpu_period', + 'container_spec_cpu_shares', + 'container_spec_memory_limit_bytes', + 'container_spec_memory_reservation_limit_bytes', + 'container_spec_memory_swap_limit_bytes', + 'container_start_time_seconds', + 'container_tasks_state', + 'container_threads', + 'container_threads_max', + 'container_processes' + ] + AND has(Attributes, 'id') + AND startsWith(Attributes['id'], concat(?, runner_data.runner_id, '-')) + GROUP BY time_bucket_index, metric_name, tcp_state, udp_state, device, failure_type, scope, task_state, interface + ORDER BY time_bucket_index ASC, metric_name + " + ); + + let gauge_future = clickhouse + .query(&gauge_query) + .bind(&container_id) + .bind(start_seconds) + .bind(interval_seconds) + .bind(start_seconds) + .bind(end_seconds) + .bind(&prefix) + .fetch_all::(); + + // Query sum metrics (rates/counters) + let sum_query = indoc!( + " + WITH runner_data AS ( + SELECT runner_id + FROM db_pegboard_runner.actor_runners + WHERE actor_id = ? + LIMIT 1 + ) + SELECT + toUInt32(floor((toUnixTimestamp(TimeUnix) - ?) / ?)) as time_bucket_index, + MetricName as metric_name, + max(Value) as value, + COALESCE(Attributes['tcp_state'], '') as tcp_state, + COALESCE(Attributes['udp_state'], '') as udp_state, + COALESCE(Attributes['device'], '') as device, + COALESCE(Attributes['failure_type'], '') as failure_type, + COALESCE(Attributes['scope'], '') as scope, + COALESCE(Attributes['state'], '') as task_state, + COALESCE(Attributes['interface'], '') as interface + FROM otel.otel_metrics_sum + WHERE + TimeUnix >= fromUnixTimestamp(?) + AND TimeUnix <= fromUnixTimestamp(?) 
+ AND MetricName IN [ + 'container_cpu_schedstat_run_periods_total', + 'container_cpu_schedstat_run_seconds_total', + 'container_cpu_schedstat_runqueue_seconds_total', + 'container_cpu_system_seconds_total', + 'container_cpu_user_seconds_total', + 'container_cpu_usage_seconds_total', + 'container_memory_failcnt', + 'container_memory_failures_total', + 'container_fs_reads_bytes_total', + 'container_fs_writes_bytes_total', + 'container_network_receive_bytes_total', + 'container_network_receive_errors_total', + 'container_network_receive_packets_dropped_total', + 'container_network_receive_packets_total', + 'container_network_transmit_bytes_total', + 'container_network_transmit_errors_total', + 'container_network_transmit_packets_dropped_total', + 'container_network_transmit_packets_total' + ] + AND has(Attributes, 'id') + AND startsWith(Attributes['id'], concat(?, runner_data.runner_id, '-')) + GROUP BY time_bucket_index, metric_name, tcp_state, udp_state, device, failure_type, scope, task_state, interface + ORDER BY time_bucket_index ASC, metric_name + " + ); + + let sum_future = clickhouse + .query(&sum_query) + .bind(&container_id) + .bind(start_seconds) + .bind(interval_seconds) + .bind(start_seconds) + .bind(end_seconds) + .bind(&prefix) + .fetch_all::(); + + let (gauge_rows, sum_rows) = + tokio::try_join!(gauge_future, sum_future).map_err(|err| GlobalError::from(err))?; + + // Map metric types based on query source + let gauge_rows: Vec = gauge_rows + .into_iter() + .map(|row| ProcessedMetricRow { + row, + metric_type: MetricType::Gauge, + }) + .collect(); + + let sum_rows: Vec = sum_rows + .into_iter() + .map(|row| ProcessedMetricRow { + row, + metric_type: MetricType::Counter, + }) + .collect(); + + // Combine both result sets + let mut rows = gauge_rows; + rows.extend(sum_rows); + + // Calculate the number of time buckets we expect + let num_buckets = ((end_seconds - start_seconds) / interval_seconds + 1) as usize; + + // Use HashMap to store metrics with their attributes and types + let mut metrics: HashMap<(String, BTreeMap), (String, Vec)> = + HashMap::new(); + + // Process rows and organize by metric name + attributes + for processed_row in rows { + let row = &processed_row.row; + if row.time_bucket_index >= num_buckets as u32 { + continue; + } + let bucket_idx = row.time_bucket_index as usize; + + // Build attributes map for this row + let mut attributes = BTreeMap::new(); + + // Add non-empty attributes + if !row.tcp_state.is_empty() { + attributes.insert("tcp_state".to_string(), row.tcp_state.clone()); + } + if !row.udp_state.is_empty() { + attributes.insert("udp_state".to_string(), row.udp_state.clone()); + } + if !row.device.is_empty() { + attributes.insert("device".to_string(), row.device.clone()); + } + if !row.failure_type.is_empty() { + attributes.insert("failure_type".to_string(), row.failure_type.clone()); + } + if !row.scope.is_empty() { + attributes.insert("scope".to_string(), row.scope.clone()); + } + if !row.task_state.is_empty() { + attributes.insert("state".to_string(), row.task_state.clone()); + } + if !row.interface.is_empty() { + attributes.insert("interface".to_string(), row.interface.clone()); + } + + // Create metric key (metric name + attributes) + let metric_key = (row.metric_name.clone(), attributes); + + // Initialize metric entry if it doesn't exist + let (_existing_type, metric_values) = metrics.entry(metric_key).or_insert_with(|| { + ( + processed_row.metric_type.as_str().to_string(), + vec![0.0; num_buckets], + ) + }); + + // Add or set the value 
based on metric type + match processed_row.metric_type { + MetricType::Counter => { + metric_values[bucket_idx] += row.value; + } + MetricType::Gauge => { + metric_values[bucket_idx] = row.value; + } + } + } + + // Convert HashMap to ordered vectors for response + let mut metric_names = Vec::new(); + let mut metric_attributes = Vec::new(); + let mut metric_types = Vec::new(); + let mut metric_values = Vec::new(); + + // Sort metrics by name for consistent ordering + let mut sorted_metrics: Vec<_> = metrics.into_iter().collect(); + sorted_metrics.sort_by(|a, b| a.0 .0.cmp(&b.0 .0)); + + for ((name, attributes), (metric_type, values)) in sorted_metrics { + metric_names.push(name); + // Convert BTreeMap back to HashMap for the API response + let attributes_hashmap: HashMap = attributes.into_iter().collect(); + metric_attributes.push(attributes_hashmap); + metric_types.push(metric_type); + metric_values.push(values); + } + + Ok(models::ContainersGetContainerMetricsResponse { + container_ids: vec![container_id.to_string()], + metric_names, + metric_attributes, + metric_types, + metric_values, + }) +} diff --git a/packages/core/api/actor/src/route/containers/mod.rs b/packages/core/api/actor/src/route/containers/mod.rs new file mode 100644 index 0000000000..9c59e73abc --- /dev/null +++ b/packages/core/api/actor/src/route/containers/mod.rs @@ -0,0 +1,788 @@ +use std::{collections::HashMap, time::Duration}; + +use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; +use futures_util::{StreamExt, TryStreamExt}; +use rivet_api::{ + apis::{configuration::Configuration, containers_api}, + models, +}; +use rivet_operation::prelude::*; +use serde::Deserialize; +use tracing::Instrument; + +use crate::auth::{Auth, CheckOpts, CheckOutput}; + +use super::{actors::resolve_dc, GlobalQuery}; + +pub mod logs; +pub mod metrics; + +#[derive(Debug, Clone, Deserialize)] +pub struct GlobalEndpointTypeQuery { + #[serde(flatten)] + global: GlobalQuery, + endpoint_type: Option, +} + +// MARK: GET /v1/containers/{} +#[tracing::instrument(skip_all)] +pub async fn get( + ctx: Ctx, + container_id: util::Id, + _watch_index: WatchIndexQuery, + query: GlobalEndpointTypeQuery, +) -> GlobalResult { + let CheckOutput { game_id, .. } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let dcs = if let Some(label) = container_id.label() { + ctx.op(cluster::ops::datacenter::get_for_label::Input { + labels: vec![label], + }) + .await? + .datacenters + } else { + // Fetch all datacenters + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], + }) + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_list_res = ctx + .op(cluster::ops::datacenter::list::Input { + cluster_ids: vec![cluster_id], + }) + .await?; + let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); + ctx.op(cluster::ops::datacenter::get::Input { + datacenter_ids: cluster.datacenter_ids, + }) + .await? 
+ .datacenters + }; + + // Filter the datacenters that can be contacted + let filtered_datacenters = dcs + .into_iter() + .filter(|dc| crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false)) + .collect::>(); + + if filtered_datacenters.is_empty() { + bail!("no valid datacenters with worker and guard pools"); + } + + // Query every datacenter for the given container + let mut futures = filtered_datacenters + .into_iter() + .map(|dc| async { + let dc = dc; + + let config = Configuration { + client: rivet_pools::reqwest::client().await?, + base_path: ctx.config().server()?.rivet.edge_api_url_str(&dc.name_id)?, + bearer_access_token: ctx.auth().api_token.clone(), + ..Default::default() + }; + + // Pass the request to the edge api + use containers_api::ContainersGetError::*; + match containers_api::containers_get( + &config, + &container_id.to_string(), + query.global.project.as_deref(), + query.global.environment.as_deref(), + query.endpoint_type, + ) + .await + { + Ok(res) => Ok(res), + Err(rivet_api::apis::Error::ResponseError(content)) => match content.entity { + Some(Status400(body)) + | Some(Status403(body)) + | Some(Status404(body)) + | Some(Status408(body)) + | Some(Status429(body)) + | Some(Status500(body)) => Err(GlobalError::bad_request_builder(&body.code) + .http_status(content.status) + .message(body.message) + .build()), + _ => bail!("unknown error: {:?} {:?}", content.status, content.content), + }, + Err(err) => bail!("request error: {err:?}"), + } + }) + .collect::>(); + let mut last_error = None; + + // Return first api response that succeeds + while let Some(result) = futures.next().await { + match result { + Ok(value) => return Ok(value), + Err(err) => last_error = Some(err), + } + } + + // Otherwise return the last error + Err(unwrap!(last_error)) +} + +// MARK: POST /v1/containers +#[tracing::instrument(skip_all)] +pub async fn create( + ctx: Ctx, + body: models::ContainersCreateContainerRequest, + query: GlobalEndpointTypeQuery, +) -> GlobalResult { + let CheckOutput { game_id, .. 
} = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], + }) + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_name_id = resolve_dc(&ctx, cluster_id, body.region.clone()).await?; + + let config = Configuration { + client: rivet_pools::reqwest::client().await?, + base_path: ctx.config().server()?.rivet.edge_api_url_str(&dc_name_id)?, + bearer_access_token: ctx.auth().api_token.clone(), + ..Default::default() + }; + + // Pass the request to the edge api + use containers_api::ContainersCreateError::*; + match containers_api::containers_create( + &config, + body, + query.global.project.as_deref(), + query.global.environment.as_deref(), + query.endpoint_type, + ) + .instrument(tracing::info_span!("proxy_request", base_path=%config.base_path)) + .await + { + Ok(res) => Ok(res), + Err(rivet_api::apis::Error::ResponseError(content)) => match content.entity { + Some(Status400(body)) + | Some(Status403(body)) + | Some(Status404(body)) + | Some(Status408(body)) + | Some(Status429(body)) + | Some(Status500(body)) => Err(GlobalError::bad_request_builder(&body.code) + .http_status(content.status) + .message(body.message) + .build()), + _ => bail!("unknown error: {:?} {:?}", content.status, content.content), + }, + Err(err) => bail!("request error: {err:?}"), + } +} + +// MARK: DELETE /v1/containers/{} +#[derive(Debug, Clone, Deserialize)] +pub struct DeleteQuery { + #[serde(flatten)] + global: GlobalQuery, + override_kill_timeout: Option, +} + +#[tracing::instrument(skip_all)] +pub async fn destroy( + ctx: Ctx, + container_id: util::Id, + query: DeleteQuery, +) -> GlobalResult { + let CheckOutput { game_id, .. } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let dcs = if let Some(label) = container_id.label() { + ctx.op(cluster::ops::datacenter::get_for_label::Input { + labels: vec![label], + }) + .await? + .datacenters + } else { + // Fetch all datacenters + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], + }) + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_list_res = ctx + .op(cluster::ops::datacenter::list::Input { + cluster_ids: vec![cluster_id], + }) + .await?; + let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); + ctx.op(cluster::ops::datacenter::get::Input { + datacenter_ids: cluster.datacenter_ids, + }) + .await? 
+ .datacenters + }; + + // Filter the datacenters that can be contacted + let filtered_datacenters = dcs + .into_iter() + .filter(|dc| crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false)) + .collect::>(); + + if filtered_datacenters.is_empty() { + bail!("no valid datacenters with worker and guard pools"); + } + + // Query every datacenter + let mut futures = filtered_datacenters + .into_iter() + .map(|dc| async { + let dc = dc; + + let config = Configuration { + client: rivet_pools::reqwest::client().await?, + base_path: ctx.config().server()?.rivet.edge_api_url_str(&dc.name_id)?, + bearer_access_token: ctx.auth().api_token.clone(), + ..Default::default() + }; + + // Pass the request to the edge api + use containers_api::ContainersDestroyError::*; + match containers_api::containers_destroy( + &config, + &container_id.to_string(), + query.global.project.as_deref(), + query.global.environment.as_deref(), + query.override_kill_timeout, + ) + .instrument(tracing::info_span!("proxy_request", base_path=%config.base_path)) + .await + { + Ok(res) => Ok(res), + Err(rivet_api::apis::Error::ResponseError(content)) => match content.entity { + Some(Status400(body)) + | Some(Status403(body)) + | Some(Status404(body)) + | Some(Status408(body)) + | Some(Status429(body)) + | Some(Status500(body)) => Err(GlobalError::bad_request_builder(&body.code) + .http_status(content.status) + .message(body.message) + .build()), + _ => bail!("unknown error: {:?} {:?}", content.status, content.content), + }, + Err(err) => bail!("request error: {err:?}"), + } + }) + .collect::>(); + let mut error: Option = None; + + // Return first api response that succeeds + while let Some(result) = futures.next().await { + match result { + Ok(value) => return Ok(value), + Err(err) => { + // Overwrite error if its currently an CONTAINER_NOT_FOUND error or None + if error + .as_ref() + .map(|err| err.is(formatted_error::code::CONTAINER_NOT_FOUND)) + .unwrap_or(true) + { + error = Some(err); + } + } + } + } + + // Otherwise return error + Err(unwrap!(error)) +} + +// MARK: POST /v1/containers/{}/upgrade +#[tracing::instrument(skip_all)] +pub async fn upgrade( + ctx: Ctx, + container_id: util::Id, + body: models::ContainersUpgradeContainerRequest, + query: GlobalQuery, +) -> GlobalResult { + let CheckOutput { game_id, .. } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let dcs = if let Some(label) = container_id.label() { + ctx.op(cluster::ops::datacenter::get_for_label::Input { + labels: vec![label], + }) + .await? + .datacenters + } else { + // Fetch all datacenters + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], + }) + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_list_res = ctx + .op(cluster::ops::datacenter::list::Input { + cluster_ids: vec![cluster_id], + }) + .await?; + let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); + ctx.op(cluster::ops::datacenter::get::Input { + datacenter_ids: cluster.datacenter_ids, + }) + .await? 
+ .datacenters + }; + + // Filter the datacenters that can be contacted + let filtered_datacenters = dcs + .into_iter() + .filter(|dc| crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false)) + .collect::>(); + + if filtered_datacenters.is_empty() { + bail!("no valid datacenters with worker and guard pools"); + } + + // Query every datacenter + let mut futures = filtered_datacenters + .into_iter() + .map(|dc| async { + let dc = dc; + + let config = Configuration { + client: rivet_pools::reqwest::client().await?, + base_path: ctx.config().server()?.rivet.edge_api_url_str(&dc.name_id)?, + bearer_access_token: ctx.auth().api_token.clone(), + ..Default::default() + }; + + // Pass the request to the edge api + use containers_api::ContainersUpgradeError::*; + match containers_api::containers_upgrade( + &config, + &container_id.to_string(), + body.clone(), + query.project.as_deref(), + query.environment.as_deref(), + ) + .instrument(tracing::info_span!("proxy_request", base_path=%config.base_path)) + .await + { + Ok(res) => Ok(res), + Err(rivet_api::apis::Error::ResponseError(content)) => match content.entity { + Some(Status400(body)) + | Some(Status403(body)) + | Some(Status404(body)) + | Some(Status408(body)) + | Some(Status429(body)) + | Some(Status500(body)) => Err(GlobalError::bad_request_builder(&body.code) + .http_status(content.status) + .message(body.message) + .build()), + _ => bail!("unknown error: {:?} {:?}", content.status, content.content), + }, + Err(err) => bail!("request error: {err:?}"), + } + }) + .collect::>(); + let mut last_error = None; + + // Return first api response that succeeds + while let Some(result) = futures.next().await { + match result { + Ok(value) => return Ok(value), + Err(err) => last_error = Some(err), + } + } + + // Otherwise return the last error + Err(unwrap!(last_error)) +} + +// MARK: POST /v1/containers/upgrade +#[tracing::instrument(skip_all)] +pub async fn upgrade_all( + ctx: Ctx, + body: models::ContainersUpgradeAllContainersRequest, + query: GlobalQuery, +) -> GlobalResult { + let CheckOutput { game_id, .. } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let tags = unwrap_with!(&body.tags, API_BAD_BODY, error = "missing property `tags`"); + + ensure_with!( + tags.as_object().map(|x| x.len()).unwrap_or_default() <= 8, + API_BAD_BODY, + error = "Too many tags (max 8)." + ); + + let tags = unwrap_with!( + serde_json::from_value::>(tags.clone()).ok(), + API_BAD_BODY, + error = "`tags` must be `Map`" + ); + + for (k, v) in &tags { + ensure_with!( + !k.is_empty(), + API_BAD_BODY, + error = "tags[]: Tag label cannot be empty." 
+ ); + ensure_with!( + k.len() <= 32, + API_BAD_BODY, + error = format!( + "tags[{:?}]: Tag label too large (max 32 bytes).", + util::safe_slice(k, 0, 32), + ), + ); + ensure_with!( + !v.is_empty(), + API_BAD_BODY, + error = format!("tags[{k:?}]: Tag value cannot be empty.") + ); + ensure_with!( + v.len() <= 1024, + API_BAD_BODY, + error = format!("tags[{k:?}]: Tag value too large (max 1024 bytes)."), + ); + } + + // Fetch all datacenters + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], + }) + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_list_res = ctx + .op(cluster::ops::datacenter::list::Input { + cluster_ids: vec![cluster_id], + }) + .await?; + let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); + let dcs_res = ctx + .op(cluster::ops::datacenter::get::Input { + datacenter_ids: cluster.datacenter_ids, + }) + .await?; + + // Filter the datacenters that can be contacted + let filtered_datacenters = dcs_res + .datacenters + .into_iter() + .filter(|dc| crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false)) + .collect::>(); + + if filtered_datacenters.is_empty() { + bail!("no valid datacenters with worker and guard pools"); + } + + // Query every datacenter + let futures = filtered_datacenters + .into_iter() + .map(|dc| async { + let dc = dc; + + let config = Configuration { + client: rivet_pools::reqwest::client().await?, + base_path: ctx.config().server()?.rivet.edge_api_url_str(&dc.name_id)?, + bearer_access_token: ctx.auth().api_token.clone(), + ..Default::default() + }; + + // Pass the request to the edge api + use containers_api::ContainersUpgradeAllError::*; + match containers_api::containers_upgrade_all( + &config, + body.clone(), + query.project.as_deref(), + query.environment.as_deref(), + ) + .instrument(tracing::info_span!("proxy_request", base_path=%config.base_path)) + .await + { + Ok(res) => Ok(res), + Err(rivet_api::apis::Error::ResponseError(content)) => match content.entity { + Some(Status400(body)) + | Some(Status403(body)) + | Some(Status404(body)) + | Some(Status408(body)) + | Some(Status429(body)) + | Some(Status500(body)) => Err(GlobalError::bad_request_builder(&body.code) + .http_status(content.status) + .message(body.message) + .build()), + _ => bail!("unknown error: {:?} {:?}", content.status, content.content), + }, + Err(err) => bail!("request error: {err:?}"), + } + }) + .collect::>(); + + // Aggregate results + let count = futures_util::stream::iter(futures) + .buffer_unordered(16) + .try_fold(0, |a, res| std::future::ready(Ok(a + res.count))) + .await?; + + Ok(models::ContainersUpgradeAllContainersResponse { count }) +} + +// MARK: GET /v1/containers +#[derive(Debug, Clone, Deserialize)] +pub struct ListQuery { + #[serde(flatten)] + global_endpoint_type: GlobalEndpointTypeQuery, + tags_json: Option, + include_destroyed: Option, + cursor: Option, +} + +#[tracing::instrument(skip_all)] +pub async fn list_containers( + ctx: Ctx, + _watch_index: WatchIndexQuery, + query: ListQuery, +) -> GlobalResult { + let CheckOutput { game_id, .. 
} = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global_endpoint_type.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + // Validate tags + if let Some(tags) = &query.tags_json { + let tags = unwrap_with!( + serde_json::from_str::>(tags).ok(), + API_BAD_QUERY_PARAMETER, + parameter = "tags_json", + error = "`tags` must be `Map`" + ); + + ensure_with!( + tags.len() <= 8, + API_BAD_QUERY_PARAMETER, + parameter = "tags_json", + error = "Too many tags (max 8)." + ); + + for (k, v) in &tags { + ensure_with!( + !k.is_empty(), + API_BAD_QUERY_PARAMETER, + parameter = "tags_json", + error = "tags_json[]: Tag label cannot be empty." + ); + ensure_with!( + k.len() <= 32, + API_BAD_QUERY_PARAMETER, + parameter = "tags_json", + error = format!( + "tags_json[{:?}]: Tag label too large (max 32 bytes).", + util::safe_slice(k, 0, 32), + ), + ); + ensure_with!( + !v.is_empty(), + API_BAD_QUERY_PARAMETER, + parameter = "tags_json", + error = format!("tags_json[{k:?}]: Tag value cannot be empty.") + ); + ensure_with!( + v.len() <= 1024, + API_BAD_QUERY_PARAMETER, + parameter = "tags_json", + error = format!("tags_json[{k:?}]: Tag value too large (max 1024 bytes)."), + ); + } + } + + // Fetch all datacenters + let clusters_res = ctx + .op(cluster::ops::get_for_game::Input { + game_ids: vec![game_id], + }) + .await?; + let cluster_id = unwrap!(clusters_res.games.first()).cluster_id; + let dc_list_res = ctx + .op(cluster::ops::datacenter::list::Input { + cluster_ids: vec![cluster_id], + }) + .await?; + let cluster = unwrap!(dc_list_res.clusters.into_iter().next()); + let dcs_res = ctx + .op(cluster::ops::datacenter::get::Input { + datacenter_ids: cluster.datacenter_ids, + }) + .await?; + + // Filter the datacenters that can be contacted + let filtered_datacenters = dcs_res + .datacenters + .into_iter() + .filter(|dc| crate::utils::filter_edge_dc(ctx.config(), dc).unwrap_or(false)) + .collect::>(); + + if filtered_datacenters.is_empty() { + bail!("no valid datacenters with worker and guard pools"); + } + + // Query every datacenter + let futures = filtered_datacenters + .into_iter() + .map(|dc| async { + let dc = dc; + + let config = Configuration { + client: rivet_pools::reqwest::client().await?, + base_path: ctx.config().server()?.rivet.edge_api_url_str(&dc.name_id)?, + bearer_access_token: ctx.auth().api_token.clone(), + ..Default::default() + }; + + // Pass the request to the edge api + let timeout_res = tokio::time::timeout( + Duration::from_secs(30), + containers_api::containers_list( + &config, + query.global_endpoint_type.global.project.as_deref(), + query.global_endpoint_type.global.environment.as_deref(), + query.global_endpoint_type.endpoint_type, + query.tags_json.as_deref(), + query.include_destroyed, + query.cursor.as_deref(), + ) + .instrument(tracing::info_span!("proxy_request", base_path=%config.base_path)), + ) + .await; + + use containers_api::ContainersListError::*; + match timeout_res { + Ok(timeout_res) => match timeout_res { + Ok(res) => Ok(res), + Err(rivet_api::apis::Error::ResponseError(content)) => match content.entity { + Some(Status400(body)) + | Some(Status403(body)) + | Some(Status404(body)) + | Some(Status408(body)) + | Some(Status429(body)) + | Some(Status500(body)) => { + return Err(GlobalError::bad_request_builder(&body.code) + .http_status(content.status) + .message(body.message) + .build()) + } + _ => bail!("unknown error: {:?} {:?}", content.status, content.content), + }, + Err(err) => bail!("request error: {err:?}"), + 
}, + Err(_) => { + tracing::error!(dc=?dc.name_id, "timed out requesting dc"); + bail_with!(API_REQUEST_TIMEOUT); + } + } + }) + .collect::>(); + + let mut results = futures_util::stream::iter(futures) + .buffer_unordered(16) + .collect::>() + .await; + + // Aggregate results + let mut containers = Vec::new(); + for res in &mut results { + match res { + Ok(res) => containers.extend(std::mem::take(&mut res.containers)), + Err(err) => tracing::error!(?err, "failed to request edge dc"), + } + } + + // Error only if all requests failed + if !results.is_empty() && results.iter().all(|res| res.is_err()) { + return Err(unwrap!(unwrap!(results.into_iter().next()).err())); + } + + // Sort by create ts desc + // + // This is an ISO 8601 string and is safely sortable + containers.sort_by_cached_key(|x| std::cmp::Reverse(x.created_at.clone())); + + // Shorten array since returning all containers from all regions could end up returning `regions * + // 32` results, which is a lot. + containers.truncate(32); + + // TODO: Subtracting a ms might skip a container in a rare edge case, need to build compound + // cursor of [created_at, container_id] that we pass to the fdb range + let cursor = containers.last().map(|x| { + let datetime = x + .created_at + .parse::>() + .unwrap_or_default(); + let unix_ts = datetime.timestamp_millis() - 1; + unix_ts.to_string() + }); + + Ok(models::ContainersListContainersResponse { + containers, + pagination: Box::new(models::Pagination { cursor }), + }) +} diff --git a/packages/core/api/actor/src/route/mod.rs b/packages/core/api/actor/src/route/mod.rs index 2901c8fd13..785c10e3f1 100644 --- a/packages/core/api/actor/src/route/mod.rs +++ b/packages/core/api/actor/src/route/mod.rs @@ -7,8 +7,7 @@ use uuid::Uuid; pub mod actors; pub mod builds; -pub mod logs; -pub mod metrics; +pub mod containers; pub mod regions; pub mod routes; @@ -47,6 +46,301 @@ impl GlobalQuery { define_router!
{ cors: |_config| CorsConfigBuilder::public().build(), + routes: { + "v2" / "actors": { + GET: actors::list_actors( + query: actors::ListQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + POST: actors::create( + query: actors::GlobalEndpointTypeQuery, + body: models::ActorsCreateActorRequest, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v2" / "actors" / "upgrade": { + POST: actors::upgrade_all( + query: GlobalQuery, + body: models::ActorsUpgradeAllActorsRequest, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v2" / "actors" / util::Id: { + GET: actors::get( + query: actors::GlobalEndpointTypeQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 60_000, bucket: duration::minutes(1) }, + ], + }, + + ), + DELETE: actors::destroy( + query: actors::DeleteQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v2" / "actors" / util::Id / "upgrade": { + POST: actors::upgrade( + query: GlobalQuery, + body: models::ActorsUpgradeActorRequest, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v2" / "actors" / "logs": { + GET: actors::logs::get_logs( + query: actors::logs::GetActorLogsQuery, + opt_auth: true, + ), + }, + + "v2" / "actors" / util::Id / "metrics" / "history": { + GET: actors::metrics::get_metrics( + query: actors::metrics::GetActorMetricsQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 100, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v1" / "containers": { + GET: containers::list_containers( + query: containers::ListQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + POST: containers::create( + query: containers::GlobalEndpointTypeQuery, + body: models::ContainersCreateContainerRequest, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v1" / "containers" / "upgrade": { + POST: containers::upgrade_all( + query: GlobalQuery, + body: models::ContainersUpgradeAllContainersRequest, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v1" / "containers" / util::Id: { + GET: containers::get( + query: containers::GlobalEndpointTypeQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 60_000, bucket: duration::minutes(1) }, + ], + }, + + ), + DELETE: containers::destroy( + query: containers::DeleteQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v1" / "containers" / util::Id / "upgrade": { + POST: containers::upgrade( + query: GlobalQuery, + body: models::ContainersUpgradeContainerRequest, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v1" / "containers" / "logs": { + GET: containers::logs::get_logs( + query: containers::logs::GetContainerLogsQuery, + opt_auth: true, + ), + }, + + "v1" / "containers" / util::Id / "metrics" / "history": { + GET: containers::metrics::get_metrics( + query: containers::metrics::GetContainerMetricsQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 100, bucket: 
duration::minutes(1) }, + ], + }, + ), + }, + + "v1" / "builds": { + GET: builds::list( + query: builds::ListQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 60_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v1" / "builds" / Uuid: { + GET: builds::get( + query: GlobalQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 60_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v1" / "builds" / Uuid / "tags": { + PATCH: builds::patch_tags( + query: GlobalQuery, + body: models::BuildsPatchBuildTagsRequest, + opt_auth: true, + ), + }, + + "v1" / "builds" / "prepare": { + POST: builds::create_build( + query: GlobalQuery, + body: models::BuildsPrepareBuildRequest, + opt_auth: true, + ), + }, + + "v1" / "builds" / Uuid / "complete": { + POST: builds::complete_build( + query: GlobalQuery, + body: serde_json::Value, + opt_auth: true, + ), + }, + + // MARK: Regions + "v1" / "regions": { + GET: regions::list( + query: GlobalQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 60_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + "v1" / "regions" / "recommend": { + GET: regions::recommend( + query: regions::RecommendQuery, + opt_auth: true, + ), + }, + + // MARK: Routes + "v1" / "routes": { + GET: routes::list( + query: routes::ListQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 60_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v1" / "routes" / String: { + PUT: routes::update( + query: GlobalQuery, + body: models::RoutesUpdateRouteBody, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + DELETE: routes::delete( + query: GlobalQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + }, + + mounts: [ + { + path: OldRouter, + }, + { + path: OldRouter, + prefix: "/v1" + }, + ], +} + +define_router! { + name: OldRouter, routes: { "actors": { GET: actors::list_actors( @@ -167,8 +461,8 @@ define_router! { }, "actors" / "logs": { - GET: logs::get_logs( - query: logs::GetActorLogsQuery, + GET: actors::logs::get_logs( + query: actors::logs::GetActorLogsQuery, opt_auth: true, ), }, @@ -185,9 +479,9 @@ define_router! { ), }, - "actors" / Uuid / "metrics" / "history": { - GET: metrics::get_metrics( - query: metrics::GetActorMetricsQuery, + "actors" / util::Id / "metrics" / "history": { + GET: actors::metrics::get_metrics( + query: actors::metrics::GetActorMetricsQuery, opt_auth: true, rate_limit: { buckets: [ @@ -320,15 +614,15 @@ define_router! { }, "games" / Uuid / "environments" / Uuid / "servers": { - GET: actors::list_servers_deprecated( - query: actors::ListQuery, + GET: actors::v1::list_servers_deprecated( + query: actors::v1::ListQuery, rate_limit: { buckets: [ { count: 60_000, bucket: duration::minutes(1) }, ], }, ), - POST: actors::create_deprecated( + POST: actors::v1::create_deprecated( body: models::ServersCreateServerRequest, rate_limit: { buckets: [ @@ -339,7 +633,7 @@ define_router! { }, "games" / Uuid / "environments" / Uuid / "servers" / Uuid: { - GET: actors::get_deprecated( + GET: actors::v1::get_deprecated( rate_limit: { buckets: [ { count: 60_000, bucket: duration::minutes(1) }, @@ -347,8 +641,8 @@ define_router! 
{ }, ), - DELETE: actors::destroy_deprecated( - query: actors::DeleteQuery, + DELETE: actors::v1::destroy_deprecated( + query: actors::v1::DeleteQuery, rate_limit: { buckets: [ { count: 10_000, bucket: duration::minutes(1) }, @@ -358,8 +652,8 @@ define_router! { }, "games" / Uuid / "environments" / Uuid / "servers" / Uuid / "logs" : { - GET: logs::get_logs_deprecated( - query: logs::GetActorLogsQuery, + GET: actors::v1::logs::get_logs_deprecated( + query: actors::v1::logs::GetActorLogsQuery, ), }, diff --git a/packages/core/api/actor/src/route/regions.rs b/packages/core/api/actor/src/route/regions.rs index baba18656f..1401dbd824 100644 --- a/packages/core/api/actor/src/route/regions.rs +++ b/packages/core/api/actor/src/route/regions.rs @@ -11,7 +11,7 @@ use crate::{ use super::GlobalQuery; -// MARK: GET /regions +// MARK: GET /v1/regions #[tracing::instrument(skip_all)] pub async fn list( ctx: Ctx, @@ -124,7 +124,7 @@ pub async fn list_deprecated( Ok(models::ServersListDatacentersResponse { datacenters }) } -// MARK: GET /regions/recommend +// MARK: GET /v1/regions/recommend #[derive(Debug, Clone, Deserialize)] pub struct RecommendQuery { #[serde(flatten)] diff --git a/packages/core/api/status/src/route/actor.rs b/packages/core/api/status/src/route/actor.rs index d19258840a..bde9a53b8a 100644 --- a/packages/core/api/status/src/route/actor.rs +++ b/packages/core/api/status/src/route/actor.rs @@ -164,7 +164,7 @@ pub async fn status( ..Default::default() }; - let body = models::ActorsCreateActorRequest { + let body = models::ActorsV1CreateActorRequest { tags: Some(serde_json::json!({ "name": query.build.build_name(), })), @@ -173,12 +173,12 @@ pub async fn status( "current": "true", }))), region: Some(dc.name_id.clone()), - network: Some(Box::new(models::ActorsCreateActorNetworkRequest { + network: Some(Box::new(models::ActorsV1CreateActorNetworkRequest { ports: Some(HashMap::from([( "http".to_string(), - models::ActorsCreateActorPortRequest { - protocol: models::ActorsPortProtocol::Https, - routing: Some(Box::new(models::ActorsPortRouting { + models::ActorsV1CreateActorPortRequest { + protocol: models::ActorsV1PortProtocol::Https, + routing: Some(Box::new(models::ActorsV1PortRouting { guard: Some(serde_json::json!({})), host: None, })), @@ -187,14 +187,14 @@ pub async fn status( )])), ..Default::default() })), - lifecycle: Some(Box::new(models::ActorsLifecycle { + lifecycle: Some(Box::new(models::ActorsV1Lifecycle { // Don't reboot on failure durable: Some(false), ..Default::default() })), resources: match &query.build { StatusQueryBuild::WsIsolate => None, - StatusQueryBuild::WsContainer => Some(Box::new(models::ActorsResources { + StatusQueryBuild::WsContainer => Some(Box::new(models::ActorsV1Resources { cpu: 100, memory: 128, })), @@ -204,8 +204,8 @@ pub async fn status( tracing::info!("creating actor"); // Pass the request to the edge api - use actors_api::ActorsCreateError::*; - let res = match actors_api::actors_create( + use actors_v1_api::ActorsV1CreateError::*; + let res = match actors_v1_api::actors_v1_create( &config, body, Some(&system_test_project), @@ -252,8 +252,8 @@ pub async fn status( let port = unwrap!(res.actor.network.ports.get("http"), "missing http protocol"); let protocol = match port.protocol { - models::ActorsPortProtocol::Http | models::ActorsPortProtocol::Tcp => "http", - models::ActorsPortProtocol::Https => "https", + models::ActorsV1PortProtocol::Http | models::ActorsV1PortProtocol::Tcp => "http", + models::ActorsV1PortProtocol::Https => "https", _ => 
bail!("unsupported protocol"), }; let hostname = unwrap_ref!(port.hostname); @@ -269,8 +269,8 @@ pub async fn status( // Destroy actor regardless of connection status { - use actors_api::ActorsDestroyError::*; - match actors_api::actors_destroy( + use actors_v1_api::ActorsV1DestroyError::*; + match actors_v1_api::actors_v1_destroy( &config, &actor_id.to_string(), Some(&system_test_project), diff --git a/packages/core/services/cluster/src/workflows/server/install/install_scripts/files/cadvisor_metric_exporter.sh b/packages/core/services/cluster/src/workflows/server/install/install_scripts/files/cadvisor_metric_exporter.sh index 6e5a3d5f1c..8b965b26ba 100644 --- a/packages/core/services/cluster/src/workflows/server/install/install_scripts/files/cadvisor_metric_exporter.sh +++ b/packages/core/services/cluster/src/workflows/server/install/install_scripts/files/cadvisor_metric_exporter.sh @@ -28,7 +28,7 @@ ExecStart=/usr/bin/cadvisor \ --prometheus_endpoint="/metrics" \ --disable_metrics=memory_numa,disk,advtcp,accelerator,hugetlb,referenced_memory,resctrl \ --docker_only=false \ - --raw_cgroup_prefix_whitelist=/system.slice/pegboard-actor- + --raw_cgroup_prefix_whitelist=/system.slice/pegboard-runner- [Install] WantedBy=multi-user.target diff --git a/packages/edge/api/actor/src/assert.rs b/packages/edge/api/actor/src/assert.rs index e9052c6a61..03c3474811 100644 --- a/packages/edge/api/actor/src/assert.rs +++ b/packages/edge/api/actor/src/assert.rs @@ -4,6 +4,29 @@ use rivet_operation::prelude::*; use crate::auth::Auth; +/// Validates that an actor belongs to the given game ID. +pub async fn actor_for_env_v1( + ctx: &Ctx, + actor_id: Uuid, + _game_id: Uuid, + env_id: Uuid, + endpoint_type: Option, +) -> GlobalResult { + let actors_res = ctx + .op(pegboard::ops::actor::v1::get::Input { + actor_ids: vec![actor_id], + endpoint_type, + allow_errors: false, + }) + .await?; + let actor = unwrap_with!(actors_res.actors.into_iter().next(), ACTOR_NOT_FOUND); + + // Validate token can access actor + ensure_with!(actor.env_id == env_id, ACTOR_NOT_FOUND); + + Ok(actor) +} + /// Validates that an actor belongs to the given game ID. pub async fn actor_for_env( ctx: &Ctx, @@ -26,3 +49,26 @@ pub async fn actor_for_env( Ok(actor) } + +/// Validates that an container belongs to the given game ID. 
+pub async fn container_for_env( + ctx: &Ctx, + container_id: util::Id, + _game_id: Uuid, + env_id: Uuid, + endpoint_type: Option, +) -> GlobalResult { + let actors_res = ctx + .op(pegboard::ops::actor::get::Input { + actor_ids: vec![container_id], + endpoint_type, + allow_errors: false, + }) + .await?; + let actor = unwrap_with!(actors_res.actors.into_iter().next(), CONTAINER_NOT_FOUND); + + // Validate token can access container + ensure_with!(actor.env_id == env_id, CONTAINER_NOT_FOUND); + + Ok(actor) +} diff --git a/packages/edge/api/actor/src/route/actors.rs b/packages/edge/api/actor/src/route/actors/mod.rs similarity index 56% rename from packages/edge/api/actor/src/route/actors.rs rename to packages/edge/api/actor/src/route/actors/mod.rs index d12929fdfa..17b0d0bf26 100644 --- a/packages/edge/api/actor/src/route/actors.rs +++ b/packages/edge/api/actor/src/route/actors/mod.rs @@ -16,6 +16,8 @@ use crate::{ use super::GlobalQuery; +pub mod v1; + #[derive(Debug, Clone, Deserialize)] pub struct GlobalEndpointTypeQuery { #[serde(flatten)] @@ -23,20 +25,11 @@ pub struct GlobalEndpointTypeQuery { endpoint_type: Option, } -// MARK: GET /actors/{} +// MARK: GET /v2/actors/{} #[tracing::instrument(skip_all)] pub async fn get( ctx: Ctx, actor_id: util::Id, - watch_index: WatchIndexQuery, - query: GlobalEndpointTypeQuery, -) -> GlobalResult { - get_inner(&ctx, actor_id, watch_index, query).await -} - -async fn get_inner( - ctx: &Ctx, - actor_id: util::Id, _watch_index: WatchIndexQuery, query: GlobalEndpointTypeQuery, ) -> GlobalResult { @@ -79,7 +72,7 @@ async fn get_inner( }) } -// MARK: POST /actors +// MARK: POST /v2/actors #[tracing::instrument(skip_all)] pub async fn create( ctx: Ctx, @@ -119,6 +112,10 @@ pub async fn create( error = "`tags` must be `Map`" ); + if let build::types::BuildAllocationType::None = build.allocation_type { + // todo unsupported build allocation type, use v1 endpoint. 
+ } + let network = body.network.unwrap_or_default(); let endpoint_type = body .runtime @@ -127,241 +124,110 @@ pub async fn create( .map(|n| n.endpoint_type) .map(ApiInto::api_into); - let actor_id = if let build::types::BuildAllocationType::None = build.allocation_type { - let actor_id = Uuid::new_v4(); - tracing::info!(?actor_id, ?tags, "creating actor with tags"); - - let resources = match build.kind { - build::types::BuildKind::DockerImage | build::types::BuildKind::OciBundle => { - let resources = unwrap_with!( - body.resources, - API_BAD_BODY, - error = "`resources` must be set for actors using Docker builds" - ); + let actor_id = util::Id::new_v1(ctx.config().server()?.rivet.edge()?.datacenter_label()); + tracing::info!(?actor_id, ?tags, "creating actor with tags"); - (*resources).api_into() - } - build::types::BuildKind::JavaScript => { - ensure_with!( - body.resources.is_none(), - API_BAD_BODY, - error = "actors using JavaScript builds cannot set `resources`" - ); - - pegboard::types::ActorResources::default_isolate() - } - }; - - let created_fut = if network.wait_ready.unwrap_or_default() { - std::future::pending().boxed() - } else { - let mut created_sub = ctx - .subscribe::(("actor_id", actor_id)) - .await?; - - async move { created_sub.next().await }.boxed() - }; - let mut ready_sub = ctx - .subscribe::(("actor_id", actor_id)) - .await?; - let mut fail_sub = ctx - .subscribe::(("actor_id", actor_id)) - .await?; - let mut destroy_sub = ctx - .subscribe::(("actor_id", actor_id)) + let created_fut = if network.wait_ready.unwrap_or_default() { + std::future::pending().boxed() + } else { + let mut created_sub = ctx + .subscribe::(("actor_id", actor_id)) .await?; - ctx.workflow(pegboard::workflows::actor::Input { - actor_id, - env_id, - tags, - resources, - lifecycle: body.lifecycle.map(|x| (*x).api_into()).unwrap_or_else(|| { - pegboard::types::ActorLifecycle { - kill_timeout_ms: 0, - durable: false, - } - }), - image_id: build.build_id, - root_user_enabled: game_config.root_user_enabled, - // args: body.runtime.arguments.unwrap_or_default(), - args: Vec::new(), - network_mode: network.mode.unwrap_or_default().api_into(), - environment: body.runtime.and_then(|r| r.environment).unwrap_or_default().as_hashable(), - network_ports: network - .ports - .unwrap_or_default() - .into_iter() - .map(|(s, p)| GlobalResult::Ok(( - s.clone(), - pegboard::workflows::actor::Port { - internal_port: p.internal_port.map(TryInto::try_into).transpose()?, - routing: if let Some(routing) = p.routing { - match *routing { - models::ActorsPortRouting { - guard: Some(_gg), - host: None, - } => pegboard::types::Routing::GameGuard { - protocol: p.protocol.api_into(), - }, - models::ActorsPortRouting { - guard: None, - host: Some(_), - } => pegboard::types::Routing::Host { - protocol: match p.protocol.api_try_into() { - Err(err) if GlobalError::is(&err, formatted_error::code::ACTOR_FAILED_TO_CREATE) => { - // Add location - bail_with!( - ACTOR_FAILED_TO_CREATE, - error = format!("network.ports[{s:?}].protocol: Host port protocol must be either TCP or UDP.") - ); - } - x => x?, - }, - }, - models::ActorsPortRouting { .. 
} => { - bail_with!( - ACTOR_FAILED_TO_CREATE, - error = format!("network.ports[{s:?}].routing: Must specify either `guard` or `host` routing type.") - ); - } - } - } else { - pegboard::types::Routing::GameGuard { - protocol: p.protocol.api_into(), - } - } - } - ))) - .collect::>>()?, - endpoint_type, - }) - .tag("actor_id", actor_id) - .dispatch() + async move { created_sub.next().await }.boxed() + }; + let mut ready_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + let mut fail_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + let mut destroy_sub = ctx + .subscribe::(("actor_id", actor_id)) .await?; - // Wait for create/ready, fail, or destroy - tokio::select! { - res = created_fut => { res?; }, - res = ready_sub.next() => { res?; }, - res = fail_sub.next() => { - let msg = res?; - bail_with!(ACTOR_FAILED_TO_CREATE, error = msg.message); - } - res = destroy_sub.next() => { - res?; - bail_with!(ACTOR_FAILED_TO_CREATE, error = "Actor failed before reaching a ready state."); - } - } - - util::Id::from(actor_id) - } else { - let actor_id = util::Id::new_v1(ctx.config().server()?.rivet.edge()?.datacenter_label()); - tracing::info!(?actor_id, ?tags, "creating actor with tags"); - - let created_fut = if network.wait_ready.unwrap_or_default() { - std::future::pending().boxed() - } else { - let mut created_sub = ctx - .subscribe::(("actor_id", actor_id)) - .await?; - - async move { created_sub.next().await }.boxed() - }; - let mut ready_sub = ctx - .subscribe::(("actor_id", actor_id)) - .await?; - let mut fail_sub = ctx - .subscribe::(("actor_id", actor_id)) - .await?; - let mut destroy_sub = ctx - .subscribe::(("actor_id", actor_id)) - .await?; - ctx.workflow(pegboard::workflows::actor2::Input { - actor_id, - env_id, - tags, - resources: body.resources.map(|x| (*x).api_into()), - lifecycle: body.lifecycle.map(|x| (*x).api_into()).unwrap_or_else(|| { - pegboard::types::ActorLifecycle { - kill_timeout_ms: 0, - durable: false, - } - }), - image_id: build.build_id, - root_user_enabled: game_config.root_user_enabled, - // args: body.runtime.arguments.unwrap_or_default(), - args: Vec::new(), - network_mode: network.mode.unwrap_or_default().api_into(), - environment: body.runtime.and_then(|r| r.environment).unwrap_or_default().as_hashable(), - network_ports: network - .ports - .unwrap_or_default() - .into_iter() - .map(|(s, p)| GlobalResult::Ok(( - s.clone(), - pegboard::workflows::actor2::Port { - internal_port: p.internal_port.map(TryInto::try_into).transpose()?, - routing: if let Some(routing) = p.routing { - match *routing { - models::ActorsPortRouting { - guard: Some(_gg), - host: None, - } => pegboard::types::Routing::GameGuard { - protocol: p.protocol.api_into(), - }, - models::ActorsPortRouting { - guard: None, - host: Some(_), - } => pegboard::types::Routing::Host { - protocol: match p.protocol.api_try_into() { - Err(err) if GlobalError::is(&err, formatted_error::code::ACTOR_FAILED_TO_CREATE) => { - // Add location - bail_with!( - ACTOR_FAILED_TO_CREATE, - error = format!("network.ports[{s:?}].protocol: Host port protocol must be either TCP or UDP.") - ); - } - x => x?, - }, - }, - models::ActorsPortRouting { .. 
} => { - bail_with!( - ACTOR_FAILED_TO_CREATE, - error = format!("network.ports[{s:?}].routing: Must specify either `guard` or `host` routing type.") - ); - } - } - } else { - pegboard::types::Routing::GameGuard { + ctx.workflow(pegboard::workflows::actor::Input { + actor_id, + env_id, + tags, + resources: None, + lifecycle: body.lifecycle.map(|x| (*x).api_into()).unwrap_or_else(|| { + pegboard::types::ActorLifecycle { + kill_timeout_ms: 0, + durable: false, + } + }), + image_id: build.build_id, + root_user_enabled: game_config.root_user_enabled, + // args: body.runtime.arguments.unwrap_or_default(), + args: Vec::new(), + network_mode: network.mode.unwrap_or_default().api_into(), + environment: body.runtime.and_then(|r| r.environment).unwrap_or_default().as_hashable(), + network_ports: network + .ports + .unwrap_or_default() + .into_iter() + .map(|(s, p)| GlobalResult::Ok(( + s.clone(), + pegboard::workflows::actor::Port { + internal_port: p.internal_port.map(TryInto::try_into).transpose()?, + routing: if let Some(routing) = p.routing { + match *routing { + models::ActorsPortRouting { + guard: Some(_gg), + host: None, + } => pegboard::types::Routing::GameGuard { protocol: p.protocol.api_into(), + }, + models::ActorsPortRouting { + guard: None, + host: Some(_), + } => pegboard::types::Routing::Host { + protocol: match p.protocol.api_try_into() { + Err(err) if GlobalError::is(&err, formatted_error::code::ACTOR_FAILED_TO_CREATE) => { + // Add location + bail_with!( + ACTOR_FAILED_TO_CREATE, + error = format!("network.ports[{s:?}].protocol: Host port protocol must be either TCP or UDP.") + ); + } + x => x?, + }, + }, + models::ActorsPortRouting { .. } => { + bail_with!( + ACTOR_FAILED_TO_CREATE, + error = format!("network.ports[{s:?}].routing: Must specify either `guard` or `host` routing type.") + ); } } + } else { + pegboard::types::Routing::GameGuard { + protocol: p.protocol.api_into(), + } } - ))) - .collect::>>()?, - endpoint_type, - }) - .tag("actor_id", actor_id) - .dispatch() - .await?; + } + ))) + .collect::>>()?, + endpoint_type, + }) + .tag("actor_id", actor_id) + .dispatch() + .await?; - // Wait for create/ready, fail, or destroy - tokio::select! { - res = created_fut => { res?; }, - res = ready_sub.next() => { res?; }, - res = fail_sub.next() => { - let msg = res?; - bail_with!(ACTOR_FAILED_TO_CREATE, error = msg.message); - } - res = destroy_sub.next() => { - res?; - bail_with!(ACTOR_FAILED_TO_CREATE, error = "Actor failed before reaching a ready state."); - } + // Wait for create/ready, fail, or destroy + tokio::select! 
{ + res = created_fut => { res?; }, + res = ready_sub.next() => { res?; }, + res = fail_sub.next() => { + let msg = res?; + bail_with!(ACTOR_FAILED_TO_CREATE, error = msg.message); } - - actor_id - }; + res = destroy_sub.next() => { + res?; + bail_with!(ACTOR_FAILED_TO_CREATE, error = "Actor failed before reaching a ready state."); + } + } let actors_res = ctx .op(pegboard::ops::actor::get::Input { @@ -385,7 +251,7 @@ pub async fn create( }) } -// MARK: DELETE /actors/{} +// MARK: DELETE /v2/actors/{} #[derive(Debug, Clone, Deserialize)] pub struct DeleteQuery { #[serde(flatten)] @@ -425,9 +291,6 @@ pub async fn destroy( ); let mut sub = ctx - .subscribe::(("actor_id", actor_id)) - .await?; - let mut old_sub = ctx .subscribe::(("actor_id", actor_id)) .await?; @@ -439,37 +302,20 @@ pub async fn destroy( return Ok(json!({})); } - // Try actor2 first - let res = ctx - .signal(pegboard::workflows::actor2::Destroy { - override_kill_timeout_ms: query.override_kill_timeout, - }) - .to_workflow::() - .tag("actor_id", actor_id) - .send() - .await; - - if let Some(WorkflowError::WorkflowNotFound) = res.as_workflow_error() { - // Try old actors - ctx.signal(pegboard::workflows::actor::Destroy { - override_kill_timeout_ms: query.override_kill_timeout, - }) - .to_workflow::() - .tag("actor_id", actor_id) - .send() - .await?; - - old_sub.next().await?; - } else { - res?; + ctx.signal(pegboard::workflows::actor::Destroy { + override_kill_timeout_ms: query.override_kill_timeout, + }) + .to_workflow::() + .tag("actor_id", actor_id) + .send() + .await?; - sub.next().await?; - } + sub.next().await?; Ok(json!({})) } -// MARK: POST /actors/{}/upgrade +// MARK: POST /v2/actors/{}/upgrade #[tracing::instrument(skip_all)] pub async fn upgrade( ctx: Ctx, @@ -501,33 +347,18 @@ pub async fn upgrade( ) .await?; - // Try actor2 first - let res = ctx - .signal(pegboard::workflows::actor2::Upgrade { - image_id: build.build_id, - }) - .to_workflow::() - .tag("actor_id", actor_id) - .send() - .await; - - if let Some(WorkflowError::WorkflowNotFound) = res.as_workflow_error() { - // Try old actors - ctx.signal(pegboard::workflows::actor::Upgrade { - image_id: build.build_id, - }) - .to_workflow::() - .tag("actor_id", actor_id) - .send() - .await?; - } else { - res?; - } + ctx.signal(pegboard::workflows::actor::Upgrade { + image_id: build.build_id, + }) + .to_workflow::() + .tag("actor_id", actor_id) + .send() + .await?; Ok(json!({})) } -// MARK: POST /actors/upgrade +// MARK: POST /v2/actors/upgrade #[tracing::instrument(skip_all)] pub async fn upgrade_all( ctx: Ctx, @@ -619,33 +450,12 @@ pub async fn upgrade_all( let ctx = (*ctx).clone(); futures_util::stream::iter(list_res.actors) .map(|actor| { - let ctx = ctx.clone(); - async move { - // Try actor2 first - let res = ctx - .signal(pegboard::workflows::actor2::Upgrade { - image_id: build.build_id, - }) - .to_workflow::() - .tag("actor_id", actor.actor_id) - .send() - .await; - - if let Some(WorkflowError::WorkflowNotFound) = res.as_workflow_error() { - // Try old actors - ctx.signal(pegboard::workflows::actor::Upgrade { - image_id: build.build_id, - }) - .to_workflow::() - .tag("actor_id", actor.actor_id) - .send() - .await?; - } else { - res?; - } - - GlobalResult::Ok(()) - } + ctx.signal(pegboard::workflows::actor::Upgrade { + image_id: build.build_id, + }) + .to_workflow::() + .tag("actor_id", actor.actor_id) + .send() }) .buffer_unordered(32) .try_collect::>() @@ -661,7 +471,7 @@ pub async fn upgrade_all( }) } -// MARK: GET /actors +// MARK: GET /v2/actors 
#[derive(Debug, Clone, Deserialize)] pub struct ListQuery { #[serde(flatten)] @@ -675,14 +485,6 @@ pub struct ListQuery { #[tracing::instrument(skip_all)] pub async fn list_actors( ctx: Ctx, - watch_index: WatchIndexQuery, - query: ListQuery, -) -> GlobalResult { - list_actors_inner(&ctx, watch_index, query).await -} - -async fn list_actors_inner( - ctx: &Ctx, _watch_index: WatchIndexQuery, query: ListQuery, ) -> GlobalResult { @@ -757,7 +559,7 @@ async fn list_actors_inner( .collect::>>()?; Ok(models::ActorsListActorsResponse { - actors: actors, + actors, pagination: Box::new(models::Pagination { cursor }), }) } diff --git a/packages/edge/api/actor/src/route/actors/v1.rs b/packages/edge/api/actor/src/route/actors/v1.rs new file mode 100644 index 0000000000..3e5c1c1c37 --- /dev/null +++ b/packages/edge/api/actor/src/route/actors/v1.rs @@ -0,0 +1,690 @@ +use std::collections::HashMap; + +use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; +use chirp_workflow::prelude::*; +use futures_util::{FutureExt, StreamExt, TryStreamExt}; +use rivet_api::models; +use rivet_convert::{ApiInto, ApiTryInto}; +use serde::Deserialize; +use serde_json::json; +use util::serde::AsHashableExt; + +use crate::{ + assert, + auth::{Auth, CheckOpts, CheckOutput}, +}; + +use super::GlobalQuery; + +#[derive(Debug, Clone, Deserialize)] +pub struct GlobalEndpointTypeQuery { + #[serde(flatten)] + global: GlobalQuery, + endpoint_type: Option, +} + +// MARK: GET /actors/{} +#[tracing::instrument(skip_all)] +pub async fn get( + ctx: Ctx, + actor_id: Uuid, + _watch_index: WatchIndexQuery, + query: GlobalEndpointTypeQuery, +) -> GlobalResult { + let CheckOutput { env_id, .. } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + // Get the actor + let actors_res = ctx + .op(pegboard::ops::actor::v1::get::Input { + actor_ids: vec![actor_id], + endpoint_type: query.endpoint_type.map(ApiInto::api_into), + allow_errors: false, + }) + .await?; + let actor = unwrap_with!(actors_res.actors.first(), ACTOR_NOT_FOUND); + + // Get the datacenter + let dc_id = ctx.config().server()?.rivet.edge()?.datacenter_id; + let dc_res = ctx + .op(cluster::ops::datacenter::get::Input { + datacenter_ids: vec![dc_id], + }) + .await?; + let dc = unwrap!(dc_res.datacenters.first()); + + // Validate token can access actor + ensure_with!(actor.env_id == env_id, ACTOR_NOT_FOUND); + + Ok(models::ActorsV1GetActorResponse { + actor: Box::new(pegboard::types::v1::convert_actor_to_api( + actor.clone(), + dc, + )?), + }) +} + +// MARK: POST /actors +#[tracing::instrument(skip_all)] +pub async fn create( + ctx: Ctx, + body: models::ActorsV1CreateActorRequest, + query: GlobalEndpointTypeQuery, +) -> GlobalResult { + let CheckOutput { game_id, env_id } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let (game_configs_res, build) = tokio::try_join!( + ctx.op(pegboard::ops::game_config::get::Input { + game_ids: vec![game_id], + }), + resolve_build( + &ctx, + game_id, + env_id, + body.build, + body.build_tags.flatten(), + false + ), + )?; + let game_config = unwrap!(game_configs_res.game_configs.first()); + + let tags = unwrap_with!( + serde_json::from_value(body.tags.unwrap_or_default()).ok(), + API_BAD_BODY, + error = "`tags` must be `Map`" + ); + + if let build::types::BuildAllocationType::None = build.allocation_type { + // todo unsupported build allocation type, 
use v2 endpoint. + } + + let network = body.network.unwrap_or_default(); + let endpoint_type = body + .runtime + .as_ref() + .and_then(|r| r.network.as_ref()) + .map(|n| n.endpoint_type) + .map(ApiInto::api_into); + + let actor_id = Uuid::new_v4(); + tracing::info!(?actor_id, ?tags, "creating actor with tags"); + + let resources = match build.kind { + build::types::BuildKind::DockerImage | build::types::BuildKind::OciBundle => { + let resources = unwrap_with!( + body.resources, + API_BAD_BODY, + error = "`resources` must be set for actors using Docker builds" + ); + + (*resources).api_into() + } + build::types::BuildKind::JavaScript => { + ensure_with!( + body.resources.is_none(), + API_BAD_BODY, + error = "actors using JavaScript builds cannot set `resources`" + ); + + pegboard::types::ActorResources::default_isolate() + } + }; + + let created_fut = if network.wait_ready.unwrap_or_default() { + std::future::pending().boxed() + } else { + let mut created_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + + async move { created_sub.next().await }.boxed() + }; + let mut ready_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + let mut fail_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + let mut destroy_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + + ctx.workflow(pegboard::workflows::actor::v1::Input { + actor_id, + env_id, + tags, + resources, + lifecycle: body.lifecycle.map(|x| (*x).api_into()).unwrap_or_else(|| { + pegboard::types::ActorLifecycle { + kill_timeout_ms: 0, + durable: false, + } + }), + image_id: build.build_id, + root_user_enabled: game_config.root_user_enabled, + // args: body.runtime.arguments.unwrap_or_default(), + args: Vec::new(), + network_mode: network.mode.unwrap_or_default().api_into(), + environment: body.runtime.and_then(|r| r.environment).unwrap_or_default().as_hashable(), + network_ports: network + .ports + .unwrap_or_default() + .into_iter() + .map(|(s, p)| GlobalResult::Ok(( + s.clone(), + pegboard::workflows::actor::v1::Port { + internal_port: p.internal_port.map(TryInto::try_into).transpose()?, + routing: if let Some(routing) = p.routing { + match *routing { + models::ActorsV1PortRouting { + guard: Some(_gg), + host: None, + } => pegboard::types::Routing::GameGuard { + protocol: p.protocol.api_into(), + }, + models::ActorsV1PortRouting { + guard: None, + host: Some(_), + } => pegboard::types::Routing::Host { + protocol: match p.protocol.api_try_into() { + Err(err) if GlobalError::is(&err, formatted_error::code::ACTOR_FAILED_TO_CREATE) => { + // Add location + bail_with!( + ACTOR_FAILED_TO_CREATE, + error = format!("network.ports[{s:?}].protocol: Host port protocol must be either TCP or UDP.") + ); + } + x => x?, + }, + }, + models::ActorsV1PortRouting { .. } => { + bail_with!( + ACTOR_FAILED_TO_CREATE, + error = format!("network.ports[{s:?}].routing: Must specify either `guard` or `host` routing type.") + ); + } + } + } else { + pegboard::types::Routing::GameGuard { + protocol: p.protocol.api_into(), + } + } + } + ))) + .collect::>>()?, + endpoint_type, + }) + .tag("actor_id", actor_id) + .dispatch() + .await?; + // Wait for create/ready, fail, or destroy + tokio::select! 
{ + res = created_fut => { res?; }, + res = ready_sub.next() => { res?; }, + res = fail_sub.next() => { + let msg = res?; + bail_with!(ACTOR_FAILED_TO_CREATE, error = msg.message); + } + res = destroy_sub.next() => { + res?; + bail_with!(ACTOR_FAILED_TO_CREATE, error = "Actor failed before reaching a ready state."); + } + } + + let actors_res = ctx + .op(pegboard::ops::actor::v1::get::Input { + actor_ids: vec![actor_id], + endpoint_type: query.endpoint_type.map(ApiInto::api_into), + allow_errors: false, + }) + .await?; + let actor = unwrap_with!(actors_res.actors.first(), ACTOR_NOT_FOUND); + + let dc_id = ctx.config().server()?.rivet.edge()?.datacenter_id; + let dc_res = ctx + .op(cluster::ops::datacenter::get::Input { + datacenter_ids: vec![dc_id], + }) + .await?; + let dc = unwrap!(dc_res.datacenters.first()); + + Ok(models::ActorsV1CreateActorResponse { + actor: Box::new(pegboard::types::v1::convert_actor_to_api( + actor.clone(), + dc, + )?), + }) +} + +// MARK: DELETE /actors/{} +#[derive(Debug, Clone, Deserialize)] +pub struct DeleteQuery { + #[serde(flatten)] + global: GlobalQuery, + override_kill_timeout: Option, +} + +#[tracing::instrument(skip_all)] +pub async fn destroy( + ctx: Ctx, + actor_id: Uuid, + query: DeleteQuery, +) -> GlobalResult { + let CheckOutput { game_id, env_id } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + ensure_with!( + query.override_kill_timeout.unwrap_or(0) >= 0, + API_BAD_QUERY_PARAMETER, + parameter = "override_kill_timeout", + error = "must be positive" + ); + ensure_with!( + query.override_kill_timeout.unwrap_or(0) < 2 * 60 * 60 * 1000, + API_BAD_QUERY_PARAMETER, + parameter = "override_kill_timeout", + error = "cannot be longer than 2 hours" + ); + + let mut sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + + // Get actor after sub is created + let actor = assert::actor_for_env_v1(&ctx, actor_id, game_id, env_id, None).await?; + + // Already destroyed + if actor.destroy_ts.is_some() { + return Ok(json!({})); + } + + ctx.signal(pegboard::workflows::actor::v1::Destroy { + override_kill_timeout_ms: query.override_kill_timeout, + }) + .to_workflow::() + .tag("actor_id", actor_id) + .send() + .await?; + + sub.next().await?; + + Ok(json!({})) +} + +// MARK: POST /actors/{}/upgrade +#[tracing::instrument(skip_all)] +pub async fn upgrade( + ctx: Ctx, + actor_id: Uuid, + body: models::ActorsV1UpgradeActorRequest, + query: GlobalQuery, +) -> GlobalResult { + let CheckOutput { game_id, env_id } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + assert::actor_for_env_v1(&ctx, actor_id, game_id, env_id, None).await?; + + let build = resolve_build( + &ctx, + game_id, + env_id, + body.build, + body.build_tags.flatten(), + true, + ) + .await?; + + ctx.signal(pegboard::workflows::actor::v1::Upgrade { + image_id: build.build_id, + }) + .to_workflow::() + .tag("actor_id", actor_id) + .send() + .await?; + + Ok(json!({})) +} + +// MARK: POST /actors/upgrade +#[tracing::instrument(skip_all)] +pub async fn upgrade_all( + ctx: Ctx, + body: models::ActorsV1UpgradeAllActorsRequest, + query: GlobalQuery, +) -> GlobalResult { + let CheckOutput { game_id, env_id } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let tags = unwrap_with!(body.tags, API_BAD_BODY, error = "missing 
property `tags`"); + + ensure_with!( + tags.as_object().map(|x| x.len()).unwrap_or_default() <= 8, + API_BAD_BODY, + error = "Too many tags (max 8)." + ); + + let tags = unwrap_with!( + serde_json::from_value::>(tags).ok(), + API_BAD_BODY, + error = "`tags` must be `Map`" + ); + + for (k, v) in &tags { + ensure_with!( + !k.is_empty(), + API_BAD_BODY, + error = "tags[]: Tag label cannot be empty." + ); + ensure_with!( + k.len() <= 32, + API_BAD_BODY, + error = format!( + "tags[{:?}]: Tag label too large (max 32 bytes).", + util::safe_slice(k, 0, 32), + ), + ); + ensure_with!( + !v.is_empty(), + API_BAD_BODY, + error = format!("tags[{k:?}]: Tag value cannot be empty.") + ); + ensure_with!( + v.len() <= 1024, + API_BAD_BODY, + error = format!("tags[{k:?}]: Tag value too large (max 1024 bytes)."), + ); + } + + let build = resolve_build( + &ctx, + game_id, + env_id, + body.build, + body.build_tags.flatten(), + true, + ) + .await?; + + // Work in batches + let mut count = 0; + let mut created_before = None; + loop { + let list_res = ctx + .op(pegboard::ops::actor::v1::list_for_env::Input { + env_id, + tags: tags.clone(), + include_destroyed: false, + created_before, + limit: 10_000, + }) + .await?; + + count += list_res.actors.len(); + + // TODO: Subtracting a ms might skip an actor in a rare edge case, need to build compound + // cursor of [created_at, actor_id] that we pass to the fdb range + created_before = list_res.actors.last().map(|x| x.create_ts - 1); + + let ctx = (*ctx).clone(); + futures_util::stream::iter(list_res.actors) + .map(|actor| { + let ctx = ctx.clone(); + + ctx.signal(pegboard::workflows::actor::v1::Upgrade { + image_id: build.build_id, + }) + .to_workflow::() + .tag("actor_id", actor.actor_id) + .send() + }) + .buffer_unordered(32) + .try_collect::>() + .await?; + + if count < 10_000 { + break; + } + } + + Ok(models::ActorsV1UpgradeAllActorsResponse { + count: count.try_into()?, + }) +} + +// MARK: GET /actors +#[derive(Debug, Clone, Deserialize)] +pub struct ListQuery { + #[serde(flatten)] + global_endpoint_type: GlobalEndpointTypeQuery, + tags_json: Option, + include_destroyed: Option, + /// Before create timestamp + cursor: Option, +} + +#[tracing::instrument(skip_all)] +pub async fn list_actors( + ctx: Ctx, + _watch_index: WatchIndexQuery, + query: ListQuery, +) -> GlobalResult { + let CheckOutput { env_id, .. 
} = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global_endpoint_type.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let created_before = query.cursor.map(|x| x.parse::()).transpose()?; + + let include_destroyed = query.include_destroyed.unwrap_or(false); + + let tags = unwrap_with!( + query + .tags_json + .as_deref() + .map_or(Ok(HashMap::new()), serde_json::from_str) + .ok(), + API_BAD_QUERY_PARAMETER, + parameter = "tags_json", + error = "must be `Map`" + ); + + let list_res = ctx + .op(pegboard::ops::actor::v1::list_for_env::Input { + env_id, + tags, + include_destroyed, + created_before, + limit: 32, + }) + .await?; + + let mut actors_res = ctx + .op(pegboard::ops::actor::v1::get::Input { + actor_ids: list_res.actors.iter().map(|x| x.actor_id).collect(), + endpoint_type: query + .global_endpoint_type + .endpoint_type + .map(ApiInto::api_into), + allow_errors: false, + }) + .await?; + actors_res.actors.sort_by_key(|x| -x.create_ts); + + let dc_id = ctx.config().server()?.rivet.edge()?.datacenter_id; + let dc_res = ctx + .op(cluster::ops::datacenter::get::Input { + datacenter_ids: vec![dc_id], + }) + .await?; + let dc = unwrap!(dc_res.datacenters.first()); + + // TODO: Subtracting a ms might skip an actor in a rare edge case, need to build compound + // cursor of [created_at, actor_id] that we pass to the fdb range + let cursor = actors_res + .actors + .last() + .map(|x| (x.create_ts - 1).to_string()); + + let actors = actors_res + .actors + .into_iter() + .map(|a| pegboard::types::v1::convert_actor_to_api(a, &dc)) + .collect::>>()?; + + Ok(models::ActorsV1ListActorsResponse { + actors, + pagination: Box::new(models::Pagination { cursor }), + }) +} + +#[tracing::instrument(skip_all, fields(%game_id, %env_id, ?build_id, bypass_cache))] +async fn resolve_build( + ctx: &Ctx, + game_id: Uuid, + env_id: Uuid, + build_id: Option, + build_tags: Option, + bypass_cache: bool, +) -> GlobalResult { + match (build_id, build_tags) { + (Some(build_id), None) => { + let builds_res = ctx + .op(build::ops::get::Input { + build_ids: vec![build_id], + }) + .await?; + let build = unwrap_with!(builds_res.builds.into_iter().next(), BUILD_NOT_FOUND); + + // Ensure build belongs to this game/env + if let Some(build_game_id) = build.game_id { + ensure_with!(build_game_id == game_id, BUILD_NOT_FOUND); + } else if let Some(build_env_id) = build.env_id { + ensure_with!(build_env_id == env_id, BUILD_NOT_FOUND); + } + + Ok(build) + } + // Resolve build from tags + (None, Some(build_tags)) => { + let build_tags = unwrap_with!( + serde_json::from_value::>(build_tags).ok(), + API_BAD_BODY, + error = "`build_tags` must be `Map`" + ); + + ensure_with!( + build_tags.len() < 8, + API_BAD_BODY, + error = "Too many build tags (max 8)." + ); + + for (k, v) in &build_tags { + ensure_with!( + !k.is_empty(), + API_BAD_BODY, + error = "build_tags[]: Build tag label cannot be empty." 
+ ); + ensure_with!( + k.len() < 32, + API_BAD_BODY, + error = format!( + "build_tags[{:?}]: Build tag label too large (max 32 bytes).", + util::safe_slice(k, 0, 32), + ) + ); + ensure_with!( + !v.is_empty(), + API_BAD_BODY, + error = format!("build_tags[{k:?}]: Build tag value cannot be empty.") + ); + ensure_with!( + v.len() < 256, + API_BAD_BODY, + error = + format!("build_tags[{k:?}]: Build tag value too large (max 256 bytes).") + ); + } + + let builds_res = ctx + .op(build::ops::resolve_for_tags::Input { + env_id, + tags: build_tags, + bypass_cache, + }) + .await?; + + let build = unwrap_with!( + builds_res.builds.into_iter().next(), + BUILD_NOT_FOUND_WITH_TAGS + ); + + // Ensure build belongs to this game/env + if let Some(build_game_id) = build.game_id { + ensure_with!(build_game_id == game_id, BUILD_NOT_FOUND); + } else if let Some(build_env_id) = build.env_id { + ensure_with!(build_env_id == env_id, BUILD_NOT_FOUND); + } + + Ok(build) + } + _ => { + bail_with!( + API_BAD_BODY, + error = "must have either `build` or `build_tags`" + ); + } + } +} diff --git a/packages/edge/api/actor/src/route/containers.rs b/packages/edge/api/actor/src/route/containers.rs new file mode 100644 index 0000000000..6fdd7f35ae --- /dev/null +++ b/packages/edge/api/actor/src/route/containers.rs @@ -0,0 +1,668 @@ +use std::collections::HashMap; + +use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; +use chirp_workflow::prelude::*; +use futures_util::{FutureExt, StreamExt, TryStreamExt}; +use rivet_api::models; +use rivet_convert::{ApiInto, ApiTryInto}; +use serde::Deserialize; +use serde_json::json; +use util::serde::AsHashableExt; + +use crate::{ + assert, + auth::{Auth, CheckOpts, CheckOutput}, +}; + +use super::GlobalQuery; + +#[derive(Debug, Clone, Deserialize)] +pub struct GlobalEndpointTypeQuery { + #[serde(flatten)] + global: GlobalQuery, + endpoint_type: Option, +} + +// MARK: GET /v1/containers/{} +#[tracing::instrument(skip_all)] +pub async fn get( + ctx: Ctx, + container_id: util::Id, + _watch_index: WatchIndexQuery, + query: GlobalEndpointTypeQuery, +) -> GlobalResult { + let CheckOutput { env_id, .. 
} = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + // Get the actor + let actors_res = ctx + .op(pegboard::ops::actor::get::Input { + actor_ids: vec![container_id], + endpoint_type: query.endpoint_type.map(ApiInto::api_into), + allow_errors: false, + }) + .await?; + let actor = unwrap_with!(actors_res.actors.first(), CONTAINER_NOT_FOUND); + + // Get the datacenter + let dc_id = ctx.config().server()?.rivet.edge()?.datacenter_id; + let dc_res = ctx + .op(cluster::ops::datacenter::get::Input { + datacenter_ids: vec![dc_id], + }) + .await?; + let dc = unwrap!(dc_res.datacenters.first()); + + // Validate token can access actor + ensure_with!(actor.env_id == env_id, CONTAINER_NOT_FOUND); + + Ok(models::ContainersGetContainerResponse { + container: Box::new(pegboard::types::convert_container_to_api( + actor.clone(), + dc, + )?), + }) +} + +// MARK: POST /v1/containers +#[tracing::instrument(skip_all)] +pub async fn create( + ctx: Ctx, + body: models::ContainersCreateContainerRequest, + query: GlobalEndpointTypeQuery, +) -> GlobalResult { + let CheckOutput { game_id, env_id } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let (game_configs_res, build) = tokio::try_join!( + ctx.op(pegboard::ops::game_config::get::Input { + game_ids: vec![game_id], + }), + resolve_build( + &ctx, + game_id, + env_id, + body.build, + body.build_tags.flatten(), + false + ), + )?; + let game_config = unwrap!(game_configs_res.game_configs.first()); + + let tags = unwrap_with!( + serde_json::from_value(body.tags.unwrap_or_default()).ok(), + API_BAD_BODY, + error = "`tags` must be `Map`" + ); + + if let build::types::BuildAllocationType::None = build.allocation_type { + // todo unsupported build allocation type, use v1 endpoint. 
+ } + + let network = body.network.unwrap_or_default(); + let endpoint_type = body + .runtime + .as_ref() + .and_then(|r| r.network.as_ref()) + .map(|n| n.endpoint_type) + .map(ApiInto::api_into); + + let actor_id = util::Id::new_v1(ctx.config().server()?.rivet.edge()?.datacenter_label()); + tracing::info!(?actor_id, ?tags, "creating container with tags"); + + let created_fut = if network.wait_ready.unwrap_or_default() { + std::future::pending().boxed() + } else { + let mut created_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + + async move { created_sub.next().await }.boxed() + }; + let mut ready_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + let mut fail_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + let mut destroy_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + + ctx.workflow(pegboard::workflows::actor::Input { + actor_id, + env_id, + tags, + resources: Some((*body.resources).api_into()), + lifecycle: body.lifecycle.map(|x| (*x).api_into()).unwrap_or_else(|| { + pegboard::types::ActorLifecycle { + kill_timeout_ms: 0, + durable: false, + } + }), + image_id: build.build_id, + root_user_enabled: game_config.root_user_enabled, + // args: body.runtime.arguments.unwrap_or_default(), + args: Vec::new(), + network_mode: network.mode.unwrap_or_default().api_into(), + environment: body.runtime.and_then(|r| r.environment).unwrap_or_default().as_hashable(), + network_ports: network + .ports + .unwrap_or_default() + .into_iter() + .map(|(s, p)| GlobalResult::Ok(( + s.clone(), + pegboard::workflows::actor::Port { + internal_port: p.internal_port.map(TryInto::try_into).transpose()?, + routing: if let Some(routing) = p.routing { + match *routing { + models::ContainersPortRouting { + guard: Some(_gg), + host: None, + } => pegboard::types::Routing::GameGuard { + protocol: p.protocol.api_into(), + }, + models::ContainersPortRouting { + guard: None, + host: Some(_), + } => pegboard::types::Routing::Host { + protocol: match p.protocol.api_try_into() { + Err(err) if GlobalError::is(&err, formatted_error::code::CONTAINER_FAILED_TO_CREATE) => { + // Add location + bail_with!( + CONTAINER_FAILED_TO_CREATE, + error = format!("network.ports[{s:?}].protocol: Host port protocol must be either TCP or UDP.") + ); + } + x => x?, + }, + }, + models::ContainersPortRouting { .. } => { + bail_with!( + CONTAINER_FAILED_TO_CREATE, + error = format!("network.ports[{s:?}].routing: Must specify either `guard` or `host` routing type.") + ); + } + } + } else { + pegboard::types::Routing::GameGuard { + protocol: p.protocol.api_into(), + } + } + } + ))) + .collect::>>()?, + endpoint_type, + }) + .tag("actor_id", actor_id) + .dispatch() + .await?; + + // Wait for create/ready, fail, or destroy + tokio::select! 
{ + res = created_fut => { res?; }, + res = ready_sub.next() => { res?; }, + res = fail_sub.next() => { + let msg = res?; + bail_with!(CONTAINER_FAILED_TO_CREATE, error = msg.message); + } + res = destroy_sub.next() => { + res?; + bail_with!(CONTAINER_FAILED_TO_CREATE, error = "Container failed before reaching a ready state."); + } + } + + let actors_res = ctx + .op(pegboard::ops::actor::get::Input { + actor_ids: vec![actor_id], + endpoint_type: query.endpoint_type.map(ApiInto::api_into), + allow_errors: false, + }) + .await?; + let actor = unwrap_with!(actors_res.actors.first(), CONTAINER_NOT_FOUND); + + let dc_id = ctx.config().server()?.rivet.edge()?.datacenter_id; + let dc_res = ctx + .op(cluster::ops::datacenter::get::Input { + datacenter_ids: vec![dc_id], + }) + .await?; + let dc = unwrap!(dc_res.datacenters.first()); + + Ok(models::ContainersCreateContainerResponse { + container: Box::new(pegboard::types::convert_container_to_api( + actor.clone(), + dc, + )?), + }) +} + +// MARK: DELETE /v1/containers/{} +#[derive(Debug, Clone, Deserialize)] +pub struct DeleteQuery { + #[serde(flatten)] + global: GlobalQuery, + override_kill_timeout: Option, +} + +#[tracing::instrument(skip_all)] +pub async fn destroy( + ctx: Ctx, + container_id: util::Id, + query: DeleteQuery, +) -> GlobalResult { + let CheckOutput { game_id, env_id } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + ensure_with!( + query.override_kill_timeout.unwrap_or(0) >= 0, + API_BAD_QUERY_PARAMETER, + parameter = "override_kill_timeout", + error = "must be positive" + ); + ensure_with!( + query.override_kill_timeout.unwrap_or(0) < 2 * 60 * 60 * 1000, + API_BAD_QUERY_PARAMETER, + parameter = "override_kill_timeout", + error = "cannot be longer than 2 hours" + ); + + let mut sub = ctx + .subscribe::(("actor_id", container_id)) + .await?; + + // Get actor after sub is created + let actor = assert::container_for_env(&ctx, container_id, game_id, env_id, None).await?; + + // Already destroyed + if actor.destroy_ts.is_some() { + return Ok(json!({})); + } + + ctx.signal(pegboard::workflows::actor::Destroy { + override_kill_timeout_ms: query.override_kill_timeout, + }) + .to_workflow::() + .tag("actor_id", container_id) + .send() + .await?; + + sub.next().await?; + + Ok(json!({})) +} + +// MARK: POST /v1/containers/{}/upgrade +#[tracing::instrument(skip_all)] +pub async fn upgrade( + ctx: Ctx, + container_id: util::Id, + body: models::ContainersUpgradeContainerRequest, + query: GlobalQuery, +) -> GlobalResult { + let CheckOutput { game_id, env_id } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + assert::container_for_env(&ctx, container_id, game_id, env_id, None).await?; + + let build = resolve_build( + &ctx, + game_id, + env_id, + body.build, + body.build_tags.flatten(), + true, + ) + .await?; + + ctx.signal(pegboard::workflows::actor::Upgrade { + image_id: build.build_id, + }) + .to_workflow::() + .tag("actor_id", container_id) + .send() + .await?; + + Ok(json!({})) +} + +// MARK: POST /v1/containers/upgrade +#[tracing::instrument(skip_all)] +pub async fn upgrade_all( + ctx: Ctx, + body: models::ContainersUpgradeAllContainersRequest, + query: GlobalQuery, +) -> GlobalResult { + let CheckOutput { game_id, env_id } = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query, + allow_service_token: true, + opt_auth: false, + }, 
+ ) + .await?; + + let tags = unwrap_with!(body.tags, API_BAD_BODY, error = "missing property `tags`"); + + ensure_with!( + tags.as_object().map(|x| x.len()).unwrap_or_default() <= 8, + API_BAD_BODY, + error = "Too many tags (max 8)." + ); + + let tags = unwrap_with!( + serde_json::from_value::>(tags).ok(), + API_BAD_BODY, + error = "`tags` must be `Map`" + ); + + for (k, v) in &tags { + ensure_with!( + !k.is_empty(), + API_BAD_BODY, + error = "tags[]: Tag label cannot be empty." + ); + ensure_with!( + k.len() <= 32, + API_BAD_BODY, + error = format!( + "tags[{:?}]: Tag label too large (max 32 bytes).", + util::safe_slice(k, 0, 32), + ), + ); + ensure_with!( + !v.is_empty(), + API_BAD_BODY, + error = format!("tags[{k:?}]: Tag value cannot be empty.") + ); + ensure_with!( + v.len() <= 1024, + API_BAD_BODY, + error = format!("tags[{k:?}]: Tag value too large (max 1024 bytes)."), + ); + } + + let build = resolve_build( + &ctx, + game_id, + env_id, + body.build, + body.build_tags.flatten(), + true, + ) + .await?; + + // Work in batches + let mut count = 0; + let mut created_before = None; + loop { + let list_res = ctx + .op(pegboard::ops::container::list_for_env::Input { + env_id, + tags: tags.clone(), + include_destroyed: false, + created_before, + limit: 10_000, + }) + .await?; + + count += list_res.actors.len(); + + // TODO: Subtracting a ms might skip an actor in a rare edge case, need to build compound + // cursor of [created_at, actor_id] that we pass to the fdb range + created_before = list_res.actors.last().map(|x| x.create_ts - 1); + + let ctx = (*ctx).clone(); + futures_util::stream::iter(list_res.actors) + .map(|actor| { + ctx.signal(pegboard::workflows::actor::Upgrade { + image_id: build.build_id, + }) + .to_workflow::() + .tag("actor_id", actor.actor_id) + .send() + }) + .buffer_unordered(32) + .try_collect::>() + .await?; + + if count < 10_000 { + break; + } + } + + Ok(models::ContainersUpgradeAllContainersResponse { + count: count.try_into()?, + }) +} + +// MARK: GET /v1/containers +#[derive(Debug, Clone, Deserialize)] +pub struct ListQuery { + #[serde(flatten)] + global_endpoint_type: GlobalEndpointTypeQuery, + tags_json: Option, + include_destroyed: Option, + /// Before create timestamp + cursor: Option, +} + +#[tracing::instrument(skip_all)] +pub async fn list_containers( + ctx: Ctx, + _watch_index: WatchIndexQuery, + query: ListQuery, +) -> GlobalResult { + let CheckOutput { env_id, .. 
} = ctx + .auth() + .check( + ctx.op_ctx(), + CheckOpts { + query: &query.global_endpoint_type.global, + allow_service_token: true, + opt_auth: false, + }, + ) + .await?; + + let created_before = query.cursor.map(|x| x.parse::()).transpose()?; + + let include_destroyed = query.include_destroyed.unwrap_or(false); + + let tags = unwrap_with!( + query + .tags_json + .as_deref() + .map_or(Ok(HashMap::new()), serde_json::from_str) + .ok(), + API_BAD_QUERY_PARAMETER, + parameter = "tags_json", + error = "must be `Map`" + ); + + let list_res = ctx + .op(pegboard::ops::container::list_for_env::Input { + env_id, + tags, + include_destroyed, + created_before, + limit: 32, + }) + .await?; + + let mut actors_res = ctx + .op(pegboard::ops::actor::get::Input { + actor_ids: list_res.actors.iter().map(|x| x.actor_id).collect(), + endpoint_type: query + .global_endpoint_type + .endpoint_type + .map(ApiInto::api_into), + allow_errors: false, + }) + .await?; + actors_res.actors.sort_by_key(|x| -x.create_ts); + + let dc_id = ctx.config().server()?.rivet.edge()?.datacenter_id; + let dc_res = ctx + .op(cluster::ops::datacenter::get::Input { + datacenter_ids: vec![dc_id], + }) + .await?; + let dc = unwrap!(dc_res.datacenters.first()); + + // TODO: Subtracting a ms might skip an actor in a rare edge case, need to build compound + // cursor of [created_at, actor_id] that we pass to the fdb range + let cursor = actors_res + .actors + .last() + .map(|x| (x.create_ts - 1).to_string()); + + let actors = actors_res + .actors + .into_iter() + .map(|a| pegboard::types::convert_container_to_api(a, &dc)) + .collect::>>()?; + + Ok(models::ContainersListContainersResponse { + containers: actors, + pagination: Box::new(models::Pagination { cursor }), + }) +} + +#[tracing::instrument(skip_all, fields(%game_id, %env_id, ?build_id, bypass_cache))] +async fn resolve_build( + ctx: &Ctx, + game_id: Uuid, + env_id: Uuid, + build_id: Option, + build_tags: Option, + bypass_cache: bool, +) -> GlobalResult { + match (build_id, build_tags) { + (Some(build_id), None) => { + let builds_res = ctx + .op(build::ops::get::Input { + build_ids: vec![build_id], + }) + .await?; + let build = unwrap_with!(builds_res.builds.into_iter().next(), BUILD_NOT_FOUND); + + // Ensure build belongs to this game/env + if let Some(build_game_id) = build.game_id { + ensure_with!(build_game_id == game_id, BUILD_NOT_FOUND); + } else if let Some(build_env_id) = build.env_id { + ensure_with!(build_env_id == env_id, BUILD_NOT_FOUND); + } + + Ok(build) + } + // Resolve build from tags + (None, Some(build_tags)) => { + let build_tags = unwrap_with!( + serde_json::from_value::>(build_tags).ok(), + API_BAD_BODY, + error = "`build_tags` must be `Map`" + ); + + ensure_with!( + build_tags.len() < 8, + API_BAD_BODY, + error = "Too many build tags (max 8)." + ); + + for (k, v) in &build_tags { + ensure_with!( + !k.is_empty(), + API_BAD_BODY, + error = "build_tags[]: Build tag label cannot be empty." 
+ ); + ensure_with!( + k.len() < 32, + API_BAD_BODY, + error = format!( + "build_tags[{:?}]: Build tag label too large (max 32 bytes).", + util::safe_slice(k, 0, 32), + ) + ); + ensure_with!( + !v.is_empty(), + API_BAD_BODY, + error = format!("build_tags[{k:?}]: Build tag value cannot be empty.") + ); + ensure_with!( + v.len() < 256, + API_BAD_BODY, + error = + format!("build_tags[{k:?}]: Build tag value too large (max 256 bytes).") + ); + } + + let builds_res = ctx + .op(build::ops::resolve_for_tags::Input { + env_id, + tags: build_tags, + bypass_cache, + }) + .await?; + + let build = unwrap_with!( + builds_res.builds.into_iter().next(), + BUILD_NOT_FOUND_WITH_TAGS + ); + + // Ensure build belongs to this game/env + if let Some(build_game_id) = build.game_id { + ensure_with!(build_game_id == game_id, BUILD_NOT_FOUND); + } else if let Some(build_env_id) = build.env_id { + ensure_with!(build_env_id == env_id, BUILD_NOT_FOUND); + } + + Ok(build) + } + _ => { + bail_with!( + API_BAD_BODY, + error = "must have either `build` or `build_tags`" + ); + } + } +} diff --git a/packages/edge/api/actor/src/route/mod.rs b/packages/edge/api/actor/src/route/mod.rs index 279b7f863f..d6f4085c00 100644 --- a/packages/edge/api/actor/src/route/mod.rs +++ b/packages/edge/api/actor/src/route/mod.rs @@ -5,6 +5,7 @@ use rivet_operation::prelude::*; use serde::Deserialize; pub mod actors; +pub mod containers; #[derive(Debug, Clone, Deserialize)] pub struct GlobalQuery { @@ -43,7 +44,7 @@ define_router! { db_driver: chirp_workflow::db::DatabaseFdbSqliteNats, cors: |_config| CorsConfigBuilder::public().build(), routes: { - "actors": { + "v2" / "actors": { GET: actors::list_actors( query: actors::ListQuery, opt_auth: true, @@ -65,7 +66,7 @@ define_router! { ), }, - "actors" / "upgrade": { + "v2" / "actors" / "upgrade": { POST: actors::upgrade_all( query: GlobalQuery, body: models::ActorsUpgradeAllActorsRequest, @@ -78,7 +79,7 @@ define_router! { ), }, - "actors" / util::Id: { + "v2" / "actors" / util::Id: { GET: actors::get( query: actors::GlobalEndpointTypeQuery, opt_auth: true, @@ -100,7 +101,7 @@ define_router! { ), }, - "actors" / util::Id / "upgrade": { + "v2" / "actors" / util::Id / "upgrade": { POST: actors::upgrade( query: GlobalQuery, body: models::ActorsUpgradeActorRequest, @@ -112,5 +113,162 @@ define_router! 
{ }, ), }, + + "v1" / "containers": { + GET: containers::list_containers( + query: containers::ListQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + POST: containers::create( + query: containers::GlobalEndpointTypeQuery, + body: models::ContainersCreateContainerRequest, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v1" / "containers" / "upgrade": { + POST: containers::upgrade_all( + query: GlobalQuery, + body: models::ContainersUpgradeAllContainersRequest, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v1" / "containers" / util::Id: { + GET: containers::get( + query: containers::GlobalEndpointTypeQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 60_000, bucket: duration::minutes(1) }, + ], + }, + + ), + DELETE: containers::destroy( + query: containers::DeleteQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "v1" / "containers" / util::Id / "upgrade": { + POST: containers::upgrade( + query: GlobalQuery, + body: models::ContainersUpgradeContainerRequest, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + }, + + mounts: [ + { + path: OldRouter, + }, + { + path: OldRouter, + prefix: "/v1" + }, + ], +} + +define_router! { + name: OldRouter, + db_driver: chirp_workflow::db::DatabaseFdbSqliteNats, + cors: |_config| CorsConfigBuilder::public().build(), + routes: { + "actors": { + GET: actors::v1::list_actors( + query: actors::v1::ListQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + POST: actors::v1::create( + query: actors::v1::GlobalEndpointTypeQuery, + body: models::ActorsV1CreateActorRequest, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "actors" / "upgrade": { + POST: actors::v1::upgrade_all( + query: GlobalQuery, + body: models::ActorsV1UpgradeAllActorsRequest, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "actors" / Uuid: { + GET: actors::v1::get( + query: actors::v1::GlobalEndpointTypeQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 60_000, bucket: duration::minutes(1) }, + ], + }, + + ), + DELETE: actors::v1::destroy( + query: actors::v1::DeleteQuery, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, + + "actors" / Uuid / "upgrade": { + POST: actors::v1::upgrade( + query: GlobalQuery, + body: models::ActorsV1UpgradeActorRequest, + opt_auth: true, + rate_limit: { + buckets: [ + { count: 10_000, bucket: duration::minutes(1) }, + ], + }, + ), + }, }, } diff --git a/packages/edge/infra/client/container-runner/src/main.rs b/packages/edge/infra/client/container-runner/src/main.rs index 8f6eb3906f..4dcd6a02ad 100644 --- a/packages/edge/infra/client/container-runner/src/main.rs +++ b/packages/edge/infra/client/container-runner/src/main.rs @@ -62,7 +62,7 @@ fn main() -> Result<()> { // Run the container let exit_code = match container::run( msg_tx.clone(), - &actor_path, + &runner_path, &container_id, root_user_enabled, ) { diff --git a/packages/edge/infra/client/manager/src/image_download_handler.rs 
b/packages/edge/infra/client/manager/src/image_download_handler.rs index d972b1457f..69a9633e7f 100644 --- a/packages/edge/infra/client/manager/src/image_download_handler.rs +++ b/packages/edge/infra/client/manager/src/image_download_handler.rs @@ -117,16 +117,15 @@ impl ImageDownloadHandler { let mut tx = conn.begin().await?; // Get total size of images directory. Note that it doesn't matter if this doesn't - // match the actual fs size because it should either be exactly at or below actual fs - // size. Also calculating fs size manually is expensive. - let (cache_count, images_dir_size) = - sqlx::query_as::<_, (i64, i64)>(indoc!( - " + // match the actual fs size because it should either be exactly at or below actual fs + // size. Also calculating fs size manually is expensive. + let (cache_count, images_dir_size) = sqlx::query_as::<_, (i64, i64)>(indoc!( + " SELECT COUNT(size), COALESCE(SUM(size), 0) FROM images_cache ", - )) - .fetch_one(&mut *tx) - .await?; + )) + .fetch_one(&mut *tx) + .await?; // Prune images // @@ -136,8 +135,7 @@ impl ImageDownloadHandler { // size limit in edge cases by `actual size - compressed size`. In this situation, // that extra difference is already reserved on the file system by the runner // itself. - let (removed_count, removed_bytes) = if images_dir_size as u64 - + image_config.artifact_size + let (removed_count, removed_bytes) = if images_dir_size as u64 + image_config.artifact_size > ctx.config().images.max_cache_size() { // Fetch as many images as it takes to clear up enough space for this new image. @@ -179,7 +177,7 @@ impl ImageDownloadHandler { .bind(image_config.id) .bind( (images_dir_size as u64) - .saturating_add(image_download_size) + .saturating_add(image_config.artifact_size) .saturating_sub(ctx.config().images.max_cache_size()) as i64, ) .fetch_all(&mut *tx) @@ -221,7 +219,8 @@ impl ImageDownloadHandler { }; metrics::IMAGE_CACHE_COUNT.set(cache_count + 1 - removed_count); - metrics::IMAGE_CACHE_SIZE.set(images_dir_size + image_config.artifact_size as i64 - removed_bytes); + metrics::IMAGE_CACHE_SIZE + .set(images_dir_size + image_config.artifact_size as i64 - removed_bytes); sqlx::query(indoc!( " @@ -542,58 +541,6 @@ impl ImageDownloadHandler { Ok(addresses) } - - /// Attempts to fetch HEAD for the image download url and determine the image's download size. - async fn fetch_image_download_size( - &self, - ctx: &Ctx, - image_config: &protocol::Image, - ) -> Result { - // HEAD does not work if not using ATS - if std::env::var("__HACK__DISABLE_FETCH_IMAGE_SIZE").map_or(false, |x| x == "1") { - return Ok(0); - } - - let addresses = self.get_image_addresses(ctx, image_config).await?; - - let mut iter = addresses.into_iter(); - while let Some(artifact_url) = iter.next() { - // Log the full URL we're attempting to download from - tracing::info!(image_id=?image_config.id, %artifact_url, "attempting to download image"); - - match reqwest::Client::new() - .head(&artifact_url) - .send() - .await - .and_then(|res| res.error_for_status()) - { - Ok(res) => { - tracing::info!(image_id=?image_config.id, %artifact_url, "successfully fetched image HEAD"); - - // Read Content-Length header from response - let image_size = res - .headers() - .get(reqwest::header::CONTENT_LENGTH) - .context("no Content-Length header")? - .to_str()? 
- .parse::() - .context("invalid Content-Length header")?; - - return Ok(image_size); - } - Err(err) => { - tracing::warn!( - image_id=?image_config.id, - %artifact_url, - %err, - "failed to fetch image HEAD" - ); - } - } - } - - bail!("artifact url could not be resolved"); - } } /// Parses total bytes read from tar output. diff --git a/packages/edge/infra/client/manager/src/runner/mod.rs b/packages/edge/infra/client/manager/src/runner/mod.rs index 4e400c1e1f..0c5ee6f758 100644 --- a/packages/edge/infra/client/manager/src/runner/mod.rs +++ b/packages/edge/infra/client/manager/src/runner/mod.rs @@ -49,7 +49,7 @@ const GET_PID_TIMEOUT: Duration = Duration::from_secs(256); // If this was "rivet-", we'd have to report on non-actor cgroups with cadvisor. // // See also packages/core/services/cluster/src/workflows/server/install/install_scripts/files/cadvisor_metric_exporter.sh & packages/core/api/actor/src/route/metrics.rs -pub const RIVET_CONTAINER_PREFIX: &str = "pegboard-actor-"; +pub const RIVET_CONTAINER_PREFIX: &str = "pegboard-runner-"; #[derive(sqlx::FromRow)] pub struct ProxiedPortRow { diff --git a/packages/edge/infra/client/manager/src/runner/setup.rs b/packages/edge/infra/client/manager/src/runner/setup.rs index 91d9977e4f..0d8d0b0598 100644 --- a/packages/edge/infra/client/manager/src/runner/setup.rs +++ b/packages/edge/infra/client/manager/src/runner/setup.rs @@ -543,7 +543,8 @@ impl Runner { // Prepare the arguments for the runner let runner_path = ctx.runner_path(self.runner_id); - let runner_args = vec![runner_path.to_str().context("bad path")?, self.container_id()]; + let container_id = self.container_id(); + let runner_args = vec![runner_path.to_str().context("bad path")?, &container_id]; // NOTE: Pipes are automatically closed on drop (OwnedFd) // Pipe communication between processes diff --git a/packages/edge/infra/guard/server/src/routing/actor.rs b/packages/edge/infra/guard/server/src/routing/actor.rs index 2646cbd77e..e8594276d3 100644 --- a/packages/edge/infra/guard/server/src/routing/actor.rs +++ b/packages/edge/infra/guard/server/src/routing/actor.rs @@ -204,50 +204,100 @@ async fn find_actor( return Ok(None); } - // Create subs before checking for proxied ports - let mut ready_sub = ctx - .subscribe::(("actor_id", actor_id)) - .await?; - let mut fail_sub = ctx - .subscribe::(("actor_id", actor_id)) - .await?; - let mut destroy_sub = ctx - .subscribe::(("actor_id", actor_id)) - .await?; + let proxied_ports = if actor_id.as_v0().is_some() { + // Create subs before checking for proxied ports + let mut ready_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + let mut fail_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + let mut destroy_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; - let proxied_ports = if let Some(proxied_ports) = - tokio::time::timeout(Duration::from_secs(5), fetch_proxied_ports(ctx, actor_id)).await?? - { - proxied_ports - } else { - tracing::info!(?actor_id, "waiting for actor to become ready"); + if let Some(proxied_ports) = + tokio::time::timeout(Duration::from_secs(5), fetch_proxied_ports(ctx, actor_id)) + .await?? + { + proxied_ports + } else { + tracing::info!(?actor_id, "waiting for actor to become ready"); - // Wait for ready, fail, or destroy - tokio::select! 
{ - res = ready_sub.next() => { res?; }, - res = fail_sub.next() => { - let msg = res?; - bail_with!(ACTOR_FAILED_TO_CREATE, error = msg.message); - } - res = destroy_sub.next() => { - res?; - bail_with!(ACTOR_FAILED_TO_CREATE, error = "Actor failed before reaching a ready state."); + // Wait for ready, fail, or destroy + tokio::select! { + res = ready_sub.next() => { res?; }, + res = fail_sub.next() => { + let msg = res?; + bail_with!(ACTOR_FAILED_TO_CREATE, error = msg.message); + } + res = destroy_sub.next() => { + res?; + bail_with!(ACTOR_FAILED_TO_CREATE, error = "Actor failed before reaching a ready state."); + } + // Ready timeout + _ = tokio::time::sleep(Duration::from_secs(15)) => { + return Ok(None); + } } - // Ready timeout - _ = tokio::time::sleep(Duration::from_secs(15)) => { + + // Fetch again after ready + let Some(proxied_ports) = + tokio::time::timeout(Duration::from_secs(5), fetch_proxied_ports(ctx, actor_id)) + .await?? + else { return Ok(None); - } + }; + + proxied_ports } + } else { + // Create subs before checking for proxied ports + let mut ready_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + let mut fail_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; + let mut destroy_sub = ctx + .subscribe::(("actor_id", actor_id)) + .await?; - // Fetch again after ready - let Some(proxied_ports) = + if let Some(proxied_ports) = tokio::time::timeout(Duration::from_secs(5), fetch_proxied_ports(ctx, actor_id)) .await?? - else { - return Ok(None); - }; + { + proxied_ports + } else { + tracing::info!(?actor_id, "waiting for actor to become ready"); + + // Wait for ready, fail, or destroy + tokio::select! { + res = ready_sub.next() => { res?; }, + res = fail_sub.next() => { + let msg = res?; + bail_with!(ACTOR_FAILED_TO_CREATE, error = msg.message); + } + res = destroy_sub.next() => { + res?; + bail_with!(ACTOR_FAILED_TO_CREATE, error = "Actor failed before reaching a ready state."); + } + // Ready timeout + _ = tokio::time::sleep(Duration::from_secs(15)) => { + return Ok(None); + } + } - proxied_ports + // Fetch again after ready + let Some(proxied_ports) = + tokio::time::timeout(Duration::from_secs(5), fetch_proxied_ports(ctx, actor_id)) + .await?? 
+ else { + return Ok(None); + }; + + proxied_ports + } }; tracing::info!(?actor_id, "actor ready"); diff --git a/packages/edge/infra/guard/server/src/routing/actor_routes.rs b/packages/edge/infra/guard/server/src/routing/actor_routes.rs index 45b86d59a7..b63b85e5d0 100644 --- a/packages/edge/infra/guard/server/src/routing/actor_routes.rs +++ b/packages/edge/infra/guard/server/src/routing/actor_routes.rs @@ -56,17 +56,34 @@ pub async fn route_via_route_config( }; // Query actors with matching tags in this environment - let actors_res = ctx - .op(pegboard::ops::actor::list_for_env::Input { + let (actors_v1_res, actors_res) = tokio::try_join!( + ctx.op(pegboard::ops::actor::v1::list_for_env::Input { + env_id: namespace_id, + tags: selector_tags.clone(), + include_destroyed: false, + created_before: None, + limit: 50, // Reasonable limit for load balancing + }), + ctx.op(pegboard::ops::actor::list_for_env::Input { env_id: namespace_id, tags: selector_tags, include_destroyed: false, created_before: None, limit: 50, // Reasonable limit for load balancing + }), + )?; + + let actors = actors_v1_res + .actors + .into_iter() + .map(|entry| pegboard::ops::actor::list_for_env::ActorEntry { + actor_id: entry.actor_id.into(), + create_ts: entry.create_ts, }) - .await?; + .chain(actors_res.actors.into_iter()) + .collect::>(); - if actors_res.actors.is_empty() { + if actors.is_empty() { tracing::warn!( host = host, path = path, @@ -118,7 +135,7 @@ pub async fn route_via_route_config( // Fetch each actor's details to get their connection information let mut targets = Vec::new(); - for actor_entry in &actors_res.actors { + for actor_entry in &actors { // Find actor's proxied ports if let Some(actor_targets) = find_actor_targets(ctx, &actor_entry.actor_id, dc_id, &path_to_forward).await? 
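Several hunks above repeat a TODO noting that subtracting 1 ms from `create_ts` to form the pagination cursor can skip an actor created in the same millisecond, and that a compound cursor of `[created_at, actor_id]` passed to the FDB range would avoid this. Below is a minimal, hypothetical sketch of such a cursor; the `ListCursor` type and its encoding are assumptions for illustration and are not part of this patch.

// Editor's sketch, not patch content. Assumes an opaque "{create_ts}:{actor_id}"
// cursor string; the list op would pack (env_id, create_ts, actor_id) as the
// exclusive range bound instead of (env_id, create_ts - 1), so actors sharing a
// millisecond are disambiguated by id rather than dropped.
use uuid::Uuid;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ListCursor {
    create_ts: i64,
    actor_id: Uuid,
}

impl ListCursor {
    /// Serialize to an opaque string for the `cursor` query parameter.
    fn encode(&self) -> String {
        format!("{}:{}", self.create_ts, self.actor_id)
    }

    /// Parse the `cursor` query parameter back into its parts, rejecting
    /// malformed input by returning `None`.
    fn decode(raw: &str) -> Option<Self> {
        let (ts, id) = raw.split_once(':')?;
        Some(ListCursor {
            create_ts: ts.parse().ok()?,
            actor_id: id.parse().ok()?,
        })
    }
}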
diff --git a/packages/edge/services/pegboard/db/runner/migrations/20200101000000_init.up.sql b/packages/edge/services/pegboard/db/runner/migrations/20200101000000_init.up.sql index 06607730a9..d4375862c2 100644 --- a/packages/edge/services/pegboard/db/runner/migrations/20200101000000_init.up.sql +++ b/packages/edge/services/pegboard/db/runner/migrations/20200101000000_init.up.sql @@ -1,6 +1,5 @@ CREATE TABLE IF NOT EXISTS actor_runners ( actor_id String, - generation UInt32, runner_id UUID, started_at DateTime64 (9), finished_at DateTime64 (9) diff --git a/packages/edge/services/pegboard/src/keys/env.rs b/packages/edge/services/pegboard/src/keys/env.rs index 7198cc653a..0f4bb2208a 100644 --- a/packages/edge/services/pegboard/src/keys/env.rs +++ b/packages/edge/services/pegboard/src/keys/env.rs @@ -209,3 +209,117 @@ impl TuplePack for Actor2SubspaceKey { Ok(offset) } } + +#[derive(Debug)] +pub struct ContainerKey { + environment_id: Uuid, + pub create_ts: i64, + pub actor_id: util::Id, +} + +impl ContainerKey { + pub fn new(environment_id: Uuid, create_ts: i64, actor_id: util::Id) -> Self { + ContainerKey { + environment_id, + create_ts, + actor_id, + } + } + + pub fn subspace(environment_id: Uuid) -> ContainerSubspaceKey { + ContainerSubspaceKey::new(environment_id) + } + + pub fn subspace_with_create_ts(environment_id: Uuid, create_ts: i64) -> ContainerSubspaceKey { + ContainerSubspaceKey::new_with_create_ts(environment_id, create_ts) + } +} + +impl FormalKey for ContainerKey { + type Value = ContainerKeyData; + + fn deserialize(&self, raw: &[u8]) -> Result { + serde_json::from_slice(raw).map_err(Into::into) + } + + fn serialize(&self, value: Self::Value) -> Result> { + serde_json::to_vec(&value).map_err(Into::into) + } +} + +impl TuplePack for ContainerKey { + fn pack( + &self, + w: &mut W, + tuple_depth: TupleDepth, + ) -> std::io::Result { + let t = ( + ENV, + self.environment_id, + CONTAINER, + self.create_ts, + self.actor_id, + ); + t.pack(w, tuple_depth) + } +} + +impl<'de> TupleUnpack<'de> for ContainerKey { + fn unpack(input: &[u8], tuple_depth: TupleDepth) -> PackResult<(&[u8], Self)> { + let (input, (_, environment_id, _, create_ts, actor_id)) = + <(usize, Uuid, usize, i64, util::Id)>::unpack(input, tuple_depth)?; + let v = ContainerKey { + environment_id, + create_ts, + actor_id, + }; + + Ok((input, v)) + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ContainerKeyData { + pub is_destroyed: bool, + pub tags: Vec<(String, String)>, +} + +pub struct ContainerSubspaceKey { + environment_id: Uuid, + create_ts: Option, +} + +impl ContainerSubspaceKey { + pub fn new(environment_id: Uuid) -> Self { + ContainerSubspaceKey { + environment_id, + create_ts: None, + } + } + + pub fn new_with_create_ts(environment_id: Uuid, create_ts: i64) -> Self { + ContainerSubspaceKey { + environment_id, + create_ts: Some(create_ts), + } + } +} + +impl TuplePack for ContainerSubspaceKey { + fn pack( + &self, + w: &mut W, + tuple_depth: TupleDepth, + ) -> std::io::Result { + let mut offset = VersionstampOffset::None { size: 0 }; + + let t = (ENV, self.environment_id, CONTAINER); + offset += t.pack(w, tuple_depth)?; + + if let Some(create_ts) = &self.create_ts { + offset += create_ts.pack(w, tuple_depth)?; + } + + Ok(offset) + } +} diff --git a/packages/edge/services/pegboard/src/lib.rs b/packages/edge/services/pegboard/src/lib.rs index 6464b2400f..c4eb24231b 100644 --- a/packages/edge/services/pegboard/src/lib.rs +++ b/packages/edge/services/pegboard/src/lib.rs @@ -20,8 +20,8 @@ pub fn 
registry() -> WorkflowResult { let mut registry = Registry::new(); registry.register_workflow::()?; + registry.register_workflow::()?; registry.register_workflow::()?; - registry.register_workflow::()?; Ok(registry) } diff --git a/packages/edge/services/pegboard/src/ops/actor/list_for_env.rs b/packages/edge/services/pegboard/src/ops/actor/list_for_env.rs index 6b919c2563..4d698ab36c 100644 --- a/packages/edge/services/pegboard/src/ops/actor/list_for_env.rs +++ b/packages/edge/services/pegboard/src/ops/actor/list_for_env.rs @@ -36,10 +36,9 @@ pub async fn pegboard_actor_list_for_env( .fdb() .await? .run(|tx, _mc| async move { - // Read from actor2 first - let actor2_subspace = + let actor_subspace = keys::subspace().subspace(&keys::env::Actor2Key::subspace(input.env_id)); - let (start2, end2) = actor2_subspace.range(); + let (start2, end2) = actor_subspace.range(); let end2 = if let Some(created_before) = input.created_before { fdb_util::end_of_key_range(&keys::subspace().pack( @@ -88,59 +87,6 @@ pub async fn pegboard_actor_list_for_env( } } - // Read from old actors - let actor_subspace = - keys::subspace().subspace(&keys::env::ActorKey::subspace(input.env_id)); - let (start, end) = actor_subspace.range(); - - let end = if let Some(created_before) = input.created_before { - keys::subspace().pack(&keys::env::ActorKey::new( - input.env_id, - created_before, - Uuid::nil(), - )) - } else { - end - }; - - let mut stream = tx.get_ranges_keyvalues( - fdb::RangeOption { - mode: StreamingMode::Iterator, - reverse: true, - ..(start, end).into() - }, - // NOTE: Does not have to be serializable because we are listing, stale data does not matter - SNAPSHOT, - ); - - while let Some(entry) = stream.try_next().await? { - let actor_key = keys::subspace() - .unpack::(entry.key()) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - let data = actor_key - .deserialize(entry.value()) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - - if input.include_destroyed || !data.is_destroyed { - // Compute intersection between ds tags and input tags - let tags_match = input - .tags - .iter() - .all(|(k, v)| data.tags.iter().any(|(k2, v2)| k == k2 && v == v2)); - - if tags_match { - results.push(ActorEntry { - actor_id: actor_key.actor_id.into(), - create_ts: actor_key.create_ts, - }); - - if results.len() == input.limit { - break; - } - } - } - } - Ok(results) }) .custom_instrument(tracing::info_span!("actor_list_tx")) diff --git a/packages/edge/services/pegboard/src/ops/actor/log/read.rs b/packages/edge/services/pegboard/src/ops/actor/log/read.rs index f83297356d..6e04ad7e00 100644 --- a/packages/edge/services/pegboard/src/ops/actor/log/read.rs +++ b/packages/edge/services/pegboard/src/ops/actor/log/read.rs @@ -41,7 +41,7 @@ pub struct LogEntry { pub message: Vec, pub stream_type: u8, pub foreign: bool, - pub actor_id: String, + pub actor_id: util::Id, } #[operation] diff --git a/packages/edge/services/pegboard/src/ops/actor/mod.rs b/packages/edge/services/pegboard/src/ops/actor/mod.rs index fe28e55f09..63df65e76a 100644 --- a/packages/edge/services/pegboard/src/ops/actor/mod.rs +++ b/packages/edge/services/pegboard/src/ops/actor/mod.rs @@ -1,6 +1,6 @@ -pub mod allocate_ingress_ports; pub mod get; pub mod list_for_env; pub mod log; pub mod query; pub mod usage; +pub mod v1; diff --git a/packages/edge/services/pegboard/src/ops/actor/allocate_ingress_ports.rs b/packages/edge/services/pegboard/src/ops/actor/v1/allocate_ingress_ports.rs similarity index 100% rename from 
packages/edge/services/pegboard/src/ops/actor/allocate_ingress_ports.rs rename to packages/edge/services/pegboard/src/ops/actor/v1/allocate_ingress_ports.rs diff --git a/packages/edge/services/pegboard/src/ops/actor/v1/get.rs b/packages/edge/services/pegboard/src/ops/actor/v1/get.rs new file mode 100644 index 0000000000..fecf049e15 --- /dev/null +++ b/packages/edge/services/pegboard/src/ops/actor/v1/get.rs @@ -0,0 +1,360 @@ +use std::{collections::HashMap, convert::TryInto}; + +use chirp_workflow::prelude::*; +use fdb_util::{FormalKey, SERIALIZABLE}; +use foundationdb as fdb; +use futures_util::{StreamExt, TryStreamExt}; + +use crate::{ + keys, + types::{ + Actor, ActorLifecycle, ActorResources, EndpointType, GameGuardProtocol, HostProtocol, + NetworkMode, Port, Routing, + }, +}; + +#[derive(Debug, sqlx::FromRow)] +struct ActorRow { + env_id: Uuid, + tags: sqlx::types::Json>, + resources_cpu_millicores: Option, + resources_memory_mib: Option, + selected_resources_cpu_millicores: Option, + selected_resources_memory_mib: Option, + lifecycle_kill_timeout_ms: i64, + lifecycle_durable: bool, + create_ts: i64, + start_ts: Option, + connectable_ts: Option, + destroy_ts: Option, + client_wan_hostname: Option, + image_id: Uuid, + args: sqlx::types::Json>, + network_mode: i64, + environment: sqlx::types::Json>, +} + +#[derive(sqlx::FromRow)] +pub(crate) struct PortIngress { + pub(crate) port_name: String, + pub(crate) port_number: Option, + ingress_port_number: i64, + pub(crate) protocol: i64, +} + +#[derive(sqlx::FromRow)] +pub(crate) struct PortHost { + pub(crate) port_name: String, + pub(crate) port_number: Option, + pub(crate) protocol: i64, +} + +#[derive(sqlx::FromRow)] +pub(crate) struct PortProxied { + pub(crate) port_name: String, + pub(crate) source: i64, +} + +struct ActorData { + actor_id: Uuid, + row: ActorRow, + port_ingress_rows: Vec, + port_host_rows: Vec, + port_proxied_rows: Vec, +} + +#[derive(Debug)] +pub struct Input { + pub actor_ids: Vec, + + /// If null, will fall back to the default endpoint type for the datacenter. + /// + /// If the datacenter has a parent hostname, will use hostname endpoint. Otherwise, will use + /// path endpoint. + pub endpoint_type: Option, + + pub allow_errors: bool, +} + +#[derive(Debug)] +pub struct Output { + pub actors: Vec, +} + +#[operation] +pub async fn pegboard_actor_get(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let actors_with_wf_ids = ctx + .fdb() + .await? 
+ .run(|tx, _mc| async move { + futures_util::stream::iter(input.actor_ids.clone()) + .map(|actor_id| { + let tx = tx.clone(); + async move { + let workflow_id_key = keys::actor::WorkflowIdKey::new(actor_id); + let workflow_id_entry = tx + .get(&keys::subspace().pack(&workflow_id_key), SERIALIZABLE) + .await?; + + let Some(workflow_id_entry) = workflow_id_entry else { + return Ok(None); + }; + + let workflow_id = workflow_id_key + .deserialize(&workflow_id_entry) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + Ok(Some((actor_id, workflow_id))) + } + }) + .buffer_unordered(1024) + .try_filter_map(|x| std::future::ready(Ok(x))) + .try_collect::>() + .await + }) + .custom_instrument(tracing::info_span!("actor_list_wf_tx")) + .await?; + + let actor_data = futures_util::stream::iter(actors_with_wf_ids) + .map(|(actor_id, workflow_id)| async move { + let pool = match ctx.sqlite_for_workflow(workflow_id).await { + Ok(x) => x, + Err(err) + if matches!( + err.as_workflow_error(), + Some(WorkflowError::WorkflowNotFound) + ) => + { + tracing::warn!(?actor_id, ?workflow_id, "actor workflow not found"); + return Ok(None); + } + res => res?, + }; + let pool = &pool; + + let (actor_row, port_ingress_rows, port_host_rows, port_proxied_rows) = tokio::try_join!( + sql_fetch_optional!( + [ctx, ActorRow, pool] + " + SELECT + env_id, + json(tags) AS tags, + resources_cpu_millicores, + resources_memory_mib, + selected_resources_cpu_millicores, + selected_resources_memory_mib, + lifecycle_kill_timeout_ms, + lifecycle_durable, + create_ts, + start_ts, + connectable_ts, + destroy_ts, + client_wan_hostname, + image_id, + json(args) AS args, + network_mode, + json(environment) AS environment + FROM state + ", + ), + sql_fetch_all!( + [ctx, PortIngress, pool] + " + SELECT + port_name, + port_number, + ingress_port_number, + protocol + FROM ports_ingress + ", + ), + sql_fetch_all!( + [ctx, PortHost, pool] + " + SELECT port_name, port_number, protocol + FROM ports_host + ", + ), + sql_fetch_all!( + [ctx, PortProxied, pool] + " + SELECT port_name, source + FROM ports_proxied + ", + ), + )?; + + let Some(actor_row) = actor_row else { + tracing::error!(?actor_id, ?workflow_id, "actor has no state row"); + return Ok(None); + }; + + GlobalResult::Ok(Some(ActorData { + actor_id, + row: actor_row, + port_ingress_rows, + port_host_rows, + port_proxied_rows, + })) + }) + .buffer_unordered(1024) + .map(|x| match x { + Ok(x) => Ok(x), + Err(err) => { + if input.allow_errors { + tracing::warn!(?err, "failed to fetch actor"); + Ok(None) + } else { + Err(err) + } + } + }) + .try_filter_map(|x| std::future::ready(Ok(x))) + .try_collect::>() + .await?; + + let dc_id = ctx.config().server()?.rivet.edge()?.datacenter_id; + let dc_res = ctx + .op(cluster::ops::datacenter::get::Input { + datacenter_ids: vec![dc_id], + }) + .await?; + let dc = unwrap!(dc_res.datacenters.first()); + + let actors = actor_data + .iter() + .map(|s| { + let endpoint_type = input.endpoint_type.unwrap_or_else(|| { + EndpointType::default_for_guard_public_hostname(&dc.guard_public_hostname) + }); + + let is_connectable = s.row.connectable_ts.is_some(); + let wan_hostname = s.row.client_wan_hostname.clone(); + + let ports = s + .port_ingress_rows + .iter() + .map(|port| { + Ok(( + port.port_name.clone(), + create_port_ingress( + s.actor_id, + port, + unwrap!(GameGuardProtocol::from_repr(port.protocol.try_into()?)), + endpoint_type, + &dc.guard_public_hostname, + )?, + )) + }) + .chain(s.port_host_rows.iter().map(|host_port| { + let port_proxied = 
s.port_proxied_rows.iter().find(|x| { + // Transform the port name based on the driver + let transformed_port_name = + crate::util::pegboard_normalize_port_name(&host_port.port_name); + + x.port_name == transformed_port_name + }); + + Ok(( + host_port.port_name.clone(), + create_port_host( + is_connectable, + wan_hostname.as_deref(), + host_port, + port_proxied, + )?, + )) + })) + .collect::>>()?; + + Ok(Actor { + actor_id: s.actor_id.into(), + env_id: s.row.env_id, + tags: s.row.tags.0.clone(), + resources: if let (Some(cpu_millicores), Some(memory_mib)) = ( + s.row.selected_resources_cpu_millicores, + s.row.selected_resources_memory_mib, + ) { + Some(ActorResources { + cpu_millicores: cpu_millicores.try_into()?, + memory_mib: memory_mib.try_into()?, + }) + } else if let (Some(cpu_millicores), Some(memory_mib)) = + (s.row.resources_cpu_millicores, s.row.resources_memory_mib) + { + Some(ActorResources { + cpu_millicores: cpu_millicores.try_into()?, + memory_mib: memory_mib.try_into()?, + }) + } else { + None + }, + + lifecycle: ActorLifecycle { + kill_timeout_ms: s.row.lifecycle_kill_timeout_ms, + durable: s.row.lifecycle_durable, + }, + args: s.row.args.0.clone(), + environment: s.row.environment.0.clone(), + image_id: s.row.image_id, + network_mode: unwrap!(NetworkMode::from_repr(s.row.network_mode.try_into()?)), + network_ports: ports, + create_ts: s.row.create_ts, + start_ts: s.row.start_ts, + connectable_ts: s.row.connectable_ts, + destroy_ts: s.row.destroy_ts, + }) + }) + .collect::>>()?; + + Ok(Output { actors }) +} + +pub(crate) fn create_port_ingress( + actor_id: Uuid, + port: &PortIngress, + protocol: GameGuardProtocol, + endpoint_type: EndpointType, + guard_public_hostname: &cluster::types::GuardPublicHostname, +) -> GlobalResult { + let (hostname, path) = crate::util::build_actor_hostname_and_path( + actor_id.into(), + &port.port_name, + protocol, + endpoint_type, + guard_public_hostname, + )?; + + Ok(Port { + internal_port: port.port_number.map(TryInto::try_into).transpose()?, + public_hostname: Some(hostname), + public_port: Some(port.ingress_port_number.try_into()?), + public_path: path, + routing: Routing::GameGuard { protocol }, + }) +} + +pub(crate) fn create_port_host( + is_connectable: bool, + wan_hostname: Option<&str>, + host_port: &PortHost, + port_proxied: Option<&PortProxied>, +) -> GlobalResult { + Ok(Port { + internal_port: None, + public_hostname: if is_connectable { + port_proxied.and(wan_hostname).map(|x| x.to_string()) + } else { + None + }, + public_port: if is_connectable { + port_proxied.map(|x| x.source.try_into()).transpose()? 
+ } else { + None + }, + public_path: None, + routing: Routing::Host { + protocol: unwrap!(HostProtocol::from_repr(host_port.protocol.try_into()?)), + }, + }) +} diff --git a/packages/edge/services/pegboard/src/ops/actor/v1/list_for_env.rs b/packages/edge/services/pegboard/src/ops/actor/v1/list_for_env.rs new file mode 100644 index 0000000000..0e6823bb4f --- /dev/null +++ b/packages/edge/services/pegboard/src/ops/actor/v1/list_for_env.rs @@ -0,0 +1,98 @@ +use std::collections::HashMap; + +use chirp_workflow::prelude::*; +use fdb_util::{FormalKey, SNAPSHOT}; +use foundationdb::{self as fdb, options::StreamingMode}; +use futures_util::TryStreamExt; + +use crate::keys; + +#[derive(Debug, Default)] +pub struct Input { + pub env_id: Uuid, + pub tags: HashMap, + pub include_destroyed: bool, + pub created_before: Option, + pub limit: usize, +} + +#[derive(Debug)] +pub struct Output { + pub actors: Vec, +} + +#[derive(Debug)] +pub struct ActorEntry { + pub actor_id: Uuid, + pub create_ts: i64, +} + +#[operation] +pub async fn pegboard_actor_list_for_env( + ctx: &OperationCtx, + input: &Input, +) -> GlobalResult { + let actors = ctx + .fdb() + .await? + .run(|tx, _mc| async move { + let actor_subspace = + keys::subspace().subspace(&keys::env::ActorKey::subspace(input.env_id)); + let (start, end) = actor_subspace.range(); + + let end = if let Some(created_before) = input.created_before { + keys::subspace().pack(&keys::env::ActorKey::new( + input.env_id, + created_before, + Uuid::nil(), + )) + } else { + end + }; + + let mut stream = tx.get_ranges_keyvalues( + fdb::RangeOption { + mode: StreamingMode::Iterator, + reverse: true, + ..(start, end).into() + }, + // NOTE: Does not have to be serializable because we are listing, stale data does not matter + SNAPSHOT, + ); + let mut results = Vec::new(); + + while let Some(entry) = stream.try_next().await? 
{ + let actor_key = keys::subspace() + .unpack::(entry.key()) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + let data = actor_key + .deserialize(entry.value()) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + if input.include_destroyed || !data.is_destroyed { + // Compute intersection between ds tags and input tags + let tags_match = input + .tags + .iter() + .all(|(k, v)| data.tags.iter().any(|(k2, v2)| k == k2 && v == v2)); + + if tags_match { + results.push(ActorEntry { + actor_id: actor_key.actor_id, + create_ts: actor_key.create_ts, + }); + + if results.len() == input.limit { + break; + } + } + } + } + + Ok(results) + }) + .custom_instrument(tracing::info_span!("actor_list_tx")) + .await?; + + Ok(Output { actors }) +} diff --git a/packages/edge/services/pegboard/src/ops/actor/v1/log/export.rs b/packages/edge/services/pegboard/src/ops/actor/v1/log/export.rs new file mode 100644 index 0000000000..cebb0fe3c4 --- /dev/null +++ b/packages/edge/services/pegboard/src/ops/actor/v1/log/export.rs @@ -0,0 +1,109 @@ +use chirp_workflow::prelude::*; +use rivet_operation::prelude::proto::backend; + +use crate::types::LogsStreamType; + +#[derive(Debug)] +pub struct Input { + pub actor_id: Uuid, + pub stream_type: LogsStreamType, +} + +#[derive(Debug)] +pub struct Output { + pub upload_id: Uuid, +} + +#[derive(clickhouse::Row, serde::Deserialize)] +pub struct LogEntry { + pub message: Vec, +} + +#[operation] +pub async fn pegboard_actor_log_read(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let file_name = match input.stream_type { + LogsStreamType::StdOut => "stdout.txt", + LogsStreamType::StdErr => "stderr.txt", + }; + + let mut entries_cursor = ctx + .clickhouse() + .await? + .query(indoc!( + " + SELECT message + FROM db_pegboard_actor_log.actor_logs + WHERE + actor_id = ? AND + stream_type = ? + ORDER BY ts ASC + + UNION ALL + + SELECT message + FROM db_pegboard_actor_log.actor_logs2 + WHERE + actor_id = ? AND + stream_type = ? + ORDER BY ts ASC + " + )) + .bind(input.actor_id) + .bind(input.stream_type as i8) + .bind(input.actor_id.to_string()) + .bind(input.stream_type as i8) + .fetch::()?; + + let mut lines = 0; + let mut buf = Vec::new(); + while let Some(mut entry) = entries_cursor.next().await? 
{ + buf.append(&mut entry.message); + buf.push(b'\n'); + lines += 1; + } + + tracing::info!(?lines, bytes = ?buf.len(), "read all logs"); + + // Upload log + let mime = "text/plain"; + let content_length = buf.len(); + let upload_res = op!([ctx] upload_prepare { + bucket: "bucket-actor-log-export".into(), + files: vec![ + backend::upload::PrepareFile { + path: file_name.into(), + mime: Some(mime.into()), + content_length: content_length as u64, + ..Default::default() + }, + ], + }) + .await?; + + let presigned_req = unwrap!(upload_res.presigned_requests.first()); + let res = reqwest::Client::new() + .put(&presigned_req.url) + .body(buf) + .header(reqwest::header::CONTENT_TYPE, mime) + .header(reqwest::header::CONTENT_LENGTH, content_length) + .send() + .await?; + if res.status().is_success() { + tracing::info!("uploaded successfully"); + } else { + let status = res.status(); + let text = res.text().await; + tracing::error!(?status, ?text, "failed to upload"); + bail!("failed to upload"); + } + + op!([ctx] upload_complete { + upload_id: upload_res.upload_id, + bucket: Some("bucket-pegboard-log-export".into()), + }) + .await?; + + Ok(Output { + upload_id: unwrap!(upload_res.upload_id).as_uuid(), + }) +} diff --git a/packages/edge/services/pegboard/src/ops/actor/v1/log/mod.rs b/packages/edge/services/pegboard/src/ops/actor/v1/log/mod.rs new file mode 100644 index 0000000000..eaafeae343 --- /dev/null +++ b/packages/edge/services/pegboard/src/ops/actor/v1/log/mod.rs @@ -0,0 +1,2 @@ +pub mod export; +pub mod read; diff --git a/packages/edge/services/pegboard/src/ops/actor/v1/log/read.rs b/packages/edge/services/pegboard/src/ops/actor/v1/log/read.rs new file mode 100644 index 0000000000..c67e73b5de --- /dev/null +++ b/packages/edge/services/pegboard/src/ops/actor/v1/log/read.rs @@ -0,0 +1,196 @@ +use chirp_workflow::prelude::*; + +use crate::types::LogsStreamType; + +#[derive(Debug)] +pub struct Input { + pub actor_ids: Vec, + pub stream_types: Vec, + pub count: i64, + pub order_by: Order, + pub query: Query, + pub search_text: Option, + pub search_case_sensitive: Option, + pub search_enable_regex: Option, +} + +#[derive(Debug, Clone, Copy)] +pub enum Query { + All, + BeforeNts(i64), + AfterNts(i64), + Range(i64, i64), +} + +#[derive(Debug, Clone, Copy)] +pub enum Order { + Asc, + Desc, +} + +#[derive(Debug)] +pub struct Output { + pub entries: Vec, +} + +#[derive(Debug, clickhouse::Row, serde::Deserialize)] +pub struct LogEntryRow { + /// In nanoseconds. + pub ts: i64, + pub message: Vec, + pub stream_type: u8, + pub actor_id_str: String, +} + +#[derive(Debug)] +pub struct LogEntry { + /// In nanoseconds. 
+ pub ts: i64, + pub message: Vec, + pub stream_type: u8, + pub actor_id: Uuid, +} + +#[operation] +pub async fn pegboard_actor_log_read(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let clickhouse = ctx.clickhouse().await?; + + // Convert stream types to a vector of u8 + let stream_type_values: Vec = input.stream_types.iter().map(|&st| st as u8).collect(); + + // Extract values from query enum + let (is_all, is_before, is_after, before_nts, after_nts) = match input.query { + Query::All => (true, false, false, None, None), + Query::BeforeNts(nts) => (false, true, false, Some(nts), None), + Query::AfterNts(nts) => (false, false, true, None, Some(nts)), + Query::Range(after, before) => (false, true, true, Some(before), Some(after)), + }; + + // Prepare search parameters + let search_text = input.search_text.as_deref().unwrap_or(""); + let apply_search = !search_text.is_empty(); + let enable_regex = input.search_enable_regex.unwrap_or(false); + let case_sensitive = input.search_case_sensitive.unwrap_or(false); + + // Pre-format the regex strings with or without case sensitivity + let regex_text = if case_sensitive { + search_text.to_string() + } else { + format!("(?i){}", search_text) + }; + + // Direction for ordering + let order_direction = match input.order_by { + Order::Asc => "ASC", + Order::Desc => "DESC", + }; + + // ?? = escaped ? + let query = formatdoc!( + " + SELECT + ts, + message, + stream_type, + actor_id_str + FROM ( + SELECT + ts, + message, + stream_type, + toString(actor_id) as actor_id_str + FROM + db_pegboard_actor_log.actor_logs + UNION ALL + SELECT + ts, + message, + stream_type, + actor_id as actor_id_str + FROM + db_pegboard_actor_log.actor_logs2 + ) + WHERE + actor_id_str IN ? + AND stream_type IN ? + -- Apply timestamp filtering based on query type + AND ( + ? -- is_all + OR (? AND ts < fromUnixTimestamp64Nano(?)) -- is_before + OR (? AND ts > fromUnixTimestamp64Nano(?)) -- is_after + OR (? AND ? AND + ts > fromUnixTimestamp64Nano(?) AND + ts < fromUnixTimestamp64Nano(?)) -- is_range + ) + -- Search filtering with conditional logic + AND ( + NOT ? -- NOT apply_search (always true when search not applied) + OR ( + CASE + WHEN ? THEN -- enable_regex + -- Using pre-formatted regex string + match(message, ?) + ELSE + -- Toggle for case sensitivity without regex + CASE + WHEN ? THEN position(message, ?) > 0 + ELSE positionCaseInsensitive(message, ?) > 0 + END + END + ) + ) + -- Use dynamic direction directly in the ORDER BY clause + ORDER BY ts {order_direction} + LIMIT + ? + " + ); + + // Convert actor IDs to strings for the query + let actor_id_strings: Vec = input.actor_ids.iter().map(|id| id.to_string()).collect(); + + // Build query with all parameters and safety restrictions + let query_builder = clickhouse + .query(&query) + .bind(&actor_id_strings) + .bind(stream_type_values) + // Query type parameters + .bind(is_all) + .bind(is_before) + .bind(before_nts.unwrap_or(0)) + .bind(is_after) + .bind(after_nts.unwrap_or(0)) + .bind(is_before) // First part of AND condition for range + .bind(is_after) // Second part of AND condition for range + .bind(after_nts.unwrap_or(0)) + .bind(before_nts.unwrap_or(0)) + // Search parameters + .bind(apply_search) + .bind(enable_regex) + .bind(regex_text) + .bind(case_sensitive) + .bind(search_text) + .bind(search_text.to_lowercase()) + // Limit + .bind(input.count); + + let entries = query_builder + .fetch_all::() + .await + .map_err(|err| GlobalError::from(err))? 
+ .into_iter() + .map(|x| { + Ok(LogEntry { + ts: x.ts, + message: x.message, + stream_type: x.stream_type, + actor_id: unwrap!( + Uuid::parse_str(&x.actor_id_str).ok(), + "invalid actor log entry uuid" + ), + }) + }) + .collect::>>()?; + + Ok(Output { entries }) +} diff --git a/packages/edge/services/pegboard/src/ops/actor/v1/mod.rs b/packages/edge/services/pegboard/src/ops/actor/v1/mod.rs new file mode 100644 index 0000000000..de065b4ba9 --- /dev/null +++ b/packages/edge/services/pegboard/src/ops/actor/v1/mod.rs @@ -0,0 +1,4 @@ +pub mod allocate_ingress_ports; +pub mod get; +pub mod list_for_env; +pub mod log; diff --git a/packages/edge/services/pegboard/src/ops/container/list_for_env.rs b/packages/edge/services/pegboard/src/ops/container/list_for_env.rs new file mode 100644 index 0000000000..ec0e86e60a --- /dev/null +++ b/packages/edge/services/pegboard/src/ops/container/list_for_env.rs @@ -0,0 +1,96 @@ +use std::collections::HashMap; + +use chirp_workflow::prelude::*; +use fdb_util::{FormalKey, SNAPSHOT}; +use foundationdb::{self as fdb, options::StreamingMode}; +use futures_util::TryStreamExt; + +use crate::keys; + +#[derive(Debug, Default)] +pub struct Input { + pub env_id: Uuid, + pub tags: HashMap, + pub include_destroyed: bool, + pub created_before: Option, + pub limit: usize, +} + +#[derive(Debug)] +pub struct Output { + pub actors: Vec, +} + +#[derive(Debug)] +pub struct ActorEntry { + pub actor_id: util::Id, + pub create_ts: i64, +} + +#[operation] +pub async fn pegboard_container_list_for_env( + ctx: &OperationCtx, + input: &Input, +) -> GlobalResult { + let actors = ctx + .fdb() + .await? + .run(|tx, _mc| async move { + let container_subspace = + keys::subspace().subspace(&keys::env::ContainerKey::subspace(input.env_id)); + let (start2, end2) = container_subspace.range(); + + let end2 = if let Some(created_before) = input.created_before { + fdb_util::end_of_key_range(&keys::subspace().pack( + &keys::env::ContainerKey::subspace_with_create_ts(input.env_id, created_before), + )) + } else { + end2 + }; + + let mut stream2 = tx.get_ranges_keyvalues( + fdb::RangeOption { + mode: StreamingMode::Iterator, + reverse: true, + ..(start2, end2).into() + }, + // NOTE: Does not have to be serializable because we are listing, stale data does not matter + SNAPSHOT, + ); + let mut results = Vec::new(); + + while let Some(entry) = stream2.try_next().await? { + let actor_key = keys::subspace() + .unpack::(entry.key()) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + let data = actor_key + .deserialize(entry.value()) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + if input.include_destroyed || !data.is_destroyed { + // Compute intersection between ds tags and input tags + let tags_match = input + .tags + .iter() + .all(|(k, v)| data.tags.iter().any(|(k2, v2)| k == k2 && v == v2)); + + if tags_match { + results.push(ActorEntry { + actor_id: actor_key.actor_id, + create_ts: actor_key.create_ts, + }); + + if results.len() == input.limit { + break; + } + } + } + } + + Ok(results) + }) + .custom_instrument(tracing::info_span!("actor_list_tx")) + .await?; + + Ok(Output { actors }) +} diff --git a/packages/edge/services/pegboard/src/ops/container/mod.rs b/packages/edge/services/pegboard/src/ops/container/mod.rs new file mode 100644 index 0000000000..85e634a336 --- /dev/null +++ b/packages/edge/services/pegboard/src/ops/container/mod.rs @@ -0,0 +1,3 @@ +//! 
Containers are just an alias for actors with allocation type = single + +pub mod list_for_env; diff --git a/packages/edge/services/pegboard/src/ops/mod.rs b/packages/edge/services/pegboard/src/ops/mod.rs index 3567b3bc8e..5d6b9df4cb 100644 --- a/packages/edge/services/pegboard/src/ops/mod.rs +++ b/packages/edge/services/pegboard/src/ops/mod.rs @@ -1,3 +1,4 @@ pub mod actor; pub mod client; +pub mod container; pub mod game_config; diff --git a/packages/edge/services/pegboard/src/types.rs b/packages/edge/services/pegboard/src/types.rs index da7490ad35..d9bd6718d7 100644 --- a/packages/edge/services/pegboard/src/types.rs +++ b/packages/edge/services/pegboard/src/types.rs @@ -186,28 +186,9 @@ pub fn convert_actor_to_api( .collect::>(), }), lifecycle: Box::new(value.lifecycle.api_into()), - resources: value.resources.map(ApiInto::api_into).map(Box::new), }) } -impl ApiFrom for ActorResources { - fn api_from(value: models::ActorsResources) -> ActorResources { - ActorResources { - cpu_millicores: value.cpu as u32, - memory_mib: value.memory as u32, - } - } -} - -impl ApiFrom for models::ActorsResources { - fn api_from(value: ActorResources) -> models::ActorsResources { - models::ActorsResources { - cpu: value.cpu_millicores as i32, - memory: value.memory_mib as i32, - } - } -} - impl ApiFrom for ActorLifecycle { fn api_from(value: models::ActorsLifecycle) -> ActorLifecycle { ActorLifecycle { @@ -376,3 +357,459 @@ impl ApiFrom for models::ActorsEndpointType { } } } + +// MARK: Containers +pub fn convert_container_to_api( + value: Actor, + datacenter: &cluster::types::Datacenter, +) -> GlobalResult { + Ok(models::ContainersContainer { + id: value.actor_id.to_string(), + region: datacenter.name_id.clone(), + created_at: util::timestamp::to_string(value.create_ts)?, + // `started_at` -> `connectable_ts` is intentional. We don't expose the internal + // workings of actors to the API, so we need to return the timestamp at which the server can + // actually do anything useful. 
+ started_at: value + .connectable_ts + .map(util::timestamp::to_string) + .transpose()?, + destroyed_at: value + .destroy_ts + .map(util::timestamp::to_string) + .transpose()?, + tags: Some(serde_json::to_value(value.tags)?), + runtime: Box::new(models::ContainersRuntime { + build: value.image_id, + arguments: Some(value.args), + environment: Some(value.environment), + }), + network: Box::new(models::ContainersNetwork { + mode: value.network_mode.api_into(), + ports: value + .network_ports + .into_iter() + .map(|(s, p)| (s, p.api_into())) + .collect::>(), + }), + lifecycle: Box::new(value.lifecycle.api_into()), + resources: Box::new(unwrap!(value.resources, "container should have resources").api_into()), + }) +} + +impl ApiFrom for ActorResources { + fn api_from(value: models::ContainersResources) -> ActorResources { + ActorResources { + cpu_millicores: value.cpu as u32, + memory_mib: value.memory as u32, + } + } +} + +impl ApiFrom for models::ContainersResources { + fn api_from(value: ActorResources) -> models::ContainersResources { + models::ContainersResources { + cpu: value.cpu_millicores as i32, + memory: value.memory_mib as i32, + } + } +} + +impl ApiFrom for ActorLifecycle { + fn api_from(value: models::ContainersLifecycle) -> ActorLifecycle { + ActorLifecycle { + kill_timeout_ms: value.kill_timeout.unwrap_or_default(), + durable: value.durable.unwrap_or_default(), + } + } +} + +impl ApiFrom for models::ContainersLifecycle { + fn api_from(value: ActorLifecycle) -> models::ContainersLifecycle { + models::ContainersLifecycle { + kill_timeout: Some(value.kill_timeout_ms), + durable: Some(value.durable), + } + } +} + +impl ApiFrom for NetworkMode { + fn api_from(value: models::ContainersNetworkMode) -> NetworkMode { + match value { + models::ContainersNetworkMode::Bridge => NetworkMode::Bridge, + models::ContainersNetworkMode::Host => NetworkMode::Host, + } + } +} + +impl ApiFrom for models::ContainersNetworkMode { + fn api_from(value: NetworkMode) -> models::ContainersNetworkMode { + match value { + NetworkMode::Bridge => models::ContainersNetworkMode::Bridge, + NetworkMode::Host => models::ContainersNetworkMode::Host, + } + } +} + +impl ApiFrom for models::ContainersPort { + fn api_from(value: Port) -> models::ContainersPort { + let (protocol, routing, url) = match &value.routing { + Routing::GameGuard { protocol } => { + let url = match ( + protocol, + value.public_hostname.as_ref(), + value.public_port, + value.public_path.as_ref(), + ) { + (GameGuardProtocol::Http, Some(hostname), Some(80) | None, path) => Some( + format!("{protocol}://{hostname}{}", util::format::OptDisplay(path)), + ), + (GameGuardProtocol::Https, Some(hostname), Some(443) | None, path) => Some( + format!("{protocol}://{hostname}{}", util::format::OptDisplay(path)), + ), + ( + GameGuardProtocol::Http | GameGuardProtocol::Https, + Some(hostname), + Some(port), + path, + ) => Some(format!( + "{protocol}://{hostname}:{port}{}", + util::format::OptDisplay(path) + )), + (_protocol, Some(hostname), Some(port), path) => Some(format!( + "{hostname}:{port}{}", + util::format::OptDisplay(path) + )), + (_protocol, Some(hostname), None, path) => { + Some(format!("{hostname}{}", util::format::OptDisplay(path))) + } + _ => None, + }; + + ( + (*protocol).api_into(), + models::ContainersPortRouting { + guard: Some(json!({})), + ..Default::default() + }, + url, + ) + } + Routing::Host { protocol } => ( + (*protocol).api_into(), + models::ContainersPortRouting { + host: Some(json!({})), + ..Default::default() + }, + None, + ), + 
}; + + models::ContainersPort { + protocol, + internal_port: value.internal_port, + hostname: value.public_hostname, + port: value.public_port, + path: value.public_path, + url, + routing: Box::new(routing), + } + } +} + +impl ApiFrom for GameGuardProtocol { + fn api_from(value: models::ContainersPortProtocol) -> GameGuardProtocol { + match value { + models::ContainersPortProtocol::Udp => GameGuardProtocol::Udp, + models::ContainersPortProtocol::Tcp => GameGuardProtocol::Tcp, + models::ContainersPortProtocol::Http => GameGuardProtocol::Http, + models::ContainersPortProtocol::Https => GameGuardProtocol::Https, + models::ContainersPortProtocol::TcpTls => GameGuardProtocol::TcpTls, + } + } +} + +impl ApiFrom for models::ContainersPortProtocol { + fn api_from(value: GameGuardProtocol) -> models::ContainersPortProtocol { + match value { + GameGuardProtocol::Udp => models::ContainersPortProtocol::Udp, + GameGuardProtocol::Tcp => models::ContainersPortProtocol::Tcp, + GameGuardProtocol::Http => models::ContainersPortProtocol::Http, + GameGuardProtocol::Https => models::ContainersPortProtocol::Https, + GameGuardProtocol::TcpTls => models::ContainersPortProtocol::TcpTls, + } + } +} + +impl ApiTryFrom for HostProtocol { + type Error = GlobalError; + fn api_try_from(value: models::ContainersPortProtocol) -> GlobalResult { + Ok(match value { + models::ContainersPortProtocol::Udp => HostProtocol::Udp, + models::ContainersPortProtocol::Tcp => HostProtocol::Tcp, + _ => { + bail_with!( + CONTAINER_FAILED_TO_CREATE, + error = "Host port protocol must be either TCP or UDP." + ); + } + }) + } +} + +impl ApiFrom for models::ContainersPortProtocol { + fn api_from(value: HostProtocol) -> models::ContainersPortProtocol { + match value { + HostProtocol::Udp => models::ContainersPortProtocol::Udp, + HostProtocol::Tcp => models::ContainersPortProtocol::Tcp, + } + } +} + +impl ApiFrom for EndpointType { + fn api_from(value: models::ContainersEndpointType) -> EndpointType { + match value { + models::ContainersEndpointType::Hostname => EndpointType::Hostname, + models::ContainersEndpointType::Path => EndpointType::Path, + } + } +} + +impl ApiFrom for models::ContainersEndpointType { + fn api_from(value: EndpointType) -> models::ContainersEndpointType { + match value { + EndpointType::Hostname => models::ContainersEndpointType::Hostname, + EndpointType::Path => models::ContainersEndpointType::Path, + } + } +} + +// MARK: V1 +pub mod v1 { + use super::*; + + pub fn convert_actor_to_api( + value: Actor, + datacenter: &cluster::types::Datacenter, + ) -> GlobalResult { + Ok(models::ActorsV1Actor { + id: unwrap!(value.actor_id.as_v0(), "cannot convert new actor to v1"), + region: datacenter.name_id.clone(), + created_at: util::timestamp::to_string(value.create_ts)?, + // `started_at` -> `connectable_ts` is intentional. We don't expose the internal + // workings of actors to the API, so we need to return the timestamp at which the server can + // actually do anything useful. 
+ started_at: value + .connectable_ts + .map(util::timestamp::to_string) + .transpose()?, + destroyed_at: value + .destroy_ts + .map(util::timestamp::to_string) + .transpose()?, + tags: Some(serde_json::to_value(value.tags)?), + runtime: Box::new(models::ActorsV1Runtime { + build: value.image_id, + arguments: Some(value.args), + environment: Some(value.environment), + }), + network: Box::new(models::ActorsV1Network { + mode: value.network_mode.api_into(), + ports: value + .network_ports + .into_iter() + .map(|(s, p)| (s, p.api_into())) + .collect::>(), + }), + lifecycle: Box::new(value.lifecycle.api_into()), + resources: value.resources.map(ApiInto::api_into).map(Box::new), + }) + } + + impl ApiFrom for ActorResources { + fn api_from(value: models::ActorsV1Resources) -> ActorResources { + ActorResources { + cpu_millicores: value.cpu as u32, + memory_mib: value.memory as u32, + } + } + } + + impl ApiFrom for models::ActorsV1Resources { + fn api_from(value: ActorResources) -> models::ActorsV1Resources { + models::ActorsV1Resources { + cpu: value.cpu_millicores as i32, + memory: value.memory_mib as i32, + } + } + } + + impl ApiFrom for ActorLifecycle { + fn api_from(value: models::ActorsV1Lifecycle) -> ActorLifecycle { + ActorLifecycle { + kill_timeout_ms: value.kill_timeout.unwrap_or_default(), + durable: value.durable.unwrap_or_default(), + } + } + } + + impl ApiFrom for models::ActorsV1Lifecycle { + fn api_from(value: ActorLifecycle) -> models::ActorsV1Lifecycle { + models::ActorsV1Lifecycle { + kill_timeout: Some(value.kill_timeout_ms), + durable: Some(value.durable), + } + } + } + + impl ApiFrom for NetworkMode { + fn api_from(value: models::ActorsV1NetworkMode) -> NetworkMode { + match value { + models::ActorsV1NetworkMode::Bridge => NetworkMode::Bridge, + models::ActorsV1NetworkMode::Host => NetworkMode::Host, + } + } + } + + impl ApiFrom for models::ActorsV1NetworkMode { + fn api_from(value: NetworkMode) -> models::ActorsV1NetworkMode { + match value { + NetworkMode::Bridge => models::ActorsV1NetworkMode::Bridge, + NetworkMode::Host => models::ActorsV1NetworkMode::Host, + } + } + } + + impl ApiFrom for models::ActorsV1Port { + fn api_from(value: Port) -> models::ActorsV1Port { + let (protocol, routing, url) = match &value.routing { + Routing::GameGuard { protocol } => { + let url = match ( + protocol, + value.public_hostname.as_ref(), + value.public_port, + value.public_path.as_ref(), + ) { + (GameGuardProtocol::Http, Some(hostname), Some(80) | None, path) => Some( + format!("{protocol}://{hostname}{}", util::format::OptDisplay(path)), + ), + (GameGuardProtocol::Https, Some(hostname), Some(443) | None, path) => Some( + format!("{protocol}://{hostname}{}", util::format::OptDisplay(path)), + ), + ( + GameGuardProtocol::Http | GameGuardProtocol::Https, + Some(hostname), + Some(port), + path, + ) => Some(format!( + "{protocol}://{hostname}:{port}{}", + util::format::OptDisplay(path) + )), + (_protocol, Some(hostname), Some(port), path) => Some(format!( + "{hostname}:{port}{}", + util::format::OptDisplay(path) + )), + (_protocol, Some(hostname), None, path) => { + Some(format!("{hostname}{}", util::format::OptDisplay(path))) + } + _ => None, + }; + + ( + (*protocol).api_into(), + models::ActorsV1PortRouting { + guard: Some(json!({})), + ..Default::default() + }, + url, + ) + } + Routing::Host { protocol } => ( + (*protocol).api_into(), + models::ActorsV1PortRouting { + host: Some(json!({})), + ..Default::default() + }, + None, + ), + }; + + models::ActorsV1Port { + protocol, + 
internal_port: value.internal_port, + hostname: value.public_hostname, + port: value.public_port, + path: value.public_path, + url, + routing: Box::new(routing), + } + } + } + + impl ApiFrom for GameGuardProtocol { + fn api_from(value: models::ActorsV1PortProtocol) -> GameGuardProtocol { + match value { + models::ActorsV1PortProtocol::Udp => GameGuardProtocol::Udp, + models::ActorsV1PortProtocol::Tcp => GameGuardProtocol::Tcp, + models::ActorsV1PortProtocol::Http => GameGuardProtocol::Http, + models::ActorsV1PortProtocol::Https => GameGuardProtocol::Https, + models::ActorsV1PortProtocol::TcpTls => GameGuardProtocol::TcpTls, + } + } + } + + impl ApiFrom for models::ActorsV1PortProtocol { + fn api_from(value: GameGuardProtocol) -> models::ActorsV1PortProtocol { + match value { + GameGuardProtocol::Udp => models::ActorsV1PortProtocol::Udp, + GameGuardProtocol::Tcp => models::ActorsV1PortProtocol::Tcp, + GameGuardProtocol::Http => models::ActorsV1PortProtocol::Http, + GameGuardProtocol::Https => models::ActorsV1PortProtocol::Https, + GameGuardProtocol::TcpTls => models::ActorsV1PortProtocol::TcpTls, + } + } + } + + impl ApiTryFrom for HostProtocol { + type Error = GlobalError; + fn api_try_from(value: models::ActorsV1PortProtocol) -> GlobalResult { + Ok(match value { + models::ActorsV1PortProtocol::Udp => HostProtocol::Udp, + models::ActorsV1PortProtocol::Tcp => HostProtocol::Tcp, + _ => { + bail_with!( + ACTOR_FAILED_TO_CREATE, + error = "Host port protocol must be either TCP or UDP." + ); + } + }) + } + } + + impl ApiFrom for models::ActorsV1PortProtocol { + fn api_from(value: HostProtocol) -> models::ActorsV1PortProtocol { + match value { + HostProtocol::Udp => models::ActorsV1PortProtocol::Udp, + HostProtocol::Tcp => models::ActorsV1PortProtocol::Tcp, + } + } + } + + impl ApiFrom for EndpointType { + fn api_from(value: models::ActorsV1EndpointType) -> EndpointType { + match value { + models::ActorsV1EndpointType::Hostname => EndpointType::Hostname, + models::ActorsV1EndpointType::Path => EndpointType::Path, + } + } + } + + impl ApiFrom for models::ActorsV1EndpointType { + fn api_from(value: EndpointType) -> models::ActorsV1EndpointType { + match value { + EndpointType::Hostname => models::ActorsV1EndpointType::Hostname, + EndpointType::Path => models::ActorsV1EndpointType::Path, + } + } + } +} diff --git a/packages/edge/services/pegboard/src/workflows/actor/analytics.rs b/packages/edge/services/pegboard/src/workflows/actor/analytics.rs index 708fea1dd0..a0e349eca8 100644 --- a/packages/edge/services/pegboard/src/workflows/actor/analytics.rs +++ b/packages/edge/services/pegboard/src/workflows/actor/analytics.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; #[derive(Debug, Serialize, Deserialize, Hash)] pub struct InsertClickHouseInput { - pub actor_id: Uuid, + pub actor_id: util::Id, } /// Row to be inserted in to ClickHouse @@ -25,6 +25,7 @@ pub struct ActorClickHouseRow { network_ports_proxied: HashMap, client_id: Uuid, client_wan_hostname: String, + runner_id: Uuid, selected_cpu_millicores: u32, selected_memory_mib: u32, root_user_enabled: bool, @@ -44,6 +45,8 @@ pub struct ActorClickHouseRow { /// 0 = not set started_at: i64, /// See `started_at`. + pending_allocation_at: i64, + /// See `started_at`. connectable_at: i64, /// See `started_at`. 
finished_at: i64, @@ -95,9 +98,11 @@ struct StateRow { selected_resources_memory_mib: Option, client_id: Option, client_wan_hostname: Option, + runner_id: Option, lifecycle_kill_timeout_ms: i64, lifecycle_durable: bool, create_ts: i64, + pending_allocation_ts: Option, start_ts: Option, connectable_ts: Option, finish_ts: Option, @@ -142,9 +147,11 @@ pub async fn insert_clickhouse( selected_resources_memory_mib, client_id, client_wan_hostname, + runner_id, lifecycle_kill_timeout_ms, lifecycle_durable, create_ts, + pending_allocation_ts, start_ts, connectable_ts, finish_ts, @@ -269,6 +276,7 @@ pub async fn insert_clickhouse( network_ports_proxied: proxied_ports, client_id: state_row.client_id.unwrap_or_default(), client_wan_hostname: state_row.client_wan_hostname.unwrap_or_default(), + runner_id: state_row.runner_id.unwrap_or_default(), selected_cpu_millicores: state_row .selected_resources_cpu_millicores .unwrap_or_default() as u32, @@ -287,6 +295,10 @@ pub async fn insert_clickhouse( cpu_millicores: state_row.resources_cpu_millicores, memory_mib: state_row.resources_memory_mib, created_at: state_row.create_ts * 1_000_000, // Convert ms to ns for ClickHouse DateTime64(9) + pending_allocation_at: state_row + .pending_allocation_ts + .map(|ts| ts * 1_000_000) + .unwrap_or_default(), started_at: state_row .start_ts .map(|ts| ts * 1_000_000) diff --git a/packages/edge/services/pegboard/src/workflows/actor/destroy.rs b/packages/edge/services/pegboard/src/workflows/actor/destroy.rs index 4bf977c07b..91e80b1fe3 100644 --- a/packages/edge/services/pegboard/src/workflows/actor/destroy.rs +++ b/packages/edge/services/pegboard/src/workflows/actor/destroy.rs @@ -1,22 +1,26 @@ -use build::types::BuildKind; +use build::types::BuildAllocationType; use chirp_workflow::prelude::*; -use fdb_util::{FormalKey, SERIALIZABLE}; -use foundationdb as fdb; +use fdb_util::{end_of_key_range, FormalKey, SERIALIZABLE}; +use foundationdb::{self as fdb, options::ConflictRangeType}; use nix::sys::signal::Signal; -use super::{analytics::InsertClickHouseInput, DestroyComplete, DestroyStarted}; +use super::{ + analytics::InsertClickHouseInput, runtime::ActorRunnerClickhouseRow, DestroyComplete, + DestroyStarted, +}; use crate::{keys, protocol, types::GameGuardProtocol}; #[derive(Debug, Serialize, Deserialize)] pub struct KillCtx { - pub generation: u32, pub kill_timeout_ms: i64, } #[derive(Debug, Serialize, Deserialize)] pub(crate) struct Input { - pub actor_id: Uuid, - pub build_kind: Option, + pub actor_id: util::Id, + pub generation: u32, + pub image_id: Uuid, + pub build_allocation_type: Option, /// Whether or not to send signals to the pb actor. In the case that the actor was already stopped /// or exited, signals are unnecessary. 
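	/// When `None`, the destroy workflow skips the SIGTERM/SIGKILL sequence entirely.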
pub kill: Option, @@ -35,26 +39,52 @@ pub(crate) async fn pegboard_actor_destroy( let actor = ctx.activity(UpdateDbInput {}).await?; if let Some(actor) = actor { - let client_workflow_id = actor.client_workflow_id; + if let (Some(start_ts), Some(runner_id)) = (actor.start_ts, actor.runner_id) { + ctx.activity(FinishRunnerClickhouseInput { + actor_id: input.actor_id, + start_ts, + runner_id, + }) + .await?; + } - ctx.activity(UpdateFdbInput { - actor_id: input.actor_id, - build_kind: input.build_kind, - actor, - }) - .await?; + let client_workflow_id = actor.client_workflow_id; + let runner_id = actor.runner_id; + + let res = ctx + .activity(UpdateFdbInput { + actor_id: input.actor_id, + image_id: input.image_id, + build_allocation_type: input.build_allocation_type, + actor, + }) + .await?; + // Destroy actor if let (Some(client_workflow_id), Some(kill_data)) = (client_workflow_id, &input.kill) { kill( ctx, input.actor_id, - kill_data.generation, + input.generation, client_workflow_id, kill_data.kill_timeout_ms, false, ) .await?; } + + // Destroy runner + if let (Some(client_workflow_id), Some(runner_id), true) = + (client_workflow_id, runner_id, res.destroy_runner) + { + ctx.signal(protocol::Command::SignalRunner { + runner_id, + signal: Signal::SIGKILL as i32, + }) + .to_workflow_id(client_workflow_id) + .send() + .await?; + } } // Update ClickHouse analytics with destroyed timestamp @@ -82,6 +112,8 @@ struct UpdateDbOutput { selected_resources_cpu_millicores: Option, tags: sqlx::types::Json>, create_ts: i64, + start_ts: Option, + runner_id: Option, client_id: Option, client_workflow_id: Option, } @@ -93,18 +125,20 @@ async fn update_db( ) -> GlobalResult> { let pool = ctx.sqlite().await?; + // NOTE: Row might not exist if the workflow failed before insert_db sql_fetch_optional!( [ctx, UpdateDbOutput, pool] " UPDATE state SET destroy_ts = ? 
- WHERE destroy_ts IS NULL RETURNING env_id, selected_resources_memory_mib, selected_resources_cpu_millicores, json(tags) AS tags, create_ts, + start_ts, + runner_id, client_id, client_workflow_id ", @@ -113,15 +147,53 @@ async fn update_db( .await } +#[derive(Debug, Serialize, Deserialize, Hash)] +struct FinishRunnerClickhouseInput { + actor_id: util::Id, + start_ts: i64, + runner_id: Uuid, +} + +#[activity(FinishRunnerClickhouse)] +async fn finish_runner_clickhouse( + ctx: &ActivityCtx, + input: &FinishRunnerClickhouseInput, +) -> GlobalResult<()> { + let inserter = ctx.clickhouse_inserter().await?; + + // Set alloc as finished + inserter.insert( + "db_pegboard_runner", + "actor_runners", + ActorRunnerClickhouseRow { + actor_id: input.actor_id.to_string(), + runner_id: input.runner_id, + started_at: input.start_ts * 1_000_000, // Convert ms to ns for ClickHouse DateTime64(9) + finished_at: util::timestamp::now() * 1_000_000, // Convert ms to ns for ClickHouse DateTime64(9) + }, + )?; + + Ok(()) +} + #[derive(Debug, Serialize, Deserialize, Hash)] pub struct UpdateFdbInput { - actor_id: Uuid, - build_kind: Option, + actor_id: util::Id, + image_id: Uuid, + build_allocation_type: Option, actor: UpdateDbOutput, } +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct UpdateFdbOutput { + destroy_runner: bool, +} + #[activity(UpdateFdb)] -pub async fn update_fdb(ctx: &ActivityCtx, input: &UpdateFdbInput) -> GlobalResult<()> { +pub async fn update_fdb( + ctx: &ActivityCtx, + input: &UpdateFdbInput, +) -> GlobalResult { let pool = ctx.sqlite().await?; let ingress_ports = sql_fetch_all!( @@ -133,18 +205,19 @@ pub async fn update_fdb(ctx: &ActivityCtx, input: &UpdateFdbInput) -> GlobalResu ) .await?; - ctx.fdb() + let destroy_runner = ctx + .fdb() .await? .run(|tx, _mc| { let ingress_ports = ingress_ports.clone(); async move { // Update actor key index in env subspace - let actor_key = keys::env::ActorKey::new( + let actor_key = keys::env::Actor2Key::new( input.actor.env_id, input.actor.create_ts, input.actor_id, ); - let data = keys::env::ActorKeyData { + let data = keys::env::Actor2KeyData { is_destroyed: true, tags: input.actor.tags.0.clone().into_iter().collect(), }; @@ -157,8 +230,10 @@ pub async fn update_fdb(ctx: &ActivityCtx, input: &UpdateFdbInput) -> GlobalResu clear_ports_and_resources( input.actor_id, - input.build_kind, + input.image_id, + input.build_allocation_type, ingress_ports, + input.actor.runner_id, input.actor.client_id, input.actor.client_workflow_id, input.actor.selected_resources_memory_mib, @@ -171,175 +246,259 @@ pub async fn update_fdb(ctx: &ActivityCtx, input: &UpdateFdbInput) -> GlobalResu .custom_instrument(tracing::info_span!("actor_destroy_tx")) .await?; - Ok(()) + Ok(UpdateFdbOutput { destroy_runner }) } // TODO: Clean up args /// Clears allocated ports and resources (if they were allocated). 
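/// Returns whether the caller should also destroy the runner: `true` only when the freed slot
/// leaves the runner with no remaining actors and the build uses `Multi` allocation.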
pub(crate) async fn clear_ports_and_resources( - actor_id: Uuid, - build_kind: Option, + actor_id: util::Id, + image_id: Uuid, + build_allocation_type: Option, ingress_ports: Vec<(i64, i64)>, + runner_id: Option, client_id: Option, client_workflow_id: Option, selected_resources_memory_mib: Option, selected_resources_cpu_millicores: Option, tx: &fdb::RetryableTransaction, -) -> Result<(), fdb::FdbBindingError> { +) -> Result { // Remove all allocated ingress ports for (protocol, port) in ingress_ports { - let protocol = GameGuardProtocol::from_repr( - usize::try_from(protocol).map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ) - .ok_or_else(|| { - fdb::FdbBindingError::CustomError( - format!("invalid protocol variant: {protocol}").into(), + let ingress_port_key = keys::port::IngressKey2::new( + GameGuardProtocol::from_repr( + usize::try_from(protocol) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, ) - })?; - let port = u16::try_from(port).map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - - let ingress_port_key = keys::port::IngressKey::new(protocol, port, actor_id); + .ok_or_else(|| { + fdb::FdbBindingError::CustomError( + format!("invalid protocol variant: {protocol}").into(), + ) + })?, + u16::try_from(port).map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + actor_id, + ); tx.clear(&keys::subspace().pack(&ingress_port_key)); - - let ingress_port_key2 = keys::port::IngressKey::new(protocol, port, actor_id.into()); - - tx.clear(&keys::subspace().pack(&ingress_port_key2)); } // Remove proxied ports - let proxied_ports_key = keys::actor::ProxiedPortsKey::new(actor_id); + let proxied_ports_key = keys::actor2::ProxiedPortsKey::new(actor_id); tx.clear(&keys::subspace().pack(&proxied_ports_key)); if let Some(client_id) = client_id { // This is cleared when the state changes as well as when the actor is destroyed to ensure // consistency during rescheduling and forced deletion. 
- let actor_key = keys::client::ActorKey::new(client_id, actor_id); + let actor_key = keys::client::Actor2Key::new(client_id, actor_id); tx.clear(&keys::subspace().pack(&actor_key)); } // Release client's resources and update allocation index if let ( - Some(build_kind), + Some(build_allocation_type), + Some(runner_id), Some(client_id), Some(client_workflow_id), Some(selected_resources_memory_mib), Some(selected_resources_cpu_millicores), ) = ( - build_kind, + build_allocation_type, + runner_id, client_id, client_workflow_id, selected_resources_memory_mib, selected_resources_cpu_millicores, ) { - let client_flavor = match build_kind { - BuildKind::DockerImage | BuildKind::OciBundle => protocol::ClientFlavor::Container, - BuildKind::JavaScript => protocol::ClientFlavor::Isolate, - }; - - let remaining_mem_key = keys::client::RemainingMemoryKey::new(client_id); - let remaining_mem_key_buf = keys::subspace().pack(&remaining_mem_key); - let remaining_cpu_key = keys::client::RemainingCpuKey::new(client_id); - let remaining_cpu_key_buf = keys::subspace().pack(&remaining_cpu_key); - let last_ping_ts_key = keys::client::LastPingTsKey::new(client_id); - let last_ping_ts_key_buf = keys::subspace().pack(&last_ping_ts_key); - - let (remaining_mem_entry, remaining_cpu_entry, last_ping_ts_entry) = tokio::try_join!( - tx.get(&remaining_mem_key_buf, SERIALIZABLE), - tx.get(&remaining_cpu_key_buf, SERIALIZABLE), - tx.get(&last_ping_ts_key_buf, SERIALIZABLE), + let client_flavor = protocol::ClientFlavor::Multi; + + let runner_remaining_slots_key = keys::runner::RemainingSlotsKey::new(runner_id); + let runner_remaining_slots_key_buf = keys::subspace().pack(&runner_remaining_slots_key); + let runner_total_slots_key = keys::runner::TotalSlotsKey::new(runner_id); + let runner_total_slots_key_buf = keys::subspace().pack(&runner_total_slots_key); + let client_remaining_mem_key = keys::client::RemainingMemoryKey::new(client_id); + let client_remaining_mem_key_buf = keys::subspace().pack(&client_remaining_mem_key); + let client_remaining_cpu_key = keys::client::RemainingCpuKey::new(client_id); + let client_remaining_cpu_key_buf = keys::subspace().pack(&client_remaining_cpu_key); + let client_last_ping_ts_key = keys::client::LastPingTsKey::new(client_id); + let client_last_ping_ts_key_buf = keys::subspace().pack(&client_last_ping_ts_key); + + let ( + runner_remaining_slots_entry, + runner_total_slots_entry, + client_remaining_mem_entry, + client_remaining_cpu_entry, + client_last_ping_ts_entry, + ) = tokio::try_join!( + tx.get(&runner_remaining_slots_key_buf, SERIALIZABLE), + tx.get(&runner_total_slots_key_buf, SERIALIZABLE), + tx.get(&client_remaining_mem_key_buf, SERIALIZABLE), + tx.get(&client_remaining_cpu_key_buf, SERIALIZABLE), + tx.get(&client_last_ping_ts_key_buf, SERIALIZABLE), )?; - let remaining_mem = remaining_mem_key + let runner_remaining_slots = runner_remaining_slots_key .deserialize( - &remaining_mem_entry.ok_or(fdb::FdbBindingError::CustomError( - format!("key should exist: {remaining_mem_key:?}").into(), + &runner_remaining_slots_entry.ok_or(fdb::FdbBindingError::CustomError( + format!("key should exist: {runner_remaining_slots_key:?}").into(), ))?, ) .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - let remaining_cpu = remaining_cpu_key + let runner_total_slots = runner_total_slots_key .deserialize( - &remaining_cpu_entry.ok_or(fdb::FdbBindingError::CustomError( - format!("key should exist: {remaining_cpu_key:?}").into(), + 
&runner_total_slots_entry.ok_or(fdb::FdbBindingError::CustomError( + format!("key should exist: {runner_total_slots_key:?}").into(), ))?, ) .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - let last_ping_ts = last_ping_ts_key - .deserialize(&last_ping_ts_entry.ok_or(fdb::FdbBindingError::CustomError( - format!("key should exist: {last_ping_ts_key:?}").into(), - ))?) + let client_remaining_mem = client_remaining_mem_key + .deserialize( + &client_remaining_mem_entry.ok_or(fdb::FdbBindingError::CustomError( + format!("key should exist: {client_remaining_mem_key:?}").into(), + ))?, + ) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + let client_remaining_cpu = client_remaining_cpu_key + .deserialize( + &client_remaining_cpu_entry.ok_or(fdb::FdbBindingError::CustomError( + format!("key should exist: {client_remaining_cpu_key:?}").into(), + ))?, + ) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + let client_last_ping_ts = client_last_ping_ts_key + .deserialize( + &client_last_ping_ts_entry.ok_or(fdb::FdbBindingError::CustomError( + format!("key should exist: {client_last_ping_ts_key:?}").into(), + ))?, + ) .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - let old_allocation_key = keys::datacenter::ClientsByRemainingMemKey::new( + let old_runner_allocation_key = keys::datacenter::RunnersByRemainingSlotsKey::new( + image_id, + runner_remaining_slots, + runner_id, + ); + let old_runner_allocation_key_buf = keys::subspace().pack(&old_runner_allocation_key); + + let old_client_allocation_key = keys::datacenter::ClientsByRemainingMemKey::new( client_flavor, - remaining_mem, - last_ping_ts, + client_remaining_mem, + client_last_ping_ts, client_id, ); - let old_allocation_key_buf = keys::subspace().pack(&old_allocation_key); - - let new_mem = remaining_mem - + u64::try_from(selected_resources_memory_mib) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - let new_cpu = remaining_cpu - + u64::try_from(selected_resources_cpu_millicores) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - - tracing::debug!( - old_mem=%remaining_mem, - old_cpu=%remaining_cpu, - %new_mem, - %new_cpu, - "releasing resources" - ); + let old_client_allocation_key_buf = keys::subspace().pack(&old_client_allocation_key); - // Write new memory - tx.set( - &remaining_mem_key_buf, - &remaining_mem_key - .serialize(new_mem) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - // Write new cpu + let new_runner_remaining_slots = runner_remaining_slots + 1; + + // Write new remaining slots tx.set( - &remaining_cpu_key_buf, - &remaining_cpu_key - .serialize(new_cpu) + &runner_remaining_slots_key_buf, + &runner_remaining_slots_key + .serialize(new_runner_remaining_slots) .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, ); - // Only update allocation idx if it existed before - if tx - .get(&old_allocation_key_buf, SERIALIZABLE) - .await? 
- .is_some() - { - // Clear old key - tx.clear(&old_allocation_key_buf); - - let new_allocation_key = keys::datacenter::ClientsByRemainingMemKey::new( - client_flavor, - new_mem, - last_ping_ts, - client_id, + // Clear old key + tx.clear(&old_runner_allocation_key_buf); + + // Add read conflict + tx.add_conflict_range( + &old_runner_allocation_key_buf, + &end_of_key_range(&old_runner_allocation_key_buf), + ConflictRangeType::Read, + )?; + + let destroy_runner = if new_runner_remaining_slots < runner_total_slots { + let new_runner_allocation_key = keys::datacenter::RunnersByRemainingSlotsKey::new( + image_id, + new_runner_remaining_slots, + runner_id, ); - let new_allocation_key_buf = keys::subspace().pack(&new_allocation_key); + let new_runner_allocation_key_buf = keys::subspace().pack(&new_runner_allocation_key); tx.set( - &new_allocation_key_buf, - &new_allocation_key - .serialize(client_workflow_id) + &new_runner_allocation_key_buf, + &new_runner_allocation_key + .serialize(keys::datacenter::RunnersByRemainingSlotsKeyData { + client_id, + client_workflow_id, + }) .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, ); + + false } - } + // Runner is now empty, release client resources + else { + let new_client_remaining_mem = client_remaining_mem + + u64::try_from(selected_resources_memory_mib) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + let new_client_remaining_cpu = client_remaining_cpu + + u64::try_from(selected_resources_cpu_millicores) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + tracing::debug!( + old_mem=%client_remaining_mem, + old_cpu=%client_remaining_cpu, + new_mem=%new_client_remaining_mem, + new_cpu=%new_client_remaining_cpu, + "releasing resources" + ); - Ok(()) + // Write new memory + tx.set( + &client_remaining_mem_key_buf, + &client_remaining_mem_key + .serialize(new_client_remaining_mem) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + // Write new cpu + tx.set( + &client_remaining_cpu_key_buf, + &client_remaining_cpu_key + .serialize(new_client_remaining_cpu) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + // Only update allocation idx if it existed before + if tx + .get(&old_client_allocation_key_buf, SERIALIZABLE) + .await? + .is_some() + { + // Clear old key + tx.clear(&old_client_allocation_key_buf); + + let new_client_allocation_key = keys::datacenter::ClientsByRemainingMemKey::new( + client_flavor, + new_client_remaining_mem, + client_last_ping_ts, + client_id, + ); + let new_client_allocation_key_buf = + keys::subspace().pack(&new_client_allocation_key); + + tx.set( + &new_client_allocation_key_buf, + &new_client_allocation_key + .serialize(client_workflow_id) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + } + + // Single container per runner allocations don't require explicitly destroying the runner because + // it is already stopped; the container = the actor. 
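+			// Only `Multi` allocation runners are reported back for an explicit SIGKILL (see the
+			// destroy workflow above).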
+ matches!(build_allocation_type, BuildAllocationType::Multi) + }; + + Ok(destroy_runner) + } else { + Ok(false) + } } pub(crate) async fn kill( ctx: &mut WorkflowCtx, - actor_id: Uuid, + actor_id: util::Id, generation: u32, client_workflow_id: Uuid, kill_timeout_ms: i64, @@ -347,7 +506,7 @@ pub(crate) async fn kill( ) -> GlobalResult<()> { if kill_timeout_ms != 0 { ctx.signal(protocol::Command::SignalActor { - actor_id: actor_id.into(), + actor_id, generation, signal: Signal::SIGTERM as i32, persist_storage, @@ -361,7 +520,7 @@ pub(crate) async fn kill( } ctx.signal(protocol::Command::SignalActor { - actor_id: actor_id.into(), + actor_id, generation, signal: Signal::SIGKILL as i32, persist_storage, diff --git a/packages/edge/services/pegboard/src/workflows/actor/migrations.rs b/packages/edge/services/pegboard/src/workflows/actor/migrations.rs index fefbc34d5a..ede6f53156 100644 --- a/packages/edge/services/pegboard/src/workflows/actor/migrations.rs +++ b/packages/edge/services/pegboard/src/workflows/actor/migrations.rs @@ -3,7 +3,6 @@ use sqlx::Acquire; pub async fn run(ctx: &mut WorkflowCtx) -> GlobalResult<()> { ctx.activity(MigrateInitInput {}).await?; - ctx.v(2).activity(MigrateExtraMetaInput {}).await?; Ok(()) } @@ -13,7 +12,6 @@ struct MigrateInitInput {} #[activity(MigrateInit)] async fn migrate_init(ctx: &ActivityCtx, _input: &MigrateInitInput) -> GlobalResult<()> { - // Transactions make migrations atomic let pool = ctx.sqlite().await?; let mut conn = pool.conn().await?; let mut tx = conn.begin().await?; @@ -22,16 +20,21 @@ async fn migrate_init(ctx: &ActivityCtx, _input: &MigrateInitInput) -> GlobalRes [ctx, @tx &mut tx] " CREATE TABLE state ( + -- Updated later + project_id BLOB NOT NULL DEFAULT X'00000000000000000000000000000000', -- UUID + env_id BLOB NOT NULL, -- UUID tags BLOB NOT NULL, -- JSONB, map - - resources_cpu_millicores INT NOT NULL, - resources_memory_mib INT NOT NULL, + + resources_cpu_millicores INT, + resources_memory_mib INT, -- Chosen based on tier selected_resources_cpu_millicores INT, selected_resources_memory_mib INT, + old_runner_id BLOB, -- UUID + runner_id BLOB, -- UUID client_id BLOB, -- UUID client_workflow_id BLOB, -- UUID client_wan_hostname TEXT, @@ -40,6 +43,7 @@ async fn migrate_init(ctx: &ActivityCtx, _input: &MigrateInitInput) -> GlobalRes lifecycle_durable INT NOT NULL DEFAULT false, -- BOOLEAN create_ts INT NOT NULL, + pending_allocation_ts INT, -- Set if currently pending alloc start_ts INT, connectable_ts INT, finish_ts INT, @@ -48,7 +52,12 @@ async fn migrate_init(ctx: &ActivityCtx, _input: &MigrateInitInput) -> GlobalRes image_id BLOB NOT NULL, -- UUID args BLOB NOT NULL, -- JSONB, list network_mode INT NOT NULL, -- pegboard::types::NetworkMode - environment BLOB NOT NULL -- JSONB, map + environment BLOB NOT NULL, -- JSONB, map + + -- Updated later + root_user_enabled INT NOT NULL DEFAULT false, + build_kind INT NOT NULL DEFAULT -1, + build_compression INT NOT NULL DEFAULT -1 ) STRICT; CREATE TABLE ports_ingress ( @@ -77,28 +86,3 @@ async fn migrate_init(ctx: &ActivityCtx, _input: &MigrateInitInput) -> GlobalRes Ok(()) } - -#[derive(Debug, Serialize, Deserialize, Hash)] -struct MigrateExtraMetaInput {} - -#[activity(MigrateExtraMeta)] -async fn migrate_extra_meta(ctx: &ActivityCtx, _input: &MigrateExtraMetaInput) -> GlobalResult<()> { - let pool = ctx.sqlite().await?; - let mut conn = pool.conn().await?; - let mut tx = conn.begin().await?; - - sql_execute!( - [ctx, @tx &mut tx] - " - ALTER TABLE state ADD project_id BLOB DEFAULT 
X'00000000000000000000000000000000'; -- UUID - ALTER TABLE state ADD root_user_enabled INT DEFAULT false; - ALTER TABLE state ADD build_kind INT DEFAULT -1; - ALTER TABLE state ADD build_compression INT DEFAULT -1; - ", - ) - .await?; - - tx.commit().await?; - - Ok(()) -} diff --git a/packages/edge/services/pegboard/src/workflows/actor/mod.rs b/packages/edge/services/pegboard/src/workflows/actor/mod.rs index 2c82f7ec9c..5ea77154af 100644 --- a/packages/edge/services/pegboard/src/workflows/actor/mod.rs +++ b/packages/edge/services/pegboard/src/workflows/actor/mod.rs @@ -2,11 +2,11 @@ use analytics::InsertClickHouseInput; use chirp_workflow::prelude::*; use destroy::KillCtx; use futures_util::FutureExt; -use rivet_util::serde::HashableMap; use crate::{ protocol, types::{ActorLifecycle, ActorResources, EndpointType, NetworkMode, Routing}, + workflows::client::AllocatePendingActorsInput, }; mod analytics; @@ -14,6 +14,7 @@ pub mod destroy; mod migrations; mod runtime; mod setup; +pub mod v1; // A small amount of time to separate the completion of the drain to the deletion of the cluster server. We // want the drain to complete first. @@ -32,17 +33,18 @@ const RETRY_RESET_DURATION_MS: i64 = util::duration::minutes(10); #[derive(Clone, Debug, Serialize, Deserialize, Hash)] pub struct Input { - pub actor_id: Uuid, + pub actor_id: util::Id, pub env_id: Uuid, - pub tags: HashableMap, - pub resources: ActorResources, + pub tags: util::serde::HashableMap, + /// Used to override image resources. + pub resources: Option, pub lifecycle: ActorLifecycle, pub image_id: Uuid, pub root_user_enabled: bool, pub args: Vec, pub network_mode: NetworkMode, - pub environment: HashableMap, - pub network_ports: HashableMap, + pub environment: util::serde::HashableMap, + pub network_ports: util::serde::HashableMap, pub endpoint_type: Option, } @@ -54,7 +56,7 @@ pub struct Port { } #[workflow] -pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> { +pub async fn pegboard_actor2(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> { migrations::run(ctx).await?; let validation_res = ctx @@ -111,7 +113,9 @@ pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResul ctx.workflow(destroy::Input { actor_id: input.actor_id, - build_kind: None, + generation: 0, + image_id: input.image_id, + build_allocation_type: None, kill: None, }) .output() @@ -133,17 +137,14 @@ pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResul .send() .await?; - let Some(res) = runtime::spawn_actor(ctx, input, &initial_actor_setup, 0).await? else { - ctx.msg(Failed { - message: "Failed to allocate (no availability).".into(), - }) - .tag("actor_id", input.actor_id) - .send() - .await?; - + let Some(allocate_res) = runtime::spawn_actor(ctx, input, &initial_actor_setup, 0).await? 
+ else { + // Destroyed early ctx.workflow(destroy::Input { actor_id: input.actor_id, - build_kind: Some(initial_actor_setup.meta.build_kind), + generation: 0, + image_id: input.image_id, + build_allocation_type: Some(initial_actor_setup.meta.build_allocation_type), kill: None, }) .output() @@ -152,17 +153,14 @@ pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResul return Ok(()); }; - ctx.v(2) - .msg(Allocated { - client_id: res.client_id, - }) - .tag("actor_id", input.actor_id) - .send() - .await?; - - let state_res = ctx + let lifecycle_res = ctx .loope( - runtime::State::new(res.client_id, res.client_workflow_id, input.image_id), + runtime::State::new( + allocate_res.runner_id, + allocate_res.client_id, + allocate_res.client_workflow_id, + input.image_id, + ), |ctx, state| { let input = input.clone(); @@ -188,21 +186,14 @@ pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResul ) .await?; - if let Some(sig) = runtime::reschedule_actor( - ctx, - &input, - state, - state.image_id.unwrap_or(input.image_id), - ) - .await? + if runtime::reschedule_actor(ctx, &input, state, state.image_id).await? { // Destroyed early - return Ok(Loop::Break(runtime::StateRes { + return Ok(Loop::Break(runtime::LifecycleRes { + generation: state.generation, + image_id: state.image_id, kill: Some(KillCtx { - generation: state.generation, - kill_timeout_ms: sig - .override_kill_timeout_ms - .unwrap_or(input.lifecycle.kill_timeout_ms), + kill_timeout_ms: input.lifecycle.kill_timeout_ms, }), })); } else { @@ -210,9 +201,10 @@ pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResul return Ok(Loop::Continue); } } else { - return Ok(Loop::Break(runtime::StateRes { + return Ok(Loop::Break(runtime::LifecycleRes { + generation: state.generation, + image_id: state.image_id, kill: Some(KillCtx { - generation: state.generation, kill_timeout_ms: input.lifecycle.kill_timeout_ms, }), })); @@ -253,7 +245,10 @@ pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResul protocol::ActorState::Starting => { state.gc_timeout_ts = None; - ctx.activity(runtime::SetStartedInput {}).await?; + ctx.activity(runtime::SetStartedInput { + actor_id: input.actor_id, + }) + .await?; } protocol::ActorState::Running { ports, .. } => { ctx.join(( @@ -267,9 +262,6 @@ pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResul )) .await?; - // Old traefik timeout - ctx.removed::>().await?; - let updated = ctx .activity(runtime::SetConnectableInput { connectable: true, @@ -337,15 +329,16 @@ pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResul ctx, &input, state, - state.image_id.unwrap_or(input.image_id), + state.image_id, ) .await? - .is_some() { // Destroyed early - return Ok(Loop::Break(runtime::StateRes { - // Destroy actor is none here because if we received the destroy - // signal, it is guaranteed that we did not allocate another actor. + return Ok(Loop::Break(runtime::LifecycleRes { + generation: state.generation, + image_id: state.image_id, + // None here because if we received the destroy signal, it is + // guaranteed that we did not allocate another actor. 
kill: None, })); } @@ -363,13 +356,12 @@ pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResul .await?; } - return Ok(Loop::Break(runtime::StateRes { + return Ok(Loop::Break(runtime::LifecycleRes { + generation: state.generation, + image_id: state.image_id, // No need to kill if already exited kill: matches!(sig.state, protocol::ActorState::Lost) - .then_some(KillCtx { - generation: state.generation, - kill_timeout_ms: 0, - }), + .then_some(KillCtx { kill_timeout_ms: 0 }), })); } } @@ -381,6 +373,26 @@ pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResul .send() .await?; + let validation_res = ctx + .activity(runtime::ValidateUpgradeInput { + initial_build_allocation_type: initial_actor_setup + .meta + .build_allocation_type, + image_id: sig.image_id, + }) + .await?; + + if let Some(error_message) = validation_res { + ctx.msg(UpgradeFailed { + message: error_message, + }) + .tag("actor_id", input.actor_id) + .send() + .await?; + + return Ok(Loop::Continue); + } + ctx.activity(runtime::SetConnectableInput { connectable: false }) .await?; @@ -399,23 +411,16 @@ pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResul image_id: sig.image_id, }) .await?; - state.image_id = Some(sig.image_id); + state.image_id = sig.image_id; - if let Some(sig) = runtime::reschedule_actor( - ctx, - &input, - state, - state.image_id.unwrap_or(input.image_id), - ) - .await? + if runtime::reschedule_actor(ctx, &input, state, state.image_id).await? { // Destroyed early - return Ok(Loop::Break(runtime::StateRes { + return Ok(Loop::Break(runtime::LifecycleRes { + generation: state.generation, + image_id: input.image_id, kill: Some(KillCtx { - generation: state.generation, - kill_timeout_ms: sig - .override_kill_timeout_ms - .unwrap_or(input.lifecycle.kill_timeout_ms), + kill_timeout_ms: input.lifecycle.kill_timeout_ms, }), })); } @@ -435,9 +440,10 @@ pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResul state.drain_timeout_ts = None; } Main::Destroy(sig) => { - return Ok(Loop::Break(runtime::StateRes { + return Ok(Loop::Break(runtime::LifecycleRes { + generation: state.generation, + image_id: input.image_id, kill: Some(KillCtx { - generation: state.generation, kill_timeout_ms: sig .override_kill_timeout_ms .unwrap_or(input.lifecycle.kill_timeout_ms), @@ -455,23 +461,35 @@ pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResul ctx.workflow(destroy::Input { actor_id: input.actor_id, - build_kind: Some(initial_actor_setup.meta.build_kind.clone()), - kill: state_res.kill, + generation: lifecycle_res.generation, + image_id: lifecycle_res.image_id, + build_allocation_type: Some(initial_actor_setup.meta.build_allocation_type), + kill: lifecycle_res.kill, }) .output() .await?; + // NOTE: The reason we allocate other actors from this actor workflow is because if we instead sent a + // signal to the client wf here it would incur a heavy throughput hit and we need the client wf to be as + // lightweight as possible; processing as few signals that aren't events/commands. 
+ // Allocate other pending actors from queue + let res = ctx.activity(AllocatePendingActorsInput {}).await?; + + // Dispatch pending allocs + for alloc in res.allocations { + ctx.signal(alloc.signal) + .to_workflow::() + .tag("actor_id", alloc.actor_id) + .send() + .await?; + } + Ok(()) } #[message("pegboard_actor_create_complete")] pub struct CreateComplete {} -#[message("pegboard_actor_allocated")] -pub struct Allocated { - pub client_id: Uuid, -} - #[message("pegboard_actor_failed")] pub struct Failed { pub message: String, @@ -480,6 +498,15 @@ pub struct Failed { #[message("pegboard_actor_ready")] pub struct Ready {} +#[signal("pegboard_actor_allocate")] +#[derive(Debug)] +pub struct Allocate { + pub runner_id: Uuid, + pub new_runner: bool, + pub client_id: Uuid, + pub client_workflow_id: Uuid, +} + #[signal("pegboard_actor_destroy")] pub struct Destroy { pub override_kill_timeout_ms: Option, @@ -514,9 +541,20 @@ pub struct StateUpdate { #[message("pegboard_actor_upgrade_started")] pub struct UpgradeStarted {} +#[message("pegboard_actor_upgrade_failed")] +pub struct UpgradeFailed { + pub message: String, +} + #[message("pegboard_actor_upgrade_complete")] pub struct UpgradeComplete {} +join_signal!(PendingAllocation { + Allocate, + Destroy, + // +}); + join_signal!(Main { StateUpdate, Upgrade, @@ -524,14 +562,3 @@ join_signal!(Main { Undrain, Destroy, }); - -// Stub definition -#[derive(Debug, Serialize, Deserialize, Hash)] -pub struct WaitForTraefikPollInput {} -#[activity(WaitForTraefikPoll)] -pub async fn wait_for_traefik_poll( - _ctx: &ActivityCtx, - _input: &WaitForTraefikPollInput, -) -> GlobalResult<()> { - Ok(()) -} diff --git a/packages/edge/services/pegboard/src/workflows/actor/runtime.rs b/packages/edge/services/pegboard/src/workflows/actor/runtime.rs index 9ad5454904..07aa077eaa 100644 --- a/packages/edge/services/pegboard/src/workflows/actor/runtime.rs +++ b/packages/edge/services/pegboard/src/workflows/actor/runtime.rs @@ -1,19 +1,22 @@ use std::time::Instant; -use build::types::BuildKind; +use build::types::{BuildAllocationType, BuildKind}; use chirp_workflow::prelude::*; +use cluster::types::BuildDeliveryMethod; use fdb_util::{end_of_key_range, FormalKey, SERIALIZABLE, SNAPSHOT}; use foundationdb::{ self as fdb, options::{ConflictRangeType, StreamingMode}, }; +use futures_util::StreamExt; use futures_util::{FutureExt, TryStreamExt}; +use nix::sys::signal::Signal; use sqlx::Acquire; use super::{ destroy::{self, KillCtx}, - setup, Destroy, Input, ACTOR_START_THRESHOLD_MS, BASE_RETRY_TIMEOUT_MS, - RETRY_RESET_DURATION_MS, + setup, Allocate, Destroy, Input, PendingAllocation, ACTOR_START_THRESHOLD_MS, + BASE_RETRY_TIMEOUT_MS, RETRY_RESET_DURATION_MS, }; use crate::{ keys, metrics, @@ -26,10 +29,11 @@ use crate::{ #[derive(Deserialize, Serialize)] pub struct State { pub generation: u32, + pub runner_id: Uuid, pub client_id: Uuid, pub client_workflow_id: Uuid, - pub image_id: Option, + pub image_id: Uuid, pub drain_timeout_ts: Option, pub gc_timeout_ts: Option, @@ -39,12 +43,13 @@ pub struct State { } impl State { - pub fn new(client_id: Uuid, client_workflow_id: Uuid, image_id: Uuid) -> Self { + pub fn new(runner_id: Uuid, client_id: Uuid, client_workflow_id: Uuid, image_id: Uuid) -> Self { State { generation: 0, client_id, client_workflow_id, - image_id: Some(image_id), + runner_id, + image_id, drain_timeout_ts: None, gc_timeout_ts: Some(util::timestamp::now() + ACTOR_START_THRESHOLD_MS), reschedule_state: RescheduleState::default(), @@ -53,7 +58,9 @@ impl State { } 
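The `Allocate` signal and `PendingAllocation` join signal above are the two halves of the new queueing handshake: when an actor is destroyed, its workflow drains the pending queue and forwards one `Allocate` signal per queued actor (the dequeue loop above, which keeps this fan-out off the client workflow per the throughput note), while the queued actor itself blocks until it is woken or destroyed. A condensed, illustrative view of the waiting side, using only the signal shapes defined in this patch; see `runtime::spawn_actor` further down for the full handling:

    // Condensed sketch of the waiting side in runtime::spawn_actor below.
    // A queued actor blocks on the PendingAllocation join signal until another
    // workflow frees capacity or the actor is destroyed while still queued.
    match ctx.listen::<PendingAllocation>().await? {
        PendingAllocation::Allocate(sig) => {
            // A slot was freed; continue with sig.runner_id / sig.client_id.
        }
        PendingAllocation::Destroy(_sig) => {
            // Destroyed before allocation; the FDB queue entry is cleared first,
            // and if it was already gone the Allocate signal is awaited to avoid a race.
        }
    }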
#[derive(Serialize, Deserialize)] -pub struct StateRes { +pub struct LifecycleRes { + pub generation: u32, + pub image_id: Uuid, pub kill: Option, } @@ -64,13 +71,17 @@ struct RescheduleState { } #[derive(Debug, Serialize, Deserialize, Hash)] -struct UpdateClientInput { +struct UpdateClientAndRunnerInput { client_id: Uuid, client_workflow_id: Uuid, + runner_id: Uuid, } -#[activity(UpdateClient)] -async fn update_client(ctx: &ActivityCtx, input: &UpdateClientInput) -> GlobalResult<()> { +#[activity(UpdateClientAndRunner)] +async fn update_client_and_runner( + ctx: &ActivityCtx, + input: &UpdateClientAndRunnerInput, +) -> GlobalResult<()> { let client_pool = ctx.sqlite_for_workflow(input.client_workflow_id).await?; let pool = ctx.sqlite().await?; @@ -84,26 +95,101 @@ async fn update_client(ctx: &ActivityCtx, input: &UpdateClientInput) -> GlobalRe .await?; sql_execute!( - [ctx, pool] + [ctx, &pool] " UPDATE state SET - client_id = ?, - client_workflow_id = ?, - client_wan_hostname = ? + pending_allocation_ts = NULL, + client_id = ?1, + client_workflow_id = ?2, + client_wan_hostname = ?3, + runner_id = ?4, + old_runner_id = runner_id ", input.client_id, input.client_workflow_id, &client_wan_hostname, + input.runner_id, ) .await?; Ok(()) } +#[derive(Debug, Serialize, Deserialize, Hash)] +struct ResolveArtifactsInput { + build_upload_id: Uuid, + build_file_name: String, + dc_build_delivery_method: BuildDeliveryMethod, +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct ResolveArtifactsOutput { + artifact_url_stub: String, + fallback_artifact_url: String, + /// Bytes. + artifact_size: u64, +} + +#[activity(ResolveArtifacts)] +async fn resolve_artifacts( + ctx: &ActivityCtx, + input: &ResolveArtifactsInput, +) -> GlobalResult { + // Get the fallback URL + let fallback_artifact_url = { + tracing::debug!("using s3 direct delivery"); + + // Build client + let s3_client = s3_util::Client::with_bucket_and_endpoint( + ctx.config(), + "bucket-build", + s3_util::EndpointKind::EdgeInternal, + ) + .await?; + + let presigned_req = s3_client + .get_object() + .bucket(s3_client.bucket()) + .key(format!( + "{upload_id}/{file_name}", + upload_id = input.build_upload_id, + file_name = input.build_file_name, + )) + .presigned( + s3_util::aws_sdk_s3::presigning::PresigningConfig::builder() + .expires_in(std::time::Duration::from_secs(15 * 60)) + .build()?, + ) + .await?; + + let addr_str = presigned_req.uri().to_string(); + tracing::debug!(addr = %addr_str, "resolved artifact s3 presigned request"); + + addr_str + }; + + // Get the artifact size + let uploads_res = op!([ctx] upload_get { + upload_ids: vec![input.build_upload_id.into()], + }) + .await?; + let upload = unwrap!(uploads_res.uploads.first()); + + Ok(ResolveArtifactsOutput { + artifact_url_stub: crate::util::image_artifact_url_stub( + ctx.config(), + input.build_upload_id, + &input.build_file_name, + )?, + fallback_artifact_url, + artifact_size: upload.content_length, + }) +} + #[derive(Debug, Serialize, Deserialize, Hash)] struct FetchPortsInput { - actor_id: Uuid, + actor_id: util::Id, endpoint_type: Option, } @@ -166,7 +252,7 @@ async fn fetch_ports(ctx: &ActivityCtx, input: &FetchPortsInput) -> GlobalResult .into_iter() .map(|row| { let port = get::create_port_ingress( - input.actor_id.into(), + input.actor_id, &row, unwrap!(GameGuardProtocol::from_repr(row.protocol.try_into()?)), endpoint_type, @@ -184,8 +270,7 @@ async fn fetch_ports(ctx: &ActivityCtx, input: &FetchPortsInput) -> GlobalResult true, wan_hostname.as_deref(), &row, - // 
Placeholder, will be replaced in the isolate runner when building - // metadata + // Placeholder, will be replaced by the manager when building metadata Some(&get::PortProxied { port_name: String::new(), source: 0, @@ -204,187 +289,423 @@ async fn fetch_ports(ctx: &ActivityCtx, input: &FetchPortsInput) -> GlobalResult } #[derive(Debug, Serialize, Deserialize, Hash)] -struct AllocateActorInputV1 { - actor_id: Uuid, - build_kind: BuildKind, - resources: protocol::Resources, -} - -#[activity(AllocateActorV1)] -async fn allocate_actor( - ctx: &ActivityCtx, - input: &AllocateActorInputV1, -) -> GlobalResult> { - AllocateActorV2::run( - ctx, - &AllocateActorInputV2 { - actor_id: input.actor_id, - generation: 0, - build_kind: input.build_kind, - resources: input.resources.clone(), - }, - ) - .await -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -struct AllocateActorInputV2 { - actor_id: Uuid, +struct AllocateActorInput { + actor_id: util::Id, generation: u32, - build_kind: BuildKind, + image_id: Uuid, + build_allocation_type: BuildAllocationType, + build_allocation_total_slots: u32, resources: protocol::Resources, } #[derive(Debug, Serialize, Deserialize)] -pub struct AllocateActorOutputV2 { +pub struct AllocateActorOutput { + pub runner_id: Uuid, + pub new_runner: bool, pub client_id: Uuid, pub client_workflow_id: Uuid, } -#[activity(AllocateActorV2)] -async fn allocate_actor_v2( +// If no availability, returns the timestamp of the actor's queue key +#[activity(AllocateActor)] +async fn allocate_actor( ctx: &ActivityCtx, - input: &AllocateActorInputV2, -) -> GlobalResult> { - let client_flavor = match input.build_kind { - BuildKind::DockerImage | BuildKind::OciBundle => protocol::ClientFlavor::Container, - BuildKind::JavaScript => protocol::ClientFlavor::Isolate, - }; + input: &AllocateActorInput, +) -> GlobalResult> { + let client_flavor = protocol::ClientFlavor::Multi; let memory_mib = input.resources.memory / 1024 / 1024; let start_instant = Instant::now(); + // NOTE: This txn should closely resemble the one found in the allocate_pending_actors activity of the + // client wf let res = ctx .fdb() .await? 
.run(|tx, _mc| async move { - let ping_threshold_ts = util::timestamp::now() - CLIENT_ELIGIBLE_THRESHOLD_MS; - - // Select a range that only includes clients that have enough remaining mem to allocate this actor - let start = keys::subspace().pack( - &keys::datacenter::ClientsByRemainingMemKey::subspace_with_mem( - client_flavor, - memory_mib, - ), - ); - let client_allocation_subspace = - keys::datacenter::ClientsByRemainingMemKey::subspace(client_flavor); - let end = keys::subspace() - .subspace(&client_allocation_subspace) - .range() - .1; - - let mut stream = tx.get_ranges_keyvalues( - fdb::RangeOption { - mode: StreamingMode::Iterator, - // Containers bin pack so we reverse the order - reverse: matches!(client_flavor, protocol::ClientFlavor::Container), - ..(start, end).into() - }, - // NOTE: This is not SERIALIZABLE because we don't want to conflict with all of the keys, just - // the one we choose - SNAPSHOT, - ); + // Check for availability amongst existing runners + let image_queue_exists = if let BuildAllocationType::Multi = input.build_allocation_type + { + // Check if a queue for this image exists + let pending_actor_by_image_subspace = keys::subspace().subspace( + &keys::datacenter::PendingActorByImageIdKey::subspace(input.image_id), + ); + let queue_exists = tx + .get_ranges_keyvalues( + fdb::RangeOption { + mode: StreamingMode::Exact, + limit: Some(1), + ..(&pending_actor_by_image_subspace).into() + }, + // NOTE: This is not SERIALIZABLE because we don't want to conflict with other + // inserts/clears to this range + // queue + SNAPSHOT, + ) + .try_next() + .await? + .is_some(); + + if !queue_exists { + // Select a range that only includes runners that have enough remaining slots to allocate + // this actor + let start = keys::subspace().pack( + &keys::datacenter::RunnersByRemainingSlotsKey::subspace_with_slots( + input.image_id, + 1, + ), + ); + let runner_allocation_subspace = + keys::datacenter::RunnersByRemainingSlotsKey::subspace(input.image_id); + let end = keys::subspace() + .subspace(&runner_allocation_subspace) + .range() + .1; + + let mut stream = tx.get_ranges_keyvalues( + fdb::RangeOption { + mode: StreamingMode::Iterator, + // Containers bin pack so we reverse the order + reverse: true, + ..(start, end).into() + }, + // NOTE: This is not SERIALIZABLE because we don't want to conflict with all of the keys, just + // the one we choose + SNAPSHOT, + ); + + loop { + let Some(entry) = stream.try_next().await? 
else { + break; + }; + + let old_runner_allocation_key = keys::subspace() + .unpack::(entry.key()) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + let data = old_runner_allocation_key + .deserialize(entry.value()) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + // Add read conflict only for this key + tx.add_conflict_range( + entry.key(), + &end_of_key_range(entry.key()), + ConflictRangeType::Read, + )?; + + // Clear old entry + tx.clear(entry.key()); + + let new_remaining_slots = + old_runner_allocation_key.remaining_slots.saturating_sub(1); + + // Write new allocation key with 1 less slot + let new_allocation_key = keys::datacenter::RunnersByRemainingSlotsKey::new( + input.image_id, + new_remaining_slots, + old_runner_allocation_key.runner_id, + ); + tx.set(&keys::subspace().pack(&new_allocation_key), entry.value()); + + // Update runner record + let remaining_slots_key = keys::runner::RemainingSlotsKey::new( + old_runner_allocation_key.runner_id, + ); + tx.set( + &keys::subspace().pack(&remaining_slots_key), + &remaining_slots_key + .serialize(new_remaining_slots) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + // Insert actor index key + let client_actor_key = + keys::client::Actor2Key::new(data.client_id, input.actor_id); + tx.set( + &keys::subspace().pack(&client_actor_key), + &client_actor_key + .serialize(input.generation) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + return Ok(Ok(AllocateActorOutput { + runner_id: old_runner_allocation_key.runner_id, + new_runner: false, + client_id: data.client_id, + client_workflow_id: data.client_workflow_id, + })); + } + } - loop { - let Some(entry) = stream.try_next().await? else { - return Ok(None); - }; + queue_exists + } else { + false + }; + + // No available runner found, create a new one + + // Check if a queue exists + let pending_actor_subspace = + keys::subspace().subspace(&keys::datacenter::PendingActorKey::subspace()); + let queue_exists = if image_queue_exists { + // We don't have to check the range if the image queue exists, its guaranteed that this one + // exists too + true + } else { + tx.get_ranges_keyvalues( + fdb::RangeOption { + mode: StreamingMode::Exact, + limit: Some(1), + ..(&pending_actor_subspace).into() + }, + // NOTE: This is not SERIALIZABLE because we don't want to conflict with other + // inserts/clears to this range + // queue + SNAPSHOT, + ) + .next() + .await + .is_some() + }; - let old_allocation_key = keys::subspace() - .unpack::(entry.key()) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + if !queue_exists { + let runner_id = Uuid::new_v4(); - // Scan by last ping - if old_allocation_key.last_ping_ts < ping_threshold_ts { - continue; - } + let ping_threshold_ts = util::timestamp::now() - CLIENT_ELIGIBLE_THRESHOLD_MS; - let client_workflow_id = old_allocation_key - .deserialize(entry.value()) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - - // Add read conflict only for this key - tx.add_conflict_range( - entry.key(), - &end_of_key_range(entry.key()), - ConflictRangeType::Read, - )?; - - // Clear old entry - tx.clear(entry.key()); - - // Read old cpu - let remaining_cpu_key = - keys::client::RemainingCpuKey::new(old_allocation_key.client_id); - let remaining_cpu_key_buf = keys::subspace().pack(&remaining_cpu_key); - let remaining_cpu_entry = tx.get(&remaining_cpu_key_buf, SERIALIZABLE).await?; - let old_remaining_cpu = remaining_cpu_key - .deserialize( - 
&remaining_cpu_entry.ok_or(fdb::FdbBindingError::CustomError( - format!("key should exist: {remaining_cpu_key:?}").into(), - ))?, - ) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - - // Update allocated amount - let new_remaining_mem = old_allocation_key.remaining_mem - memory_mib; - let new_remaining_cpu = old_remaining_cpu - input.resources.cpu; - let new_allocation_key = keys::datacenter::ClientsByRemainingMemKey::new( - client_flavor, - new_remaining_mem, - old_allocation_key.last_ping_ts, - old_allocation_key.client_id, + // Select a range that only includes clients that have enough remaining mem to allocate this actor + let start = keys::subspace().pack( + &keys::datacenter::ClientsByRemainingMemKey::subspace_with_mem( + client_flavor, + memory_mib, + ), ); - tx.set(&keys::subspace().pack(&new_allocation_key), entry.value()); - - tracing::debug!( - old_mem=%old_allocation_key.remaining_mem, - old_cpu=%old_remaining_cpu, - new_mem=%new_remaining_mem, - new_cpu=%new_remaining_cpu, - "allocating resources" + let client_allocation_subspace = + keys::datacenter::ClientsByRemainingMemKey::subspace(client_flavor); + let end = keys::subspace() + .subspace(&client_allocation_subspace) + .range() + .1; + + let mut stream = tx.get_ranges_keyvalues( + fdb::RangeOption { + mode: StreamingMode::Iterator, + // Containers bin pack so we reverse the order + reverse: true, + ..(start, end).into() + }, + // NOTE: This is not SERIALIZABLE because we don't want to conflict with all of the keys, just + // the one we choose + SNAPSHOT, ); - // Update client record - let remaining_mem_key = - keys::client::RemainingMemoryKey::new(old_allocation_key.client_id); - tx.set( - &keys::subspace().pack(&remaining_mem_key), - &remaining_mem_key - .serialize(new_remaining_mem) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); + loop { + let Some(entry) = stream.try_next().await? else { + break; + }; - tx.set( - &remaining_cpu_key_buf, - &remaining_cpu_key - .serialize(new_remaining_cpu) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + let old_client_allocation_key = keys::subspace() + .unpack::(entry.key()) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + // Scan by last ping + if old_client_allocation_key.last_ping_ts < ping_threshold_ts { + continue; + } + + let client_workflow_id = + old_client_allocation_key + .deserialize(entry.value()) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + // Add read conflict only for this key + tx.add_conflict_range( + entry.key(), + &end_of_key_range(entry.key()), + ConflictRangeType::Read, + )?; + + // Clear old entry + tx.clear(entry.key()); + + // Read old cpu + let remaining_cpu_key = + keys::client::RemainingCpuKey::new(old_client_allocation_key.client_id); + let remaining_cpu_key_buf = keys::subspace().pack(&remaining_cpu_key); + let remaining_cpu_entry = tx.get(&remaining_cpu_key_buf, SERIALIZABLE).await?; + let old_remaining_cpu = remaining_cpu_key + .deserialize(&remaining_cpu_entry.ok_or( + fdb::FdbBindingError::CustomError( + format!("key should exist: {remaining_cpu_key:?}").into(), + ), + )?) 
+ .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + // Update allocated amount + let new_remaining_mem = old_client_allocation_key.remaining_mem - memory_mib; + let new_remaining_cpu = old_remaining_cpu - input.resources.cpu; + let new_allocation_key = keys::datacenter::ClientsByRemainingMemKey::new( + client_flavor, + new_remaining_mem, + old_client_allocation_key.last_ping_ts, + old_client_allocation_key.client_id, + ); + tx.set(&keys::subspace().pack(&new_allocation_key), entry.value()); + + tracing::debug!( + old_mem=%old_client_allocation_key.remaining_mem, + old_cpu=%old_remaining_cpu, + new_mem=%new_remaining_mem, + new_cpu=%new_remaining_cpu, + "allocating runner resources" + ); + + // Update client record + let remaining_mem_key = + keys::client::RemainingMemoryKey::new(old_client_allocation_key.client_id); + tx.set( + &keys::subspace().pack(&remaining_mem_key), + &remaining_mem_key + .serialize(new_remaining_mem) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + tx.set( + &remaining_cpu_key_buf, + &remaining_cpu_key + .serialize(new_remaining_cpu) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + let remaining_slots = input.build_allocation_total_slots.saturating_sub(1); + let total_slots = input.build_allocation_total_slots; + + // Insert runner records + let remaining_slots_key = keys::runner::RemainingSlotsKey::new(runner_id); + tx.set( + &keys::subspace().pack(&remaining_slots_key), + &remaining_slots_key + .serialize(remaining_slots) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + let total_slots_key = keys::runner::TotalSlotsKey::new(runner_id); + tx.set( + &keys::subspace().pack(&total_slots_key), + &total_slots_key + .serialize(total_slots) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + let image_id_key = keys::runner::ImageIdKey::new(runner_id); + tx.set( + &keys::subspace().pack(&image_id_key), + &image_id_key + .serialize(input.image_id) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + // Insert runner index key if multi. Single allocation per container runners don't need to be + // in the alloc idx because they only have 1 slot + if let BuildAllocationType::Multi = input.build_allocation_type { + let runner_idx_key = keys::datacenter::RunnersByRemainingSlotsKey::new( + input.image_id, + remaining_slots, + runner_id, + ); + tx.set( + &keys::subspace().pack(&runner_idx_key), + &runner_idx_key + .serialize(keys::datacenter::RunnersByRemainingSlotsKeyData { + client_id: old_client_allocation_key.client_id, + client_workflow_id, + }) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + } + + // Insert actor index key + let client_actor_key = keys::client::Actor2Key::new( + old_client_allocation_key.client_id, + input.actor_id, + ); + tx.set( + &keys::subspace().pack(&client_actor_key), + &client_actor_key + .serialize(input.generation) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + return Ok(Ok(AllocateActorOutput { + runner_id, + new_runner: true, + client_id: old_client_allocation_key.client_id, + client_workflow_id, + })); + } + } + + // At this point in the txn there is no availability. Write the actor to the alloc queue to wait. 
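Before the queue write that follows, the transaction has already attempted two placements: an existing runner for the same image (Multi builds only, iterated over `RunnersByRemainingSlotsKey` in reverse so actors bin-pack onto the fullest runner that still has a slot), and a brand-new runner carved out of a client's remaining memory and CPU. A rough sketch of the per-runner slot bookkeeping; the value `4` is an invented example standing in for `build_allocation_total_slots`:

    // Illustrative only; mirrors the slot arithmetic used in this transaction.
    let total_slots: u32 = 4; // build_allocation_total_slots from the build
    let mut remaining_slots = total_slots.saturating_sub(1); // first actor takes a slot
    // Each later actor re-writes RunnersByRemainingSlotsKey(image_id, remaining_slots, runner_id)
    // with one fewer slot; once remaining_slots reaches 0 the runner falls outside
    // subspace_with_slots(image_id, 1) and is no longer considered for allocation.
    remaining_slots = remaining_slots.saturating_sub(1);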
+ + let pending_ts = util::timestamp::now(); + + // Write self to image alloc queue + if let BuildAllocationType::Multi = input.build_allocation_type { + let image_pending_alloc_key = keys::datacenter::PendingActorByImageIdKey::new( + input.image_id, + pending_ts, + input.actor_id, ); + let image_pending_alloc_data = keys::datacenter::PendingActorByImageIdKeyData { + generation: input.generation, + build_allocation_type: input.build_allocation_type, + build_allocation_total_slots: input.build_allocation_total_slots, + cpu: input.resources.cpu, + memory: input.resources.memory, + }; - // Insert actor index key - let client_actor_key = - keys::client::ActorKey::new(old_allocation_key.client_id, input.actor_id); + // NOTE: This will conflict with serializable reads to the alloc queue, which is the behavior we + // want. If a client reads from the queue while this is being inserted, one of the two txns will + // retry and we ensure the actor does not end up in queue limbo. tx.set( - &keys::subspace().pack(&client_actor_key), - &client_actor_key - .serialize(input.generation) + &keys::subspace().pack(&image_pending_alloc_key), + &image_pending_alloc_key + .serialize(image_pending_alloc_data) .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, ); - - return Ok(Some(AllocateActorOutputV2 { - client_id: old_allocation_key.client_id, - client_workflow_id, - })); } + + // Write self to global alloc queue + let pending_alloc_key = + keys::datacenter::PendingActorKey::new(pending_ts, input.actor_id); + let pending_alloc_data = keys::datacenter::PendingActorKeyData { + generation: input.generation, + image_id: input.image_id, + build_allocation_type: input.build_allocation_type, + build_allocation_total_slots: input.build_allocation_total_slots, + cpu: input.resources.cpu, + memory: input.resources.memory, + }; + + // NOTE: This will conflict with serializable reads to the alloc queue, which is the behavior we + // want. If a client reads from the queue while this is being inserted, one of the two txns will + // retry and we ensure the actor does not end up in queue limbo. + tx.set( + &keys::subspace().pack(&pending_alloc_key), + &pending_alloc_key + .serialize(pending_alloc_data) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + return Ok(Err(pending_ts)); }) .custom_instrument(tracing::info_span!("actor_allocate_tx")) .await?; let dt = start_instant.elapsed().as_secs_f64(); metrics::ACTOR_ALLOCATE_DURATION - .with_label_values(&[&res.is_some().to_string()]) + .with_label_values(&[&res.is_ok().to_string()]) .observe(dt); Ok(res) @@ -392,7 +713,7 @@ async fn allocate_actor_v2( #[derive(Debug, Serialize, Deserialize, Hash)] pub struct UpdateFdbInput { - pub actor_id: Uuid, + pub actor_id: util::Id, pub client_id: Uuid, pub state: protocol::ActorState, } @@ -410,7 +731,7 @@ pub async fn update_fdb(ctx: &ActivityCtx, input: &UpdateFdbInput) -> GlobalResu // Was inserted when the actor was allocated. This is cleared when the state changes as // well as when the actor is destroyed to ensure consistency during rescheduling and // forced deletion. 
- let actor_key = keys::client::ActorKey::new(input.client_id, input.actor_id); + let actor_key = keys::client::Actor2Key::new(input.client_id, input.actor_id); tx.clear(&keys::subspace().pack(&actor_key)); Ok(()) @@ -446,27 +767,72 @@ pub async fn update_image(ctx: &ActivityCtx, input: &UpdateImageInput) -> Global } #[derive(Debug, Serialize, Deserialize, Hash)] -pub struct SetStartedInput {} +pub struct SetStartedInput { + pub actor_id: util::Id, +} + +#[derive(Serialize)] +pub(crate) struct ActorRunnerClickhouseRow { + pub actor_id: String, + pub runner_id: Uuid, + pub started_at: i64, + pub finished_at: i64, +} #[activity(SetStarted)] pub async fn set_started(ctx: &ActivityCtx, input: &SetStartedInput) -> GlobalResult<()> { let pool = ctx.sqlite().await?; let start_ts = util::timestamp::now(); - let row = sql_fetch_optional!( - [ctx, (i64,), pool] + let (create_ts, old_start_ts, runner_id, old_runner_id) = sql_fetch_one!( + [ctx, (i64, Option, Uuid, Option), &pool] " - UPDATE state - SET start_ts = ? - WHERE start_ts IS NULL - RETURNING create_ts + SELECT create_ts, start_ts, runner_id, old_runner_id + FROM state + ", + start_ts, + ) + .await?; + + sql_execute!( + [ctx, &pool] + " + UPDATE state SET start_ts = ?1 ", start_ts, ) .await?; - // Add start duration if this is the first start - if let Some((create_ts,)) = row { + let inserter = ctx.clickhouse_inserter().await?; + + // Set old alloc as finished + if let (Some(old_start_ts), Some(old_runner_id)) = (old_start_ts, old_runner_id) { + inserter.insert( + "db_pegboard_runner", + "actor_runners", + ActorRunnerClickhouseRow { + actor_id: input.actor_id.to_string(), + runner_id: old_runner_id, + started_at: old_start_ts * 1_000_000, // Convert ms to ns for ClickHouse DateTime64(9) + finished_at: start_ts * 1_000_000, // Convert ms to ns for ClickHouse DateTime64(9) + }, + )?; + } + + // Insert new alloc + inserter.insert( + "db_pegboard_runner", + "actor_runners", + ActorRunnerClickhouseRow { + actor_id: input.actor_id.to_string(), + runner_id, + started_at: start_ts * 1_000_000, // Convert ms to ns for ClickHouse DateTime64(9) + finished_at: 0, + }, + )?; + + // Add start metric for first start + if old_start_ts.is_none() { let dt = (start_ts - create_ts) as f64 / 1000.0; metrics::ACTOR_START_DURATION .with_label_values(&[]) @@ -540,7 +906,7 @@ pub async fn insert_ports(ctx: &ActivityCtx, input: &InsertPortsInput) -> Global #[derive(Debug, Serialize, Deserialize, Hash)] pub struct InsertPortsFdbInput { - pub actor_id: Uuid, + pub actor_id: util::Id, pub ports: util::serde::HashableMap, } @@ -582,7 +948,7 @@ pub async fn insert_ports_fdb(ctx: &ActivityCtx, input: &InsertPortsFdbInput) -> .map(|(port_name, port, ingress_port_number, protocol)| { let protocol = unwrap!(GameGuardProtocol::from_repr((*protocol).try_into()?)); - Ok(keys::actor::ProxiedPort { + Ok(keys::actor2::ProxiedPort { port_name: port_name.clone(), create_ts, lan_hostname: port.lan_hostname.clone(), @@ -599,7 +965,7 @@ pub async fn insert_ports_fdb(ctx: &ActivityCtx, input: &InsertPortsFdbInput) -> .run(|tx, _mc| { let proxied_ports = proxied_ports.clone(); async move { - let proxied_ports_key = keys::actor::ProxiedPortsKey::new(input.actor_id); + let proxied_ports_key = keys::actor2::ProxiedPortsKey::new(input.actor_id); tx.set( &keys::subspace().pack(&proxied_ports_key), @@ -630,45 +996,93 @@ async fn compare_retry(ctx: &ActivityCtx, input: &CompareRetryInput) -> GlobalRe Ok((now, input.last_retry_ts < now - RETRY_RESET_DURATION_MS)) } -/// Returns whether or not 
there was availability to spawn the actor. +/// Returns None if a destroy signal was received while pending for allocation. pub async fn spawn_actor( ctx: &mut WorkflowCtx, input: &Input, actor_setup: &setup::ActorSetupCtx, generation: u32, -) -> GlobalResult> { - let res = match ctx.check_version(2).await? { - 1 => { - ctx.activity(AllocateActorInputV1 { - actor_id: input.actor_id, - build_kind: actor_setup.meta.build_kind, - resources: actor_setup.resources.clone(), +) -> GlobalResult> { + // Attempt allocation + let allocate_res = ctx + .activity(AllocateActorInput { + actor_id: input.actor_id, + generation, + image_id: actor_setup.image_id, + build_allocation_type: actor_setup.meta.build_allocation_type, + build_allocation_total_slots: actor_setup.meta.build_allocation_total_slots, + resources: actor_setup.resources.clone(), + }) + .await?; + + let allocate_res = match allocate_res { + Ok(x) => x, + Err(pending_allocation_ts) => { + tracing::warn!( + actor_id=?input.actor_id, + "failed to allocate (no availability), waiting for allocation", + ); + + ctx.activity(SetPendingAllocationInput { + pending_allocation_ts, }) - .await? - } - _ => { - ctx.v(2) - .activity(AllocateActorInputV2 { - actor_id: input.actor_id, - generation, - build_kind: actor_setup.meta.build_kind, - resources: actor_setup.resources.clone(), - }) - .await? - } - }; + .await?; + + // If allocation fails, the allocate txn already inserted this actor into the queue. Now we wait for + // an `Allocate` signal + match ctx.listen::().await? { + PendingAllocation::Allocate(sig) => AllocateActorOutput { + runner_id: sig.runner_id, + new_runner: sig.new_runner, + client_id: sig.client_id, + client_workflow_id: sig.client_workflow_id, + }, + // We ignore the signal's override_kill_timeout_ms because the actor isn't allocated + PendingAllocation::Destroy(_sig) => { + tracing::debug!("destroying before actor allocated"); + + let cleared = ctx + .activity(ClearPendingAllocationInput { + actor_id: input.actor_id, + pending_allocation_ts, + }) + .await?; + + // If this actor was no longer present in the queue it means it was allocated. We must now + // wait for the allocated signal to prevent a race condition. + if !cleared { + let sig = ctx.listen::().await?; + + ctx.activity(UpdateClientAndRunnerInput { + client_id: sig.client_id, + client_workflow_id: sig.client_workflow_id, + runner_id: sig.runner_id, + }) + .await?; + } - let Some(res) = res else { - return Ok(None); + return Ok(None); + } + } + } }; - let (_, ports_res) = ctx + let (_, artifacts_res, ports_res) = ctx .join(( - activity(UpdateClientInput { - client_id: res.client_id, - client_workflow_id: res.client_workflow_id, + activity(UpdateClientAndRunnerInput { + client_id: allocate_res.client_id, + client_workflow_id: allocate_res.client_workflow_id, + runner_id: allocate_res.runner_id, }), - v(2).activity(FetchPortsInput { + // NOTE: We resolve the artifacts here instead of in setup::setup because we don't know how + // long it will be after setup until an actor is allocated so the presigned artifact url might + // expire. 
+ activity(ResolveArtifactsInput { + build_upload_id: actor_setup.meta.build_upload_id, + build_file_name: actor_setup.meta.build_file_name.clone(), + dc_build_delivery_method: actor_setup.meta.dc_build_delivery_method, + }), + activity(FetchPortsInput { actor_id: input.actor_id, endpoint_type: input.endpoint_type, }), @@ -677,64 +1091,84 @@ pub async fn spawn_actor( let cluster_id = ctx.config().server()?.rivet.edge()?.cluster_id; + let image = protocol::Image { + id: actor_setup.image_id, + artifact_url_stub: artifacts_res.artifact_url_stub.clone(), + fallback_artifact_url: Some(artifacts_res.fallback_artifact_url.clone()), + artifact_size: artifacts_res.artifact_size, + kind: match actor_setup.meta.build_kind { + BuildKind::DockerImage => protocol::ImageKind::DockerImage, + BuildKind::OciBundle => protocol::ImageKind::OciBundle, + BuildKind::JavaScript => bail!("actors do not support js builds"), + }, + compression: actor_setup.meta.build_compression.into(), + allocation_type: match actor_setup.meta.build_allocation_type { + BuildAllocationType::None => bail!("actors do not support old builds"), + BuildAllocationType::Single => protocol::ImageAllocationType::Single, + BuildAllocationType::Multi => protocol::ImageAllocationType::Multi, + }, + }; + let ports = ports_res + .ports + .iter() + .map(|port| match port.port.routing { + Routing::GameGuard { protocol } => ( + crate::util::pegboard_normalize_port_name(&port.name), + protocol::Port { + target: port.port_number, + protocol: match protocol { + GameGuardProtocol::Http + | GameGuardProtocol::Https + | GameGuardProtocol::Tcp + | GameGuardProtocol::TcpTls => protocol::TransportProtocol::Tcp, + GameGuardProtocol::Udp => protocol::TransportProtocol::Udp, + }, + routing: protocol::PortRouting::GameGuard, + }, + ), + Routing::Host { protocol } => ( + crate::util::pegboard_normalize_port_name(&port.name), + protocol::Port { + target: port.port_number, + protocol: match protocol { + HostProtocol::Tcp => protocol::TransportProtocol::Tcp, + HostProtocol::Udp => protocol::TransportProtocol::Udp, + }, + routing: protocol::PortRouting::Host, + }, + ), + }) + .collect::>(); + let network_mode = match input.network_mode { + NetworkMode::Bridge => protocol::NetworkMode::Bridge, + NetworkMode::Host => protocol::NetworkMode::Host, + }; + ctx.signal(protocol::Command::StartActor { - actor_id: input.actor_id.into(), + actor_id: input.actor_id, generation, config: Box::new(protocol::ActorConfig { - image: protocol::Image { - id: actor_setup.image_id, - artifact_url_stub: actor_setup.artifact_url_stub.clone(), - fallback_artifact_url: actor_setup.fallback_artifact_url.clone(), - kind: actor_setup.meta.build_kind.into(), - compression: actor_setup.meta.build_compression.into(), - // Always single, this is the old actor wf - allocation_type: protocol::ImageAllocationType::Single, - - // Calculated on the manager for old actors - artifact_size: 0, - }, - root_user_enabled: input.root_user_enabled, - env: input.environment.clone(), - runner: None, - ports: ports_res - .ports - .iter() - .map(|port| match port.port.routing { - Routing::GameGuard { protocol } => ( - crate::util::pegboard_normalize_port_name(&port.name), - protocol::Port { - target: port.port_number, - protocol: match protocol { - GameGuardProtocol::Http - | GameGuardProtocol::Https - | GameGuardProtocol::Tcp - | GameGuardProtocol::TcpTls => protocol::TransportProtocol::Tcp, - GameGuardProtocol::Udp => protocol::TransportProtocol::Udp, - }, - routing: protocol::PortRouting::GameGuard, - }, - ), - 
Routing::Host { protocol } => ( - crate::util::pegboard_normalize_port_name(&port.name), - protocol::Port { - target: port.port_number, - protocol: match protocol { - HostProtocol::Tcp => protocol::TransportProtocol::Tcp, - HostProtocol::Udp => protocol::TransportProtocol::Udp, - }, - routing: protocol::PortRouting::Host, - }, - ), + runner: if allocate_res.new_runner { + Some(protocol::ActorRunner::New { + runner_id: allocate_res.runner_id, + config: protocol::RunnerConfig { + image: image.clone(), + root_user_enabled: input.root_user_enabled, + resources: actor_setup.resources.clone(), + env: input.environment.clone(), + ports: ports.clone(), + network_mode, + }, + }) + } else { + Some(protocol::ActorRunner::Existing { + runner_id: allocate_res.runner_id, }) - .collect(), - network_mode: match input.network_mode { - NetworkMode::Bridge => protocol::NetworkMode::Bridge, - NetworkMode::Host => protocol::NetworkMode::Host, }, - resources: actor_setup.resources.clone(), + env: input.environment.clone(), metadata: util::serde::Raw::new(&protocol::ActorMetadata { actor: protocol::ActorMetadataActor { - actor_id: input.actor_id.into(), + actor_id: input.actor_id, tags: input.tags.clone(), create_ts: ctx.ts(), }, @@ -759,33 +1193,55 @@ pub async fn spawn_actor( }, cluster: protocol::ActorMetadataCluster { cluster_id }, build: protocol::ActorMetadataBuild { - build_id: input.image_id, + build_id: actor_setup.image_id, }, })?, + + // Deprecated + image, + root_user_enabled: input.root_user_enabled, + resources: actor_setup.resources.clone(), + ports, + network_mode, }), }) - .to_workflow_id(res.client_workflow_id) + .to_workflow_id(allocate_res.client_workflow_id) .send() .await?; - Ok(Some(res)) + Ok(Some(allocate_res)) } +/// Returns true if the actor should be destroyed. pub async fn reschedule_actor( ctx: &mut WorkflowCtx, input: &Input, state: &mut State, image_id: Uuid, -) -> GlobalResult> { +) -> GlobalResult { tracing::debug!(actor_id=?input.actor_id, "rescheduling actor"); - ctx.activity(ClearPortsAndResourcesInput { - actor_id: input.actor_id, - image_id, - client_id: state.client_id, - client_workflow_id: state.client_workflow_id, - }) - .await?; + let res = ctx + .activity(ClearPortsAndResourcesInput { + actor_id: input.actor_id, + image_id, + runner_id: state.runner_id, + client_id: state.client_id, + client_workflow_id: state.client_workflow_id, + }) + .await?; + + // `destroy_runner` is true when this was the last actor running on that runner, meaning we have to + // destroy it. + if res.destroy_runner { + ctx.signal(protocol::Command::SignalRunner { + runner_id: state.runner_id, + signal: Signal::SIGKILL as i32, + }) + .to_workflow_id(state.client_workflow_id) + .send() + .await?; + } let actor_setup = setup::setup(ctx, &input, setup::SetupCtx::Reschedule { image_id }).await?; @@ -817,22 +1273,21 @@ pub async fn reschedule_actor( let next = backoff.step().expect("should not have max retry"); // Sleep for backoff or destroy early - if let Some(sig) = ctx + if let Some(_sig) = ctx .listen_with_timeout::(Instant::from(next) - Instant::now()) .await? { - tracing::debug!("destroying before actor reschedule"); + tracing::debug!("destroying before actor start"); - return Ok(Loop::Break(Err(sig))); + return Ok(Loop::Break(None)); } } if let Some(res) = spawn_actor(ctx, &input, &actor_setup, next_generation).await? 
{ - Ok(Loop::Break(Ok((state.clone(), res)))) + Ok(Loop::Break(Some((state.clone(), res)))) } else { - tracing::debug!(actor_id=?input.actor_id, "failed to reschedule actor, retrying"); - - Ok(Loop::Continue) + // Destroyed early + Ok(Loop::Break(None)) } } .boxed() @@ -840,37 +1295,100 @@ pub async fn reschedule_actor( .await?; // Update loop state - match res { - Ok((reschedule_state, res)) => { - state.generation = next_generation; - state.client_id = res.client_id; - state.client_workflow_id = res.client_workflow_id; + if let Some((reschedule_state, res)) = res { + state.generation = next_generation; + state.runner_id = res.runner_id; + state.client_id = res.client_id; + state.client_workflow_id = res.client_workflow_id; - // Save reschedule state in global state - state.reschedule_state = reschedule_state; + // Save reschedule state in global state + state.reschedule_state = reschedule_state; - // Reset gc timeout once allocated - state.gc_timeout_ts = Some(util::timestamp::now() + ACTOR_START_THRESHOLD_MS); + // Reset gc timeout once allocated + state.gc_timeout_ts = Some(util::timestamp::now() + ACTOR_START_THRESHOLD_MS); - Ok(None) - } - Err(sig) => Ok(Some(sig)), + Ok(false) + } else { + Ok(true) } } +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct SetPendingAllocationInput { + pending_allocation_ts: i64, +} + +#[activity(SetPendingAllocation)] +pub async fn set_pending_allocation( + ctx: &ActivityCtx, + input: &SetPendingAllocationInput, +) -> GlobalResult<()> { + let pool = ctx.sqlite().await?; + + sql_execute!( + [ctx, pool] + " + UPDATE state + SET pending_allocation_ts = ? + ", + input.pending_allocation_ts, + ) + .await?; + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct ClearPendingAllocationInput { + actor_id: util::Id, + pending_allocation_ts: i64, +} + +#[activity(ClearPendingAllocation)] +pub async fn clear_pending_allocation( + ctx: &ActivityCtx, + input: &ClearPendingAllocationInput, +) -> GlobalResult { + // Clear self from alloc queue + let cleared = ctx + .fdb() + .await? + .run(|tx, _mc| async move { + let pending_alloc_key = keys::subspace().pack(&keys::datacenter::PendingActorKey::new( + input.pending_allocation_ts, + input.actor_id, + )); + + let exists = tx.get(&pending_alloc_key, SERIALIZABLE).await?.is_some(); + + tx.clear(&pending_alloc_key); + + Ok(exists) + }) + .await?; + + Ok(cleared) +} + #[derive(Debug, Serialize, Deserialize, Hash)] struct ClearPortsAndResourcesInput { - actor_id: Uuid, + actor_id: util::Id, image_id: Uuid, + runner_id: Uuid, client_id: Uuid, client_workflow_id: Uuid, } +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct ClearPortsAndResourcesOutput { + destroy_runner: bool, +} + #[activity(ClearPortsAndResources)] async fn clear_ports_and_resources( ctx: &ActivityCtx, input: &ClearPortsAndResourcesInput, -) -> GlobalResult<()> { +) -> GlobalResult { let pool = &ctx.sqlite().await?; let ( @@ -906,15 +1424,18 @@ async fn clear_ports_and_resources( )?; let build = unwrap_with!(build_res.builds.first(), BUILD_NOT_FOUND); - ctx.fdb() + let destroy_runner = ctx + .fdb() .await? 
.run(|tx, _mc| { let ingress_ports = ingress_ports.clone(); async move { destroy::clear_ports_and_resources( input.actor_id, - Some(build.kind), + input.image_id, + Some(build.allocation_type), ingress_ports, + Some(input.runner_id), Some(input.client_id), Some(input.client_workflow_id), selected_resources_memory_mib, @@ -927,7 +1448,7 @@ async fn clear_ports_and_resources( .custom_instrument(tracing::info_span!("actor_clear_ports_and_resources_tx")) .await?; - Ok(()) + Ok(ClearPortsAndResourcesOutput { destroy_runner }) } #[derive(Debug, Serialize, Deserialize, Hash)] @@ -949,3 +1470,53 @@ pub async fn set_finished(ctx: &ActivityCtx, input: &SetFinishedInput) -> Global Ok(()) } + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +pub struct ValidateUpgradeInput { + pub initial_build_allocation_type: BuildAllocationType, + pub image_id: Uuid, +} + +#[activity(ValidateUpgrade)] +pub async fn validate_upgrade( + ctx: &ActivityCtx, + input: &ValidateUpgradeInput, +) -> GlobalResult> { + let builds_res = ctx + .op(build::ops::get::Input { + build_ids: vec![input.image_id], + }) + .await?; + + // TODO: Validate build belongs to env/game + let Some(build) = builds_res.builds.into_iter().next() else { + return Ok(Some("Build not found.".into())); + }; + + let uploads_res = op!([ctx] upload_get { + upload_ids: vec![build.upload_id.into()], + }) + .await?; + let upload_complete = unwrap!(uploads_res.uploads.first()).complete_ts.is_some(); + + if !upload_complete { + return Ok(Some("Build upload not complete.".into())); + } + + if build.allocation_type != input.initial_build_allocation_type { + match input.initial_build_allocation_type { + BuildAllocationType::None => { + // NOTE: This should be unreachable because if an old build is encountered the old actor wf is used. + return Ok(Some("Old builds not supported.".into())); + } + BuildAllocationType::Single => { + return Ok(Some("Cannot use container build for actor.".into())); + } + BuildAllocationType::Multi => { + return Ok(Some("Cannot use actor build for container.".into())); + } + } + } + + Ok(None) +} diff --git a/packages/edge/services/pegboard/src/workflows/actor/setup.rs b/packages/edge/services/pegboard/src/workflows/actor/setup.rs index b7171265bf..48bc0bf874 100644 --- a/packages/edge/services/pegboard/src/workflows/actor/setup.rs +++ b/packages/edge/services/pegboard/src/workflows/actor/setup.rs @@ -1,8 +1,13 @@ -use build::types::{BuildCompression, BuildKind}; +use build::types::{BuildAllocationType, BuildCompression, BuildKind, BuildResources}; use chirp_workflow::prelude::*; use cluster::types::BuildDeliveryMethod; -use fdb_util::FormalKey; -use foundationdb as fdb; +use fdb_util::{end_of_key_range, FormalKey, SNAPSHOT}; +use foundationdb::{ + self as fdb, + options::{ConflictRangeType, StreamingMode}, +}; +use futures_util::TryStreamExt; +use rand::Rng; use sqlx::Acquire; use super::{Input, Port}; @@ -15,7 +20,7 @@ use crate::{ pub struct ValidateInput { pub env_id: Uuid, pub tags: util::serde::HashableMap, - pub resources: ActorResources, + pub resources: Option, pub image_id: Uuid, pub root_user_enabled: bool, pub args: Vec, @@ -24,13 +29,11 @@ pub struct ValidateInput { pub network_ports: util::serde::HashableMap, } -// TODO: Redo once a solid global error solution is established so we dont have to have validation all in one -// place. 
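The `resources` field on the workflow input is now optional because it only applies to `Single` builds; `Multi` builds carry their own resources and old (`None`) builds are rejected outright. The rule enforced by the validate activity below, distilled into a standalone function for illustration (the `ActorResources`/`BuildAllocationType`/`BuildResources` shapes and error strings are taken from this patch; the function itself is not part of the change):

    // Illustrative distillation of the resource-selection rule enforced below.
    fn pick_resources(
        allocation_type: BuildAllocationType,
        actor_resources: Option<ActorResources>,
        build_resources: Option<BuildResources>,
    ) -> Result<ActorResources, &'static str> {
        match allocation_type {
            // Old builds are routed to the old actor workflow and rejected here.
            BuildAllocationType::None => Err("Old builds not supported."),
            // Single-allocation builds must be sized by the actor request.
            BuildAllocationType::Single => actor_resources
                .ok_or("Actors with builds of `allocation_type` = `single` must specify `resources`."),
            // Multi-allocation builds are sized by the build itself.
            BuildAllocationType::Multi => {
                if actor_resources.is_some() {
                    return Err("Cannot specify `resources` for actors with builds of `allocation_type` = `multi`.");
                }
                let b = build_resources.ok_or("multi build should have resources")?;
                Ok(ActorResources {
                    cpu_millicores: b.cpu_millicores,
                    memory_mib: b.memory_mib,
                })
            }
        }
    }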
#[activity(Validate)] pub async fn validate(ctx: &ActivityCtx, input: &ValidateInput) -> GlobalResult> { let dc_id = ctx.config().server()?.rivet.edge()?.datacenter_id; - let (has_tier, upload_res, game_config_res) = tokio::try_join!( + let (tiers, upload_res, game_config_res) = tokio::try_join!( async { let tier_res = ctx .op(tier::ops::list::Input { @@ -38,13 +41,9 @@ pub async fn validate(ctx: &ActivityCtx, input: &ValidateInput) -> GlobalResult< pegboard: true, }) .await?; - let tier_dc = unwrap!(tier_res.datacenters.first()); + let tier_dc = unwrap!(tier_res.datacenters.into_iter().next()); - // Find any tier that has more CPU and memory than the requested resources - GlobalResult::Ok(tier_dc.tiers.iter().any(|t| { - t.cpu_millicores >= input.resources.cpu_millicores - && t.memory >= input.resources.memory_mib - })) + GlobalResult::Ok(tier_dc.tiers) }, async { let builds_res = ctx @@ -87,10 +86,6 @@ pub async fn validate(ctx: &ActivityCtx, input: &ValidateInput) -> GlobalResult< } )?; - if !has_tier { - return Ok(Some("Too many resources allocated.".into())); - } - // TODO: Validate build belongs to env/game let Some((build, upload_complete)) = upload_res else { return Ok(Some("Build not found.".into())); @@ -100,6 +95,44 @@ pub async fn validate(ctx: &ActivityCtx, input: &ValidateInput) -> GlobalResult< return Ok(Some("Build upload not complete.".into())); } + let resources = match build.allocation_type { + BuildAllocationType::None => { + // NOTE: This should be unreachable because if an old build is encountered the old actor wf is used. + return Ok(Some("Old builds not supported.".into())); + } + BuildAllocationType::Single => { + if let Some(resources) = &input.resources { + resources.clone() + } else { + return Ok(Some( + "Actors with builds of `allocation_type` = `single` must specify `resources`." 
+ .into(), + )); + } + } + BuildAllocationType::Multi => { + if input.resources.is_some() { + return Ok(Some("Cannot specify `resources` for actors with builds of `allocation_type` = `multi`.".into())); + } + + let build_resources = unwrap!(build.resources, "multi build should have resources"); + + ActorResources { + cpu_millicores: build_resources.cpu_millicores, + memory_mib: build_resources.memory_mib, + } + } + }; + + // Find any tier that has more CPU and memory than the requested resources + let has_tier = tiers + .iter() + .any(|t| t.cpu_millicores >= resources.cpu_millicores && t.memory >= resources.memory_mib); + + if !has_tier { + return Ok(Some("Too many resources allocated.".into())); + } + let Some(game_config) = game_config_res else { return Ok(Some("Environment not found.".into())); }; @@ -253,27 +286,24 @@ pub async fn disable_tls_ports( #[derive(Debug, Clone, Serialize, Deserialize, Hash)] struct InsertDbInput { - actor_id: Uuid, + actor_id: util::Id, env_id: Uuid, tags: util::serde::HashableMap, - resources: ActorResources, + resources: Option, lifecycle: ActorLifecycle, image_id: Uuid, args: Vec, network_mode: NetworkMode, environment: util::serde::HashableMap, - network_ports: util::serde::HashableMap, } #[activity(InsertDb)] async fn insert_db(ctx: &ActivityCtx, input: &InsertDbInput) -> GlobalResult { let pool = ctx.sqlite().await?; - let mut conn = pool.conn().await?; - let mut tx = conn.begin().await?; let create_ts = ctx.ts(); sql_execute!( - [ctx, @tx &mut tx] + [ctx, &pool] " INSERT INTO state ( env_id, @@ -292,8 +322,8 @@ async fn insert_db(ctx: &ActivityCtx, input: &InsertDbInput) -> GlobalResult GlobalResult, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct AllocateIngressPortsOutput { + ports: Vec<(GameGuardProtocol, Vec)>, +} + +#[activity(AllocateIngressPorts)] +async fn allocate_ingress_ports( + ctx: &ActivityCtx, + input: &AllocateIngressPortsInput, +) -> GlobalResult { // Count up ports per protocol let mut port_counts = Vec::new(); for (_, port) in &input.network_ports { @@ -324,7 +373,8 @@ async fn insert_db(ctx: &ActivityCtx, input: &InsertDbInput) -> GlobalResult GlobalResult { + return Err(fdb::FdbBindingError::CustomError( + "Dynamic allocation not implemented for http/https ports".into(), + )); + } + GameGuardProtocol::Tcp | GameGuardProtocol::TcpTls => { + gg_config.min_ingress_port_tcp()..=gg_config.max_ingress_port_tcp() + } + GameGuardProtocol::Udp => { + gg_config.min_ingress_port_udp()..=gg_config.max_ingress_port_udp() + } + }; + + let mut last_port = None; + let mut ports = Vec::new(); + + // Choose a random starting port for better spread and less cache hits + let mut start = { + // It is important that we don't start at the end of the range so that the logic with + // `last_port` works correctly + let exclusive_port_range = *port_range.start()..*port_range.end(); + rand::thread_rng().gen_range(exclusive_port_range) + }; + + // Build start and end keys for ingress ports subspace + let start_key = keys::subspace() + .subspace(&keys::port::IngressKey2::subspace(*protocol, start)) + .range() + .0; + let end_key = keys::subspace() + .subspace(&keys::port::IngressKey2::subspace( + *protocol, + *port_range.end(), + )) + .range() + .1; + let mut stream = tx.get_ranges_keyvalues( + fdb::RangeOption { + mode: StreamingMode::Iterator, + ..(start_key, end_key.clone()).into() + }, + // NOTE: This is not SERIALIZABLE because we don't want to conflict with all of the keys, + // just the one we choose + SNAPSHOT, + ); + + // Continue 
iterating over the same stream until all of the required ports are found + for _ in 0..*count { + // Iterate through the subspace range until a port is found + let port = loop { + let Some(entry) = stream.try_next().await? else { + match last_port { + Some(port) if port == *port_range.end() => { + // End of range reached, start a new range read from the beginning (wrap around) + if start != *port_range.start() { + last_port = None; + + let old_start = start; + start = *port_range.start(); + + let start_key = keys::subspace() + .subspace(&keys::port::IngressKey2::subspace( + *protocol, start, + )) + .range() + .0; + stream = tx.get_ranges_keyvalues( + fdb::RangeOption { + mode: StreamingMode::Iterator, + limit: Some(old_start as usize), + ..(start_key, end_key.clone()).into() + }, + // NOTE: This is not SERIALIZABLE because we don't want to conflict + // with all of the keys, just the one we choose + SNAPSHOT, + ); + + continue; + } else { + break None; + } + } + // Return port after last port + Some(last_port) => { + break Some(last_port + 1); + } + // No ports were returned (range is empty) + None => { + break Some(start); + } + } + }; + + let key = keys::subspace() + .unpack::(entry.key()) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + let current_port = key.port; + + if let Some(last_port) = last_port { + // Gap found + if current_port != last_port + 1 { + break Some(last_port + 1); + } + } + + last_port = Some(current_port); + }; + + let Some(port) = port else { + return Err(fdb::FdbBindingError::CustomError( + format!("not enough {protocol} ports available").into(), + )); + }; + + let ingress_port_key = + keys::port::IngressKey2::new(*protocol, port, input.actor_id); + let ingress_port_key_buf = keys::subspace().pack(&ingress_port_key); + + // Add read conflict only for this key + tx.add_conflict_range( + &ingress_port_key_buf, + &end_of_key_range(&ingress_port_key_buf), + ConflictRangeType::Read, + )?; + + // Set key + tx.set( + &ingress_port_key_buf, + &ingress_port_key + .serialize(()) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + ports.push(port); + } + + results.push((*protocol, ports)); + } + + Ok(results) + } }) + .custom_instrument(tracing::info_span!("allocate_ingress_ports_tx")) .await?; - let mut ingress_ports = ingress_ports_res - .ports - .into_iter() - .map(|(protocol, ports)| (protocol, ports.into_iter())) - .collect::>(); + + Ok(AllocateIngressPortsOutput { ports }) +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +struct InsertPortsInput { + actor_id: util::Id, + network_ports: util::serde::HashableMap, + ingress_ports: Vec<(GameGuardProtocol, Vec)>, +} + +#[activity(InsertPorts)] +async fn insert_ports(ctx: &ActivityCtx, input: &InsertPortsInput) -> GlobalResult<()> { + let pool = ctx.sqlite().await?; + let mut conn = pool.conn().await?; + let mut tx = conn.begin().await?; let gg_config = &ctx.config().server()?.rivet.guard; + let mut ingress_ports = input + .ingress_ports + .iter() + .map(|(protocol, ports)| (protocol, ports.clone().into_iter())) + .collect::>(); + for (name, port) in input.network_ports.iter() { match port.routing { Routing::GameGuard { protocol } => { @@ -368,7 +585,7 @@ async fn insert_db(ctx: &ActivityCtx, input: &InsertDbInput) -> GlobalResult gg_config.https_port(), GameGuardProtocol::Tcp | GameGuardProtocol::TcpTls | GameGuardProtocol::Udp => { let (_, ports_iter) = unwrap!( - ingress_ports.iter_mut().find(|(p, _)| &protocol == p) + ingress_ports.iter_mut().find(|(p, _)| &&protocol == p) ); 
unwrap!(ports_iter.next(), "missing ingress port") }, @@ -398,12 +615,12 @@ async fn insert_db(ctx: &ActivityCtx, input: &InsertDbInput) -> GlobalResult, create_ts: i64, @@ -414,7 +631,7 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> GlobalResult<( ctx.fdb() .await? .run(|tx, _mc| async move { - let create_ts_key = keys::actor::CreateTsKey::new(input.actor_id); + let create_ts_key = keys::actor2::CreateTsKey::new(input.actor_id); tx.set( &keys::subspace().pack(&create_ts_key), &create_ts_key @@ -422,7 +639,7 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> GlobalResult<( .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, ); - let workflow_id_key = keys::actor::WorkflowIdKey::new(input.actor_id); + let workflow_id_key = keys::actor2::WorkflowIdKey::new(input.actor_id); tx.set( &keys::subspace().pack(&workflow_id_key), &workflow_id_key @@ -432,8 +649,8 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> GlobalResult<( // Add env index key let env_actor_key = - keys::env::ActorKey::new(input.env_id, input.create_ts, input.actor_id); - let data = keys::env::ActorKeyData { + keys::env::Actor2Key::new(input.env_id, input.create_ts, input.actor_id); + let data = keys::env::Actor2KeyData { is_destroyed: false, tags: input.tags.clone().into_iter().collect(), }; @@ -453,9 +670,9 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> GlobalResult<( } #[derive(Debug, Serialize, Deserialize, Hash)] -pub struct GetMetaInput { - pub env_id: Uuid, - pub image_id: Uuid, +struct GetMetaInput { + env_id: Uuid, + image_id: Uuid, } #[derive(Clone, Debug, Serialize, Deserialize, Hash)] @@ -467,13 +684,16 @@ pub struct GetMetaOutput { pub build_file_name: String, pub build_kind: BuildKind, pub build_compression: BuildCompression, + pub build_allocation_type: BuildAllocationType, + pub build_allocation_total_slots: u32, + pub build_resources: Option, pub dc_name_id: String, pub dc_display_name: String, pub dc_build_delivery_method: BuildDeliveryMethod, } #[activity(GetMeta)] -pub async fn get_meta(ctx: &ActivityCtx, input: &GetMetaInput) -> GlobalResult { +async fn get_meta(ctx: &ActivityCtx, input: &GetMetaInput) -> GlobalResult { let dc_id = ctx.config().server()?.rivet.edge()?.datacenter_id; let (env_res, build_res, dc_res) = tokio::try_join!( @@ -507,6 +727,9 @@ pub async fn get_meta(ctx: &ActivityCtx, input: &GetMetaInput) -> GlobalResult GlobalResult GlobalResult build_compression = ?, root_user_enabled = ? 
", - input.meta.project_id, - input.meta.build_kind as i64, - input.meta.build_compression as i64, + input.project_id, + input.build_kind as i64, + input.build_compression as i64, input.root_user_enabled, ) .await?; @@ -557,8 +782,6 @@ pub struct ActorSetupCtx { pub image_id: Uuid, pub meta: GetMetaOutput, pub resources: protocol::Resources, - pub artifact_url_stub: String, - pub fallback_artifact_url: Option, } pub async fn setup( @@ -580,10 +803,23 @@ pub async fn setup( args: input.args.clone(), network_mode: input.network_mode, environment: input.environment.clone(), - network_ports, }) .await?; + let ingress_ports_res = ctx + .activity(AllocateIngressPortsInput { + actor_id: input.actor_id, + network_ports: network_ports.clone(), + }) + .await?; + + ctx.activity(InsertPortsInput { + actor_id: input.actor_id, + network_ports, + ingress_ports: ingress_ports_res.ports, + }) + .await?; + ctx.activity(InsertFdbInput { actor_id: input.actor_id, env_id: input.env_id, @@ -606,36 +842,49 @@ pub async fn setup( ctx.v(2) .activity(InsertMetaInput { - meta: meta.clone(), + project_id: meta.project_id, + build_kind: meta.build_kind, + build_compression: meta.build_compression, root_user_enabled: input.root_user_enabled, }) .await?; - let (resources, artifacts_res) = ctx - .join(( - activity(SelectResourcesInput { - resources: input.resources.clone(), - }), - activity(ResolveArtifactsInput { - build_upload_id: meta.build_upload_id, - build_file_name: meta.build_file_name.clone(), - dc_build_delivery_method: meta.dc_build_delivery_method, - }), - )) + // Use resources from build or from actor config + let resources = match meta.build_allocation_type { + BuildAllocationType::None => bail!("actors do not support old builds"), + BuildAllocationType::Single => unwrap!( + input.resources.clone(), + "single builds should have actor resources" + ), + BuildAllocationType::Multi => { + let build_resources = + unwrap_ref!(meta.build_resources, "multi builds should have resources"); + + ActorResources { + cpu_millicores: build_resources.cpu_millicores, + memory_mib: build_resources.memory_mib, + } + } + }; + + let resources = ctx + .activity(SelectResourcesInput { + cpu_millicores: resources.cpu_millicores, + memory_mib: resources.memory_mib, + }) .await?; Ok(ActorSetupCtx { image_id, meta, resources, - artifact_url_stub: artifacts_res.artifact_url_stub, - fallback_artifact_url: artifacts_res.fallback_artifact_url, }) } #[derive(Debug, Serialize, Deserialize, Hash)] struct SelectResourcesInput { - resources: ActorResources, + cpu_millicores: u32, + memory_mib: u32, } #[activity(SelectResources)] @@ -660,10 +909,9 @@ async fn select_resources( // Find the first tier that has more CPU and memory than the requested // resources let tier = unwrap!( - tiers.iter().find(|t| { - t.cpu_millicores >= input.resources.cpu_millicores - && t.memory >= input.resources.memory_mib - }), + tiers + .iter() + .find(|t| { t.cpu_millicores >= input.cpu_millicores && t.memory >= input.memory_mib }), "no suitable tier found" ); @@ -695,74 +943,3 @@ async fn select_resources( disk: tier.disk, }) } - -#[derive(Debug, Serialize, Deserialize, Hash)] -struct ResolveArtifactsInput { - build_upload_id: Uuid, - build_file_name: String, - dc_build_delivery_method: BuildDeliveryMethod, -} - -#[derive(Debug, Serialize, Deserialize)] -struct ResolveArtifactsOutput { - artifact_url_stub: String, - fallback_artifact_url: Option, - #[serde(default)] - artifact_size: u64, -} - -#[activity(ResolveArtifacts)] -async fn resolve_artifacts( - ctx: 
&ActivityCtx, - input: &ResolveArtifactsInput, -) -> GlobalResult { - // Get the fallback URL - let fallback_artifact_url = { - tracing::debug!("using s3 direct delivery"); - - // Build client - let s3_client = s3_util::Client::with_bucket_and_endpoint( - ctx.config(), - "bucket-build", - s3_util::EndpointKind::EdgeInternal, - ) - .await?; - - let presigned_req = s3_client - .get_object() - .bucket(s3_client.bucket()) - .key(format!( - "{upload_id}/{file_name}", - upload_id = input.build_upload_id, - file_name = input.build_file_name, - )) - .presigned( - s3_util::aws_sdk_s3::presigning::PresigningConfig::builder() - .expires_in(std::time::Duration::from_secs(15 * 60)) - .build()?, - ) - .await?; - - let addr_str = presigned_req.uri().to_string(); - tracing::debug!(addr = %addr_str, "resolved artifact s3 presigned request"); - - addr_str - }; - - // Get the artifact size - let uploads_res = op!([ctx] upload_get { - upload_ids: vec![input.build_upload_id.into()], - }) - .await?; - let upload = unwrap!(uploads_res.uploads.first()); - - Ok(ResolveArtifactsOutput { - artifact_url_stub: crate::util::image_artifact_url_stub( - ctx.config(), - input.build_upload_id, - &input.build_file_name, - )?, - fallback_artifact_url: Some(fallback_artifact_url), - artifact_size: upload.content_length, - }) -} diff --git a/packages/edge/services/pegboard/src/workflows/actor2/analytics.rs b/packages/edge/services/pegboard/src/workflows/actor/v1/analytics.rs similarity index 95% rename from packages/edge/services/pegboard/src/workflows/actor2/analytics.rs rename to packages/edge/services/pegboard/src/workflows/actor/v1/analytics.rs index a0e349eca8..708fea1dd0 100644 --- a/packages/edge/services/pegboard/src/workflows/actor2/analytics.rs +++ b/packages/edge/services/pegboard/src/workflows/actor/v1/analytics.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; #[derive(Debug, Serialize, Deserialize, Hash)] pub struct InsertClickHouseInput { - pub actor_id: util::Id, + pub actor_id: Uuid, } /// Row to be inserted in to ClickHouse @@ -25,7 +25,6 @@ pub struct ActorClickHouseRow { network_ports_proxied: HashMap, client_id: Uuid, client_wan_hostname: String, - runner_id: Uuid, selected_cpu_millicores: u32, selected_memory_mib: u32, root_user_enabled: bool, @@ -45,8 +44,6 @@ pub struct ActorClickHouseRow { /// 0 = not set started_at: i64, /// See `started_at`. - pending_allocation_at: i64, - /// See `started_at`. connectable_at: i64, /// See `started_at`. 
finished_at: i64, @@ -98,11 +95,9 @@ struct StateRow { selected_resources_memory_mib: Option, client_id: Option, client_wan_hostname: Option, - runner_id: Option, lifecycle_kill_timeout_ms: i64, lifecycle_durable: bool, create_ts: i64, - pending_allocation_ts: Option, start_ts: Option, connectable_ts: Option, finish_ts: Option, @@ -147,11 +142,9 @@ pub async fn insert_clickhouse( selected_resources_memory_mib, client_id, client_wan_hostname, - runner_id, lifecycle_kill_timeout_ms, lifecycle_durable, create_ts, - pending_allocation_ts, start_ts, connectable_ts, finish_ts, @@ -276,7 +269,6 @@ pub async fn insert_clickhouse( network_ports_proxied: proxied_ports, client_id: state_row.client_id.unwrap_or_default(), client_wan_hostname: state_row.client_wan_hostname.unwrap_or_default(), - runner_id: state_row.runner_id.unwrap_or_default(), selected_cpu_millicores: state_row .selected_resources_cpu_millicores .unwrap_or_default() as u32, @@ -295,10 +287,6 @@ pub async fn insert_clickhouse( cpu_millicores: state_row.resources_cpu_millicores, memory_mib: state_row.resources_memory_mib, created_at: state_row.create_ts * 1_000_000, // Convert ms to ns for ClickHouse DateTime64(9) - pending_allocation_at: state_row - .pending_allocation_ts - .map(|ts| ts * 1_000_000) - .unwrap_or_default(), started_at: state_row .start_ts .map(|ts| ts * 1_000_000) diff --git a/packages/edge/services/pegboard/src/workflows/actor/v1/destroy.rs b/packages/edge/services/pegboard/src/workflows/actor/v1/destroy.rs new file mode 100644 index 0000000000..4bf977c07b --- /dev/null +++ b/packages/edge/services/pegboard/src/workflows/actor/v1/destroy.rs @@ -0,0 +1,374 @@ +use build::types::BuildKind; +use chirp_workflow::prelude::*; +use fdb_util::{FormalKey, SERIALIZABLE}; +use foundationdb as fdb; +use nix::sys::signal::Signal; + +use super::{analytics::InsertClickHouseInput, DestroyComplete, DestroyStarted}; +use crate::{keys, protocol, types::GameGuardProtocol}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct KillCtx { + pub generation: u32, + pub kill_timeout_ms: i64, +} + +#[derive(Debug, Serialize, Deserialize)] +pub(crate) struct Input { + pub actor_id: Uuid, + pub build_kind: Option, + /// Whether or not to send signals to the pb actor. In the case that the actor was already stopped + /// or exited, signals are unnecessary. 
+ pub kill: Option, +} + +#[workflow] +pub(crate) async fn pegboard_actor_destroy( + ctx: &mut WorkflowCtx, + input: &Input, +) -> GlobalResult<()> { + ctx.msg(DestroyStarted {}) + .tag("actor_id", input.actor_id) + .send() + .await?; + + let actor = ctx.activity(UpdateDbInput {}).await?; + + if let Some(actor) = actor { + let client_workflow_id = actor.client_workflow_id; + + ctx.activity(UpdateFdbInput { + actor_id: input.actor_id, + build_kind: input.build_kind, + actor, + }) + .await?; + + if let (Some(client_workflow_id), Some(kill_data)) = (client_workflow_id, &input.kill) { + kill( + ctx, + input.actor_id, + kill_data.generation, + client_workflow_id, + kill_data.kill_timeout_ms, + false, + ) + .await?; + } + } + + // Update ClickHouse analytics with destroyed timestamp + ctx.v(2) + .activity(InsertClickHouseInput { + actor_id: input.actor_id, + }) + .await?; + + ctx.msg(DestroyComplete {}) + .tag("actor_id", input.actor_id) + .send() + .await?; + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct UpdateDbInput {} + +#[derive(Debug, Serialize, Deserialize, Hash, sqlx::FromRow)] +struct UpdateDbOutput { + env_id: Uuid, + selected_resources_memory_mib: Option, + selected_resources_cpu_millicores: Option, + tags: sqlx::types::Json>, + create_ts: i64, + client_id: Option, + client_workflow_id: Option, +} + +#[activity(UpdateDb)] +async fn update_db( + ctx: &ActivityCtx, + input: &UpdateDbInput, +) -> GlobalResult> { + let pool = ctx.sqlite().await?; + + sql_fetch_optional!( + [ctx, UpdateDbOutput, pool] + " + UPDATE state + SET destroy_ts = ? + WHERE destroy_ts IS NULL + RETURNING + env_id, + selected_resources_memory_mib, + selected_resources_cpu_millicores, + json(tags) AS tags, + create_ts, + client_id, + client_workflow_id + ", + ctx.ts(), + ) + .await +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct UpdateFdbInput { + actor_id: Uuid, + build_kind: Option, + actor: UpdateDbOutput, +} + +#[activity(UpdateFdb)] +pub async fn update_fdb(ctx: &ActivityCtx, input: &UpdateFdbInput) -> GlobalResult<()> { + let pool = ctx.sqlite().await?; + + let ingress_ports = sql_fetch_all!( + [ctx, (i64, i64), pool] + " + SELECT protocol, ingress_port_number + FROM ports_ingress + ", + ) + .await?; + + ctx.fdb() + .await? + .run(|tx, _mc| { + let ingress_ports = ingress_ports.clone(); + async move { + // Update actor key index in env subspace + let actor_key = keys::env::ActorKey::new( + input.actor.env_id, + input.actor.create_ts, + input.actor_id, + ); + let data = keys::env::ActorKeyData { + is_destroyed: true, + tags: input.actor.tags.0.clone().into_iter().collect(), + }; + tx.set( + &keys::subspace().pack(&actor_key), + &actor_key + .serialize(data) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + clear_ports_and_resources( + input.actor_id, + input.build_kind, + ingress_ports, + input.actor.client_id, + input.actor.client_workflow_id, + input.actor.selected_resources_memory_mib, + input.actor.selected_resources_cpu_millicores, + &tx, + ) + .await + } + }) + .custom_instrument(tracing::info_span!("actor_destroy_tx")) + .await?; + + Ok(()) +} + +// TODO: Clean up args +/// Clears allocated ports and resources (if they were allocated). 
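+/// In order, this:
+/// 1. Clears the ingress port index keys for every port allocated to the actor.
+/// 2. Clears the actor's proxied ports key and the client's actor index key.
+/// 3. If the client and selected resources are known, adds the actor's selected CPU and
+///    memory back to the client's remaining-resource keys and re-inserts the client into
+///    the allocation index under its new remaining memory.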
+pub(crate) async fn clear_ports_and_resources( + actor_id: Uuid, + build_kind: Option, + ingress_ports: Vec<(i64, i64)>, + client_id: Option, + client_workflow_id: Option, + selected_resources_memory_mib: Option, + selected_resources_cpu_millicores: Option, + tx: &fdb::RetryableTransaction, +) -> Result<(), fdb::FdbBindingError> { + // Remove all allocated ingress ports + for (protocol, port) in ingress_ports { + let protocol = GameGuardProtocol::from_repr( + usize::try_from(protocol).map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ) + .ok_or_else(|| { + fdb::FdbBindingError::CustomError( + format!("invalid protocol variant: {protocol}").into(), + ) + })?; + let port = u16::try_from(port).map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + let ingress_port_key = keys::port::IngressKey::new(protocol, port, actor_id); + + tx.clear(&keys::subspace().pack(&ingress_port_key)); + + let ingress_port_key2 = keys::port::IngressKey::new(protocol, port, actor_id.into()); + + tx.clear(&keys::subspace().pack(&ingress_port_key2)); + } + + // Remove proxied ports + let proxied_ports_key = keys::actor::ProxiedPortsKey::new(actor_id); + tx.clear(&keys::subspace().pack(&proxied_ports_key)); + + if let Some(client_id) = client_id { + // This is cleared when the state changes as well as when the actor is destroyed to ensure + // consistency during rescheduling and forced deletion. + let actor_key = keys::client::ActorKey::new(client_id, actor_id); + tx.clear(&keys::subspace().pack(&actor_key)); + } + + // Release client's resources and update allocation index + if let ( + Some(build_kind), + Some(client_id), + Some(client_workflow_id), + Some(selected_resources_memory_mib), + Some(selected_resources_cpu_millicores), + ) = ( + build_kind, + client_id, + client_workflow_id, + selected_resources_memory_mib, + selected_resources_cpu_millicores, + ) { + let client_flavor = match build_kind { + BuildKind::DockerImage | BuildKind::OciBundle => protocol::ClientFlavor::Container, + BuildKind::JavaScript => protocol::ClientFlavor::Isolate, + }; + + let remaining_mem_key = keys::client::RemainingMemoryKey::new(client_id); + let remaining_mem_key_buf = keys::subspace().pack(&remaining_mem_key); + let remaining_cpu_key = keys::client::RemainingCpuKey::new(client_id); + let remaining_cpu_key_buf = keys::subspace().pack(&remaining_cpu_key); + let last_ping_ts_key = keys::client::LastPingTsKey::new(client_id); + let last_ping_ts_key_buf = keys::subspace().pack(&last_ping_ts_key); + + let (remaining_mem_entry, remaining_cpu_entry, last_ping_ts_entry) = tokio::try_join!( + tx.get(&remaining_mem_key_buf, SERIALIZABLE), + tx.get(&remaining_cpu_key_buf, SERIALIZABLE), + tx.get(&last_ping_ts_key_buf, SERIALIZABLE), + )?; + + let remaining_mem = remaining_mem_key + .deserialize( + &remaining_mem_entry.ok_or(fdb::FdbBindingError::CustomError( + format!("key should exist: {remaining_mem_key:?}").into(), + ))?, + ) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + let remaining_cpu = remaining_cpu_key + .deserialize( + &remaining_cpu_entry.ok_or(fdb::FdbBindingError::CustomError( + format!("key should exist: {remaining_cpu_key:?}").into(), + ))?, + ) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + let last_ping_ts = last_ping_ts_key + .deserialize(&last_ping_ts_entry.ok_or(fdb::FdbBindingError::CustomError( + format!("key should exist: {last_ping_ts_key:?}").into(), + ))?) 
+ .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + let old_allocation_key = keys::datacenter::ClientsByRemainingMemKey::new( + client_flavor, + remaining_mem, + last_ping_ts, + client_id, + ); + let old_allocation_key_buf = keys::subspace().pack(&old_allocation_key); + + let new_mem = remaining_mem + + u64::try_from(selected_resources_memory_mib) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + let new_cpu = remaining_cpu + + u64::try_from(selected_resources_cpu_millicores) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + tracing::debug!( + old_mem=%remaining_mem, + old_cpu=%remaining_cpu, + %new_mem, + %new_cpu, + "releasing resources" + ); + + // Write new memory + tx.set( + &remaining_mem_key_buf, + &remaining_mem_key + .serialize(new_mem) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + // Write new cpu + tx.set( + &remaining_cpu_key_buf, + &remaining_cpu_key + .serialize(new_cpu) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + // Only update allocation idx if it existed before + if tx + .get(&old_allocation_key_buf, SERIALIZABLE) + .await? + .is_some() + { + // Clear old key + tx.clear(&old_allocation_key_buf); + + let new_allocation_key = keys::datacenter::ClientsByRemainingMemKey::new( + client_flavor, + new_mem, + last_ping_ts, + client_id, + ); + let new_allocation_key_buf = keys::subspace().pack(&new_allocation_key); + + tx.set( + &new_allocation_key_buf, + &new_allocation_key + .serialize(client_workflow_id) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + } + } + + Ok(()) +} + +pub(crate) async fn kill( + ctx: &mut WorkflowCtx, + actor_id: Uuid, + generation: u32, + client_workflow_id: Uuid, + kill_timeout_ms: i64, + persist_storage: bool, +) -> GlobalResult<()> { + if kill_timeout_ms != 0 { + ctx.signal(protocol::Command::SignalActor { + actor_id: actor_id.into(), + generation, + signal: Signal::SIGTERM as i32, + persist_storage, + }) + .to_workflow_id(client_workflow_id) + .send() + .await?; + + // See `docs/packages/job/JOB_DRAINING_AND_KILL_TIMEOUTS.md` + ctx.sleep(kill_timeout_ms).await?; + } + + ctx.signal(protocol::Command::SignalActor { + actor_id: actor_id.into(), + generation, + signal: Signal::SIGKILL as i32, + persist_storage, + }) + .to_workflow_id(client_workflow_id) + .send() + .await?; + + Ok(()) +} diff --git a/packages/edge/services/pegboard/src/workflows/actor2/migrations.rs b/packages/edge/services/pegboard/src/workflows/actor/v1/migrations.rs similarity index 64% rename from packages/edge/services/pegboard/src/workflows/actor2/migrations.rs rename to packages/edge/services/pegboard/src/workflows/actor/v1/migrations.rs index ede6f53156..fefbc34d5a 100644 --- a/packages/edge/services/pegboard/src/workflows/actor2/migrations.rs +++ b/packages/edge/services/pegboard/src/workflows/actor/v1/migrations.rs @@ -3,6 +3,7 @@ use sqlx::Acquire; pub async fn run(ctx: &mut WorkflowCtx) -> GlobalResult<()> { ctx.activity(MigrateInitInput {}).await?; + ctx.v(2).activity(MigrateExtraMetaInput {}).await?; Ok(()) } @@ -12,6 +13,7 @@ struct MigrateInitInput {} #[activity(MigrateInit)] async fn migrate_init(ctx: &ActivityCtx, _input: &MigrateInitInput) -> GlobalResult<()> { + // Transactions make migrations atomic let pool = ctx.sqlite().await?; let mut conn = pool.conn().await?; let mut tx = conn.begin().await?; @@ -20,21 +22,16 @@ async fn migrate_init(ctx: &ActivityCtx, _input: &MigrateInitInput) -> GlobalRes [ctx, @tx &mut tx] " CREATE TABLE state ( - -- Updated 
later - project_id BLOB NOT NULL DEFAULT X'00000000000000000000000000000000', -- UUID - env_id BLOB NOT NULL, -- UUID tags BLOB NOT NULL, -- JSONB, map - - resources_cpu_millicores INT, - resources_memory_mib INT, + + resources_cpu_millicores INT NOT NULL, + resources_memory_mib INT NOT NULL, -- Chosen based on tier selected_resources_cpu_millicores INT, selected_resources_memory_mib INT, - old_runner_id BLOB, -- UUID - runner_id BLOB, -- UUID client_id BLOB, -- UUID client_workflow_id BLOB, -- UUID client_wan_hostname TEXT, @@ -43,7 +40,6 @@ async fn migrate_init(ctx: &ActivityCtx, _input: &MigrateInitInput) -> GlobalRes lifecycle_durable INT NOT NULL DEFAULT false, -- BOOLEAN create_ts INT NOT NULL, - pending_allocation_ts INT, -- Set if currently pending alloc start_ts INT, connectable_ts INT, finish_ts INT, @@ -52,12 +48,7 @@ async fn migrate_init(ctx: &ActivityCtx, _input: &MigrateInitInput) -> GlobalRes image_id BLOB NOT NULL, -- UUID args BLOB NOT NULL, -- JSONB, list network_mode INT NOT NULL, -- pegboard::types::NetworkMode - environment BLOB NOT NULL, -- JSONB, map - - -- Updated later - root_user_enabled INT NOT NULL DEFAULT false, - build_kind INT NOT NULL DEFAULT -1, - build_compression INT NOT NULL DEFAULT -1 + environment BLOB NOT NULL -- JSONB, map ) STRICT; CREATE TABLE ports_ingress ( @@ -86,3 +77,28 @@ async fn migrate_init(ctx: &ActivityCtx, _input: &MigrateInitInput) -> GlobalRes Ok(()) } + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct MigrateExtraMetaInput {} + +#[activity(MigrateExtraMeta)] +async fn migrate_extra_meta(ctx: &ActivityCtx, _input: &MigrateExtraMetaInput) -> GlobalResult<()> { + let pool = ctx.sqlite().await?; + let mut conn = pool.conn().await?; + let mut tx = conn.begin().await?; + + sql_execute!( + [ctx, @tx &mut tx] + " + ALTER TABLE state ADD project_id BLOB DEFAULT X'00000000000000000000000000000000'; -- UUID + ALTER TABLE state ADD root_user_enabled INT DEFAULT false; + ALTER TABLE state ADD build_kind INT DEFAULT -1; + ALTER TABLE state ADD build_compression INT DEFAULT -1; + ", + ) + .await?; + + tx.commit().await?; + + Ok(()) +} diff --git a/packages/edge/services/pegboard/src/workflows/actor2/mod.rs b/packages/edge/services/pegboard/src/workflows/actor/v1/mod.rs similarity index 76% rename from packages/edge/services/pegboard/src/workflows/actor2/mod.rs rename to packages/edge/services/pegboard/src/workflows/actor/v1/mod.rs index 7f6673581c..2c82f7ec9c 100644 --- a/packages/edge/services/pegboard/src/workflows/actor2/mod.rs +++ b/packages/edge/services/pegboard/src/workflows/actor/v1/mod.rs @@ -2,11 +2,11 @@ use analytics::InsertClickHouseInput; use chirp_workflow::prelude::*; use destroy::KillCtx; use futures_util::FutureExt; +use rivet_util::serde::HashableMap; use crate::{ protocol, types::{ActorLifecycle, ActorResources, EndpointType, NetworkMode, Routing}, - workflows::client::AllocatePendingActorsInput, }; mod analytics; @@ -32,18 +32,17 @@ const RETRY_RESET_DURATION_MS: i64 = util::duration::minutes(10); #[derive(Clone, Debug, Serialize, Deserialize, Hash)] pub struct Input { - pub actor_id: util::Id, + pub actor_id: Uuid, pub env_id: Uuid, - pub tags: util::serde::HashableMap, - /// Used to override image resources. 
- pub resources: Option, + pub tags: HashableMap, + pub resources: ActorResources, pub lifecycle: ActorLifecycle, pub image_id: Uuid, pub root_user_enabled: bool, pub args: Vec, pub network_mode: NetworkMode, - pub environment: util::serde::HashableMap, - pub network_ports: util::serde::HashableMap, + pub environment: HashableMap, + pub network_ports: HashableMap, pub endpoint_type: Option, } @@ -55,7 +54,7 @@ pub struct Port { } #[workflow] -pub async fn pegboard_actor2(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> { +pub async fn pegboard_actor(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> { migrations::run(ctx).await?; let validation_res = ctx @@ -112,9 +111,7 @@ pub async fn pegboard_actor2(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu ctx.workflow(destroy::Input { actor_id: input.actor_id, - generation: 0, - image_id: input.image_id, - build_allocation_type: None, + build_kind: None, kill: None, }) .output() @@ -136,14 +133,17 @@ pub async fn pegboard_actor2(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu .send() .await?; - let Some(allocate_res) = runtime::spawn_actor(ctx, input, &initial_actor_setup, 0).await? - else { - // Destroyed early + let Some(res) = runtime::spawn_actor(ctx, input, &initial_actor_setup, 0).await? else { + ctx.msg(Failed { + message: "Failed to allocate (no availability).".into(), + }) + .tag("actor_id", input.actor_id) + .send() + .await?; + ctx.workflow(destroy::Input { actor_id: input.actor_id, - generation: 0, - image_id: input.image_id, - build_allocation_type: Some(initial_actor_setup.meta.build_allocation_type), + build_kind: Some(initial_actor_setup.meta.build_kind), kill: None, }) .output() @@ -152,14 +152,17 @@ pub async fn pegboard_actor2(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu return Ok(()); }; - let lifecycle_res = ctx + ctx.v(2) + .msg(Allocated { + client_id: res.client_id, + }) + .tag("actor_id", input.actor_id) + .send() + .await?; + + let state_res = ctx .loope( - runtime::State::new( - allocate_res.runner_id, - allocate_res.client_id, - allocate_res.client_workflow_id, - input.image_id, - ), + runtime::State::new(res.client_id, res.client_workflow_id, input.image_id), |ctx, state| { let input = input.clone(); @@ -185,14 +188,21 @@ pub async fn pegboard_actor2(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu ) .await?; - if runtime::reschedule_actor(ctx, &input, state, state.image_id).await? + if let Some(sig) = runtime::reschedule_actor( + ctx, + &input, + state, + state.image_id.unwrap_or(input.image_id), + ) + .await? 
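+                        // `reschedule_actor` yields the `Destroy` signal if one arrives during
+                        // the reschedule backoff (destroyed early) and `None` once the actor has
+                        // been respawned.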
{ // Destroyed early - return Ok(Loop::Break(runtime::LifecycleRes { - generation: state.generation, - image_id: state.image_id, + return Ok(Loop::Break(runtime::StateRes { kill: Some(KillCtx { - kill_timeout_ms: input.lifecycle.kill_timeout_ms, + generation: state.generation, + kill_timeout_ms: sig + .override_kill_timeout_ms + .unwrap_or(input.lifecycle.kill_timeout_ms), }), })); } else { @@ -200,10 +210,9 @@ pub async fn pegboard_actor2(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu return Ok(Loop::Continue); } } else { - return Ok(Loop::Break(runtime::LifecycleRes { - generation: state.generation, - image_id: state.image_id, + return Ok(Loop::Break(runtime::StateRes { kill: Some(KillCtx { + generation: state.generation, kill_timeout_ms: input.lifecycle.kill_timeout_ms, }), })); @@ -244,11 +253,7 @@ pub async fn pegboard_actor2(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu protocol::ActorState::Starting => { state.gc_timeout_ts = None; - ctx.activity(runtime::SetStartedInput { - actor_id: input.actor_id, - generation: state.generation, - }) - .await?; + ctx.activity(runtime::SetStartedInput {}).await?; } protocol::ActorState::Running { ports, .. } => { ctx.join(( @@ -262,6 +267,9 @@ pub async fn pegboard_actor2(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu )) .await?; + // Old traefik timeout + ctx.removed::>().await?; + let updated = ctx .activity(runtime::SetConnectableInput { connectable: true, @@ -329,16 +337,15 @@ pub async fn pegboard_actor2(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu ctx, &input, state, - state.image_id, + state.image_id.unwrap_or(input.image_id), ) .await? + .is_some() { // Destroyed early - return Ok(Loop::Break(runtime::LifecycleRes { - generation: state.generation, - image_id: state.image_id, - // None here because if we received the destroy signal, it is - // guaranteed that we did not allocate another actor. + return Ok(Loop::Break(runtime::StateRes { + // Destroy actor is none here because if we received the destroy + // signal, it is guaranteed that we did not allocate another actor. kill: None, })); } @@ -356,12 +363,13 @@ pub async fn pegboard_actor2(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu .await?; } - return Ok(Loop::Break(runtime::LifecycleRes { - generation: state.generation, - image_id: state.image_id, + return Ok(Loop::Break(runtime::StateRes { // No need to kill if already exited kill: matches!(sig.state, protocol::ActorState::Lost) - .then_some(KillCtx { kill_timeout_ms: 0 }), + .then_some(KillCtx { + generation: state.generation, + kill_timeout_ms: 0, + }), })); } } @@ -391,16 +399,23 @@ pub async fn pegboard_actor2(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu image_id: sig.image_id, }) .await?; - state.image_id = sig.image_id; + state.image_id = Some(sig.image_id); - if runtime::reschedule_actor(ctx, &input, state, state.image_id).await? + if let Some(sig) = runtime::reschedule_actor( + ctx, + &input, + state, + state.image_id.unwrap_or(input.image_id), + ) + .await? 
{ // Destroyed early - return Ok(Loop::Break(runtime::LifecycleRes { - generation: state.generation, - image_id: input.image_id, + return Ok(Loop::Break(runtime::StateRes { kill: Some(KillCtx { - kill_timeout_ms: input.lifecycle.kill_timeout_ms, + generation: state.generation, + kill_timeout_ms: sig + .override_kill_timeout_ms + .unwrap_or(input.lifecycle.kill_timeout_ms), }), })); } @@ -420,10 +435,9 @@ pub async fn pegboard_actor2(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu state.drain_timeout_ts = None; } Main::Destroy(sig) => { - return Ok(Loop::Break(runtime::LifecycleRes { - generation: state.generation, - image_id: input.image_id, + return Ok(Loop::Break(runtime::StateRes { kill: Some(KillCtx { + generation: state.generation, kill_timeout_ms: sig .override_kill_timeout_ms .unwrap_or(input.lifecycle.kill_timeout_ms), @@ -441,35 +455,23 @@ pub async fn pegboard_actor2(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu ctx.workflow(destroy::Input { actor_id: input.actor_id, - generation: lifecycle_res.generation, - image_id: lifecycle_res.image_id, - build_allocation_type: Some(initial_actor_setup.meta.build_allocation_type), - kill: lifecycle_res.kill, + build_kind: Some(initial_actor_setup.meta.build_kind.clone()), + kill: state_res.kill, }) .output() .await?; - // NOTE: The reason we allocate other actors from this actor workflow is because if we instead sent a - // signal to the client wf here it would incur a heavy throughput hit and we need the client wf to be as - // lightweight as possible; processing as few signals that aren't events/commands as possible - // Allocate other pending actors from queue - let res = ctx.activity(AllocatePendingActorsInput {}).await?; - - // Dispatch pending allocs - for alloc in res.allocations { - ctx.signal(alloc.signal) - .to_workflow::() - .tag("actor_id", alloc.actor_id) - .send() - .await?; - } - Ok(()) } #[message("pegboard_actor_create_complete")] pub struct CreateComplete {} +#[message("pegboard_actor_allocated")] +pub struct Allocated { + pub client_id: Uuid, +} + #[message("pegboard_actor_failed")] pub struct Failed { pub message: String, @@ -478,15 +480,6 @@ pub struct Failed { #[message("pegboard_actor_ready")] pub struct Ready {} -#[signal("pegboard_actor_allocate")] -#[derive(Debug)] -pub struct Allocate { - pub runner_id: Uuid, - pub new_runner: bool, - pub client_id: Uuid, - pub client_workflow_id: Uuid, -} - #[signal("pegboard_actor_destroy")] pub struct Destroy { pub override_kill_timeout_ms: Option, @@ -524,12 +517,6 @@ pub struct UpgradeStarted {} #[message("pegboard_actor_upgrade_complete")] pub struct UpgradeComplete {} -join_signal!(PendingAllocation { - Allocate, - Destroy, - // -}); - join_signal!(Main { StateUpdate, Upgrade, @@ -537,3 +524,14 @@ join_signal!(Main { Undrain, Destroy, }); + +// Stub definition +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct WaitForTraefikPollInput {} +#[activity(WaitForTraefikPoll)] +pub async fn wait_for_traefik_poll( + _ctx: &ActivityCtx, + _input: &WaitForTraefikPollInput, +) -> GlobalResult<()> { + Ok(()) +} diff --git a/packages/edge/services/pegboard/src/workflows/actor/v1/runtime.rs b/packages/edge/services/pegboard/src/workflows/actor/v1/runtime.rs new file mode 100644 index 0000000000..9ad5454904 --- /dev/null +++ b/packages/edge/services/pegboard/src/workflows/actor/v1/runtime.rs @@ -0,0 +1,951 @@ +use std::time::Instant; + +use build::types::BuildKind; +use chirp_workflow::prelude::*; +use fdb_util::{end_of_key_range, FormalKey, SERIALIZABLE, SNAPSHOT}; +use 
foundationdb::{ + self as fdb, + options::{ConflictRangeType, StreamingMode}, +}; +use futures_util::{FutureExt, TryStreamExt}; +use sqlx::Acquire; + +use super::{ + destroy::{self, KillCtx}, + setup, Destroy, Input, ACTOR_START_THRESHOLD_MS, BASE_RETRY_TIMEOUT_MS, + RETRY_RESET_DURATION_MS, +}; +use crate::{ + keys, metrics, + ops::actor::get, + protocol, + types::{EndpointType, GameGuardProtocol, HostProtocol, NetworkMode, Port, Routing}, + workflows::client::CLIENT_ELIGIBLE_THRESHOLD_MS, +}; + +#[derive(Deserialize, Serialize)] +pub struct State { + pub generation: u32, + + pub client_id: Uuid, + pub client_workflow_id: Uuid, + pub image_id: Option, + + pub drain_timeout_ts: Option, + pub gc_timeout_ts: Option, + + #[serde(default)] + reschedule_state: RescheduleState, +} + +impl State { + pub fn new(client_id: Uuid, client_workflow_id: Uuid, image_id: Uuid) -> Self { + State { + generation: 0, + client_id, + client_workflow_id, + image_id: Some(image_id), + drain_timeout_ts: None, + gc_timeout_ts: Some(util::timestamp::now() + ACTOR_START_THRESHOLD_MS), + reschedule_state: RescheduleState::default(), + } + } +} + +#[derive(Serialize, Deserialize)] +pub struct StateRes { + pub kill: Option, +} + +#[derive(Serialize, Deserialize, Clone, Default)] +struct RescheduleState { + last_retry_ts: i64, + retry_count: usize, +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct UpdateClientInput { + client_id: Uuid, + client_workflow_id: Uuid, +} + +#[activity(UpdateClient)] +async fn update_client(ctx: &ActivityCtx, input: &UpdateClientInput) -> GlobalResult<()> { + let client_pool = ctx.sqlite_for_workflow(input.client_workflow_id).await?; + let pool = ctx.sqlite().await?; + + let (client_wan_hostname,) = sql_fetch_one!( + [ctx, (String,), client_pool] + " + SELECT config->'network'->>'wan_hostname' AS wan_hostname + FROM state + ", + ) + .await?; + + sql_execute!( + [ctx, pool] + " + UPDATE state + SET + client_id = ?, + client_workflow_id = ?, + client_wan_hostname = ? 
+ ", + input.client_id, + input.client_workflow_id, + &client_wan_hostname, + ) + .await?; + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct FetchPortsInput { + actor_id: Uuid, + endpoint_type: Option, +} + +#[derive(Debug, Serialize, Deserialize)] +struct FetchPortsOutput { + ports: Vec, +} + +#[derive(Debug, Serialize, Deserialize)] +struct FetchedPort { + name: String, + port_number: Option, + port: Port, +} + +#[activity(FetchPorts)] +async fn fetch_ports(ctx: &ActivityCtx, input: &FetchPortsInput) -> GlobalResult { + let pool = ctx.sqlite().await?; + + let dc_id = ctx.config().server()?.rivet.edge()?.datacenter_id; + + let ((wan_hostname,), port_ingress_rows, port_host_rows, dc_res) = tokio::try_join!( + sql_fetch_one!( + [ctx, (Option,), &pool] + " + SELECT client_wan_hostname + FROM state + ", + ), + sql_fetch_all!( + [ctx, get::PortIngress, &pool] + " + SELECT + port_name, + port_number, + ingress_port_number, + protocol + FROM ports_ingress + ", + ), + sql_fetch_all!( + [ctx, get::PortHost, &pool] + " + SELECT port_name, port_number, protocol + FROM ports_host + ", + ), + ctx.op(cluster::ops::datacenter::get::Input { + datacenter_ids: vec![dc_id], + }), + )?; + + let dc = unwrap!(dc_res.datacenters.first()); + + let endpoint_type = input.endpoint_type.unwrap_or_else(|| { + EndpointType::default_for_guard_public_hostname(&dc.guard_public_hostname) + }); + + let ports = port_ingress_rows + .into_iter() + .map(|row| { + let port = get::create_port_ingress( + input.actor_id.into(), + &row, + unwrap!(GameGuardProtocol::from_repr(row.protocol.try_into()?)), + endpoint_type, + &dc.guard_public_hostname, + )?; + + Ok(FetchedPort { + name: row.port_name, + port_number: row.port_number.map(TryInto::try_into).transpose()?, + port, + }) + }) + .chain(port_host_rows.into_iter().map(|row| { + let port = get::create_port_host( + true, + wan_hostname.as_deref(), + &row, + // Placeholder, will be replaced in the isolate runner when building + // metadata + Some(&get::PortProxied { + port_name: String::new(), + source: 0, + }), + )?; + + Ok(FetchedPort { + name: row.port_name, + port_number: row.port_number.map(TryInto::try_into).transpose()?, + port, + }) + })) + .collect::>>()?; + + Ok(FetchPortsOutput { ports }) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct AllocateActorInputV1 { + actor_id: Uuid, + build_kind: BuildKind, + resources: protocol::Resources, +} + +#[activity(AllocateActorV1)] +async fn allocate_actor( + ctx: &ActivityCtx, + input: &AllocateActorInputV1, +) -> GlobalResult> { + AllocateActorV2::run( + ctx, + &AllocateActorInputV2 { + actor_id: input.actor_id, + generation: 0, + build_kind: input.build_kind, + resources: input.resources.clone(), + }, + ) + .await +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct AllocateActorInputV2 { + actor_id: Uuid, + generation: u32, + build_kind: BuildKind, + resources: protocol::Resources, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct AllocateActorOutputV2 { + pub client_id: Uuid, + pub client_workflow_id: Uuid, +} + +#[activity(AllocateActorV2)] +async fn allocate_actor_v2( + ctx: &ActivityCtx, + input: &AllocateActorInputV2, +) -> GlobalResult> { + let client_flavor = match input.build_kind { + BuildKind::DockerImage | BuildKind::OciBundle => protocol::ClientFlavor::Container, + BuildKind::JavaScript => protocol::ClientFlavor::Isolate, + }; + let memory_mib = input.resources.memory / 1024 / 1024; + + let start_instant = Instant::now(); + + let res = ctx + .fdb() + .await? 
+ .run(|tx, _mc| async move { + let ping_threshold_ts = util::timestamp::now() - CLIENT_ELIGIBLE_THRESHOLD_MS; + + // Select a range that only includes clients that have enough remaining mem to allocate this actor + let start = keys::subspace().pack( + &keys::datacenter::ClientsByRemainingMemKey::subspace_with_mem( + client_flavor, + memory_mib, + ), + ); + let client_allocation_subspace = + keys::datacenter::ClientsByRemainingMemKey::subspace(client_flavor); + let end = keys::subspace() + .subspace(&client_allocation_subspace) + .range() + .1; + + let mut stream = tx.get_ranges_keyvalues( + fdb::RangeOption { + mode: StreamingMode::Iterator, + // Containers bin pack so we reverse the order + reverse: matches!(client_flavor, protocol::ClientFlavor::Container), + ..(start, end).into() + }, + // NOTE: This is not SERIALIZABLE because we don't want to conflict with all of the keys, just + // the one we choose + SNAPSHOT, + ); + + loop { + let Some(entry) = stream.try_next().await? else { + return Ok(None); + }; + + let old_allocation_key = keys::subspace() + .unpack::(entry.key()) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + // Scan by last ping + if old_allocation_key.last_ping_ts < ping_threshold_ts { + continue; + } + + let client_workflow_id = old_allocation_key + .deserialize(entry.value()) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + // Add read conflict only for this key + tx.add_conflict_range( + entry.key(), + &end_of_key_range(entry.key()), + ConflictRangeType::Read, + )?; + + // Clear old entry + tx.clear(entry.key()); + + // Read old cpu + let remaining_cpu_key = + keys::client::RemainingCpuKey::new(old_allocation_key.client_id); + let remaining_cpu_key_buf = keys::subspace().pack(&remaining_cpu_key); + let remaining_cpu_entry = tx.get(&remaining_cpu_key_buf, SERIALIZABLE).await?; + let old_remaining_cpu = remaining_cpu_key + .deserialize( + &remaining_cpu_entry.ok_or(fdb::FdbBindingError::CustomError( + format!("key should exist: {remaining_cpu_key:?}").into(), + ))?, + ) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; + + // Update allocated amount + let new_remaining_mem = old_allocation_key.remaining_mem - memory_mib; + let new_remaining_cpu = old_remaining_cpu - input.resources.cpu; + let new_allocation_key = keys::datacenter::ClientsByRemainingMemKey::new( + client_flavor, + new_remaining_mem, + old_allocation_key.last_ping_ts, + old_allocation_key.client_id, + ); + tx.set(&keys::subspace().pack(&new_allocation_key), entry.value()); + + tracing::debug!( + old_mem=%old_allocation_key.remaining_mem, + old_cpu=%old_remaining_cpu, + new_mem=%new_remaining_mem, + new_cpu=%new_remaining_cpu, + "allocating resources" + ); + + // Update client record + let remaining_mem_key = + keys::client::RemainingMemoryKey::new(old_allocation_key.client_id); + tx.set( + &keys::subspace().pack(&remaining_mem_key), + &remaining_mem_key + .serialize(new_remaining_mem) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + tx.set( + &remaining_cpu_key_buf, + &remaining_cpu_key + .serialize(new_remaining_cpu) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + // Insert actor index key + let client_actor_key = + keys::client::ActorKey::new(old_allocation_key.client_id, input.actor_id); + tx.set( + &keys::subspace().pack(&client_actor_key), + &client_actor_key + .serialize(input.generation) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + return Ok(Some(AllocateActorOutputV2 { + client_id: 
old_allocation_key.client_id, + client_workflow_id, + })); + } + }) + .custom_instrument(tracing::info_span!("actor_allocate_tx")) + .await?; + + let dt = start_instant.elapsed().as_secs_f64(); + metrics::ACTOR_ALLOCATE_DURATION + .with_label_values(&[&res.is_some().to_string()]) + .observe(dt); + + Ok(res) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct UpdateFdbInput { + pub actor_id: Uuid, + pub client_id: Uuid, + pub state: protocol::ActorState, +} + +#[activity(UpdateFdb)] +pub async fn update_fdb(ctx: &ActivityCtx, input: &UpdateFdbInput) -> GlobalResult<()> { + use protocol::ActorState::*; + + match &input.state { + Starting | Running { .. } | Stopping => {} + Stopped | Lost | Exited { .. } => { + ctx.fdb() + .await? + .run(|tx, _mc| async move { + // Was inserted when the actor was allocated. This is cleared when the state changes as + // well as when the actor is destroyed to ensure consistency during rescheduling and + // forced deletion. + let actor_key = keys::client::ActorKey::new(input.client_id, input.actor_id); + tx.clear(&keys::subspace().pack(&actor_key)); + + Ok(()) + }) + .custom_instrument(tracing::info_span!("actor_clear_tx")) + .await?; + } + } + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct UpdateImageInput { + pub image_id: Uuid, +} + +#[activity(UpdateImage)] +pub async fn update_image(ctx: &ActivityCtx, input: &UpdateImageInput) -> GlobalResult<()> { + let pool = ctx.sqlite().await?; + + sql_execute!( + [ctx, pool] + " + UPDATE state + SET image_id = ? + ", + input.image_id, + ) + .await?; + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct SetStartedInput {} + +#[activity(SetStarted)] +pub async fn set_started(ctx: &ActivityCtx, input: &SetStartedInput) -> GlobalResult<()> { + let pool = ctx.sqlite().await?; + let start_ts = util::timestamp::now(); + + let row = sql_fetch_optional!( + [ctx, (i64,), pool] + " + UPDATE state + SET start_ts = ? + WHERE start_ts IS NULL + RETURNING create_ts + ", + start_ts, + ) + .await?; + + // Add start duration if this is the first start + if let Some((create_ts,)) = row { + let dt = (start_ts - create_ts) as f64 / 1000.0; + metrics::ACTOR_START_DURATION + .with_label_values(&[]) + .observe(dt); + } + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct SetConnectableInput { + pub connectable: bool, +} + +#[activity(SetConnectable)] +pub async fn set_connectable(ctx: &ActivityCtx, input: &SetConnectableInput) -> GlobalResult { + let pool = ctx.sqlite().await?; + + let res = sql_execute!( + [ctx, pool] + " + UPDATE state + SET connectable_ts = ? + WHERE + CASE WHEN ? + THEN connectable_ts IS NULL + ELSE connectable_ts IS NOT NULL + END + ", + input.connectable.then(util::timestamp::now), + input.connectable, + ) + .await?; + + Ok(res.rows_affected() > 0) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct InsertPortsInput { + pub ports: util::serde::HashableMap, +} + +#[activity(InsertPorts)] +pub async fn insert_ports(ctx: &ActivityCtx, input: &InsertPortsInput) -> GlobalResult<()> { + let pool = ctx.sqlite().await?; + let mut conn = pool.conn().await?; + let mut tx = conn.begin().await?; + + for (port_name, port) in &input.ports { + sql_execute!( + [ctx, @tx &mut tx] + " + INSERT INTO ports_proxied ( + port_name, + source, + ip + ) + VALUES (?, ?, ?) 
+ ", + port_name, + port.source as i64, + &port.lan_hostname, + ) + .await?; + } + + tx.commit().await?; + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct InsertPortsFdbInput { + pub actor_id: Uuid, + pub ports: util::serde::HashableMap, +} + +#[activity(InsertPortsFdb)] +pub async fn insert_ports_fdb(ctx: &ActivityCtx, input: &InsertPortsFdbInput) -> GlobalResult<()> { + let pool = &ctx.sqlite().await?; + + let ((create_ts,), ingress_ports) = tokio::try_join!( + sql_fetch_one!( + [ctx, (i64,), pool] + " + SELECT create_ts + FROM state + ", + ), + sql_fetch_all!( + [ctx, (String, i64, i64), pool] + " + SELECT port_name, ingress_port_number, protocol + FROM ports_ingress + ", + ), + )?; + + let proxied_ports = input + .ports + .iter() + // Match to ingress ports for GG + .filter_map(|(port_name, port)| { + if let Some((_, ingress_port_number, protocol)) = ingress_ports + .iter() + .find(|(ingress_port_name, _, _)| port_name == ingress_port_name) + { + Some((port_name, port, ingress_port_number, protocol)) + } else { + None + } + }) + .map(|(port_name, port, ingress_port_number, protocol)| { + let protocol = unwrap!(GameGuardProtocol::from_repr((*protocol).try_into()?)); + + Ok(keys::actor::ProxiedPort { + port_name: port_name.clone(), + create_ts, + lan_hostname: port.lan_hostname.clone(), + source: port.source, + ingress_port_number: (*ingress_port_number).try_into()?, + protocol, + }) + }) + .collect::>>()?; + + // Write proxied ingress ports to fdb index + ctx.fdb() + .await? + .run(|tx, _mc| { + let proxied_ports = proxied_ports.clone(); + async move { + let proxied_ports_key = keys::actor::ProxiedPortsKey::new(input.actor_id); + + tx.set( + &keys::subspace().pack(&proxied_ports_key), + &proxied_ports_key + .serialize(proxied_ports) + .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, + ); + + Ok(()) + } + }) + .custom_instrument(tracing::info_span!("actor_insert_proxied_ports_tx")) + .await?; + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CompareRetryInput { + last_retry_ts: i64, +} + +#[activity(CompareRetry)] +async fn compare_retry(ctx: &ActivityCtx, input: &CompareRetryInput) -> GlobalResult<(i64, bool)> { + let now = util::timestamp::now(); + + // If the last retry ts is more than RETRY_RESET_DURATION_MS, reset retry count + Ok((now, input.last_retry_ts < now - RETRY_RESET_DURATION_MS)) +} + +/// Returns whether or not there was availability to spawn the actor. +pub async fn spawn_actor( + ctx: &mut WorkflowCtx, + input: &Input, + actor_setup: &setup::ActorSetupCtx, + generation: u32, +) -> GlobalResult> { + let res = match ctx.check_version(2).await? { + 1 => { + ctx.activity(AllocateActorInputV1 { + actor_id: input.actor_id, + build_kind: actor_setup.meta.build_kind, + resources: actor_setup.resources.clone(), + }) + .await? + } + _ => { + ctx.v(2) + .activity(AllocateActorInputV2 { + actor_id: input.actor_id, + generation, + build_kind: actor_setup.meta.build_kind, + resources: actor_setup.resources.clone(), + }) + .await? 
+ } + }; + + let Some(res) = res else { + return Ok(None); + }; + + let (_, ports_res) = ctx + .join(( + activity(UpdateClientInput { + client_id: res.client_id, + client_workflow_id: res.client_workflow_id, + }), + v(2).activity(FetchPortsInput { + actor_id: input.actor_id, + endpoint_type: input.endpoint_type, + }), + )) + .await?; + + let cluster_id = ctx.config().server()?.rivet.edge()?.cluster_id; + + ctx.signal(protocol::Command::StartActor { + actor_id: input.actor_id.into(), + generation, + config: Box::new(protocol::ActorConfig { + image: protocol::Image { + id: actor_setup.image_id, + artifact_url_stub: actor_setup.artifact_url_stub.clone(), + fallback_artifact_url: actor_setup.fallback_artifact_url.clone(), + kind: actor_setup.meta.build_kind.into(), + compression: actor_setup.meta.build_compression.into(), + // Always single, this is the old actor wf + allocation_type: protocol::ImageAllocationType::Single, + + // Calculated on the manager for old actors + artifact_size: 0, + }, + root_user_enabled: input.root_user_enabled, + env: input.environment.clone(), + runner: None, + ports: ports_res + .ports + .iter() + .map(|port| match port.port.routing { + Routing::GameGuard { protocol } => ( + crate::util::pegboard_normalize_port_name(&port.name), + protocol::Port { + target: port.port_number, + protocol: match protocol { + GameGuardProtocol::Http + | GameGuardProtocol::Https + | GameGuardProtocol::Tcp + | GameGuardProtocol::TcpTls => protocol::TransportProtocol::Tcp, + GameGuardProtocol::Udp => protocol::TransportProtocol::Udp, + }, + routing: protocol::PortRouting::GameGuard, + }, + ), + Routing::Host { protocol } => ( + crate::util::pegboard_normalize_port_name(&port.name), + protocol::Port { + target: port.port_number, + protocol: match protocol { + HostProtocol::Tcp => protocol::TransportProtocol::Tcp, + HostProtocol::Udp => protocol::TransportProtocol::Udp, + }, + routing: protocol::PortRouting::Host, + }, + ), + }) + .collect(), + network_mode: match input.network_mode { + NetworkMode::Bridge => protocol::NetworkMode::Bridge, + NetworkMode::Host => protocol::NetworkMode::Host, + }, + resources: actor_setup.resources.clone(), + metadata: util::serde::Raw::new(&protocol::ActorMetadata { + actor: protocol::ActorMetadataActor { + actor_id: input.actor_id.into(), + tags: input.tags.clone(), + create_ts: ctx.ts(), + }, + network: Some(protocol::ActorMetadataNetwork { + ports: ports_res + .ports + .into_iter() + .map(|port| (port.name, port.port)) + .collect(), + }), + project: protocol::ActorMetadataProject { + project_id: actor_setup.meta.project_id, + slug: actor_setup.meta.project_slug.clone(), + }, + environment: protocol::ActorMetadataEnvironment { + env_id: input.env_id, + slug: actor_setup.meta.env_slug.clone(), + }, + datacenter: protocol::ActorMetadataDatacenter { + name_id: actor_setup.meta.dc_name_id.clone(), + display_name: actor_setup.meta.dc_display_name.clone(), + }, + cluster: protocol::ActorMetadataCluster { cluster_id }, + build: protocol::ActorMetadataBuild { + build_id: input.image_id, + }, + })?, + }), + }) + .to_workflow_id(res.client_workflow_id) + .send() + .await?; + + Ok(Some(res)) +} + +pub async fn reschedule_actor( + ctx: &mut WorkflowCtx, + input: &Input, + state: &mut State, + image_id: Uuid, +) -> GlobalResult> { + tracing::debug!(actor_id=?input.actor_id, "rescheduling actor"); + + ctx.activity(ClearPortsAndResourcesInput { + actor_id: input.actor_id, + image_id, + client_id: state.client_id, + client_workflow_id: state.client_workflow_id, + }) + 
.await?; + + let actor_setup = setup::setup(ctx, &input, setup::SetupCtx::Reschedule { image_id }).await?; + + let next_generation = state.generation + 1; + + // Waits for the actor to be ready (or destroyed) and automatically retries if failed to allocate. + let res = ctx + .loope(state.reschedule_state.clone(), |ctx, state| { + let input = input.clone(); + let actor_setup = actor_setup.clone(); + + async move { + // Determine next backoff sleep duration + let mut backoff = + util::Backoff::new_at(8, None, BASE_RETRY_TIMEOUT_MS, 500, state.retry_count); + + let (now, reset) = ctx + .v(2) + .activity(CompareRetryInput { + last_retry_ts: state.last_retry_ts, + }) + .await?; + + state.retry_count = if reset { 0 } else { state.retry_count + 1 }; + state.last_retry_ts = now; + + // Don't sleep for first retry + if state.retry_count > 0 { + let next = backoff.step().expect("should not have max retry"); + + // Sleep for backoff or destroy early + if let Some(sig) = ctx + .listen_with_timeout::(Instant::from(next) - Instant::now()) + .await? + { + tracing::debug!("destroying before actor reschedule"); + + return Ok(Loop::Break(Err(sig))); + } + } + + if let Some(res) = spawn_actor(ctx, &input, &actor_setup, next_generation).await? { + Ok(Loop::Break(Ok((state.clone(), res)))) + } else { + tracing::debug!(actor_id=?input.actor_id, "failed to reschedule actor, retrying"); + + Ok(Loop::Continue) + } + } + .boxed() + }) + .await?; + + // Update loop state + match res { + Ok((reschedule_state, res)) => { + state.generation = next_generation; + state.client_id = res.client_id; + state.client_workflow_id = res.client_workflow_id; + + // Save reschedule state in global state + state.reschedule_state = reschedule_state; + + // Reset gc timeout once allocated + state.gc_timeout_ts = Some(util::timestamp::now() + ACTOR_START_THRESHOLD_MS); + + Ok(None) + } + Err(sig) => Ok(Some(sig)), + } +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct ClearPortsAndResourcesInput { + actor_id: Uuid, + image_id: Uuid, + client_id: Uuid, + client_workflow_id: Uuid, +} + +#[activity(ClearPortsAndResources)] +async fn clear_ports_and_resources( + ctx: &ActivityCtx, + input: &ClearPortsAndResourcesInput, +) -> GlobalResult<()> { + let pool = &ctx.sqlite().await?; + + let ( + build_res, + ingress_ports, + (selected_resources_cpu_millicores, selected_resources_memory_mib), + _, + ) = tokio::try_join!( + ctx.op(build::ops::get::Input { + build_ids: vec![input.image_id], + }), + sql_fetch_all!( + [ctx, (i64, i64), pool] + " + SELECT protocol, ingress_port_number + FROM ports_ingress + ", + ), + sql_fetch_one!( + [ctx, (Option, Option), pool] + " + SELECT selected_resources_cpu_millicores, selected_resources_memory_mib + FROM state + ", + ), + // Idempotent + sql_execute!( + [ctx, pool] + " + DELETE FROM ports_proxied + ", + ), + )?; + let build = unwrap_with!(build_res.builds.first(), BUILD_NOT_FOUND); + + ctx.fdb() + .await? 
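+		// Reuses destroy::clear_ports_and_resources to release the previous client's ports
+		// and allocated resources before the actor is respawned on a new client.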
+ .run(|tx, _mc| { + let ingress_ports = ingress_ports.clone(); + async move { + destroy::clear_ports_and_resources( + input.actor_id, + Some(build.kind), + ingress_ports, + Some(input.client_id), + Some(input.client_workflow_id), + selected_resources_memory_mib, + selected_resources_cpu_millicores, + &tx, + ) + .await + } + }) + .custom_instrument(tracing::info_span!("actor_clear_ports_and_resources_tx")) + .await?; + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct SetFinishedInput {} + +#[activity(SetFinished)] +pub async fn set_finished(ctx: &ActivityCtx, input: &SetFinishedInput) -> GlobalResult<()> { + let pool = ctx.sqlite().await?; + + sql_execute!( + [ctx, pool] + " + UPDATE state + SET finish_ts = ? + ", + util::timestamp::now(), + ) + .await?; + + Ok(()) +} diff --git a/packages/edge/services/pegboard/src/workflows/actor2/setup.rs b/packages/edge/services/pegboard/src/workflows/actor/v1/setup.rs similarity index 61% rename from packages/edge/services/pegboard/src/workflows/actor2/setup.rs rename to packages/edge/services/pegboard/src/workflows/actor/v1/setup.rs index 48bc0bf874..223187a30f 100644 --- a/packages/edge/services/pegboard/src/workflows/actor2/setup.rs +++ b/packages/edge/services/pegboard/src/workflows/actor/v1/setup.rs @@ -1,13 +1,8 @@ -use build::types::{BuildAllocationType, BuildCompression, BuildKind, BuildResources}; +use build::types::{BuildCompression, BuildKind}; use chirp_workflow::prelude::*; use cluster::types::BuildDeliveryMethod; -use fdb_util::{end_of_key_range, FormalKey, SNAPSHOT}; -use foundationdb::{ - self as fdb, - options::{ConflictRangeType, StreamingMode}, -}; -use futures_util::TryStreamExt; -use rand::Rng; +use fdb_util::FormalKey; +use foundationdb as fdb; use sqlx::Acquire; use super::{Input, Port}; @@ -20,7 +15,7 @@ use crate::{ pub struct ValidateInput { pub env_id: Uuid, pub tags: util::serde::HashableMap, - pub resources: Option, + pub resources: ActorResources, pub image_id: Uuid, pub root_user_enabled: bool, pub args: Vec, @@ -29,11 +24,13 @@ pub struct ValidateInput { pub network_ports: util::serde::HashableMap, } +// TODO: Redo once a solid global error solution is established so we dont have to have validation all in one +// place. 
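+// Returns `Ok(Some(message))` with a user-facing error when validation fails and `Ok(None)`
+// when the inputs are valid; infrastructure failures still surface as `Err`.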
#[activity(Validate)] pub async fn validate(ctx: &ActivityCtx, input: &ValidateInput) -> GlobalResult> { let dc_id = ctx.config().server()?.rivet.edge()?.datacenter_id; - let (tiers, upload_res, game_config_res) = tokio::try_join!( + let (has_tier, upload_res, game_config_res) = tokio::try_join!( async { let tier_res = ctx .op(tier::ops::list::Input { @@ -41,9 +38,13 @@ pub async fn validate(ctx: &ActivityCtx, input: &ValidateInput) -> GlobalResult< pegboard: true, }) .await?; - let tier_dc = unwrap!(tier_res.datacenters.into_iter().next()); + let tier_dc = unwrap!(tier_res.datacenters.first()); - GlobalResult::Ok(tier_dc.tiers) + // Find any tier that has more CPU and memory than the requested resources + GlobalResult::Ok(tier_dc.tiers.iter().any(|t| { + t.cpu_millicores >= input.resources.cpu_millicores + && t.memory >= input.resources.memory_mib + })) }, async { let builds_res = ctx @@ -86,6 +87,10 @@ pub async fn validate(ctx: &ActivityCtx, input: &ValidateInput) -> GlobalResult< } )?; + if !has_tier { + return Ok(Some("Too many resources allocated.".into())); + } + // TODO: Validate build belongs to env/game let Some((build, upload_complete)) = upload_res else { return Ok(Some("Build not found.".into())); @@ -95,44 +100,6 @@ pub async fn validate(ctx: &ActivityCtx, input: &ValidateInput) -> GlobalResult< return Ok(Some("Build upload not complete.".into())); } - let resources = match build.allocation_type { - BuildAllocationType::None => { - // NOTE: This should be unreachable because if an old build is encountered the old actor wf is used. - return Ok(Some("Old builds not supported.".into())); - } - BuildAllocationType::Single => { - if let Some(resources) = &input.resources { - resources.clone() - } else { - return Ok(Some( - "Actors with builds of `allocation_type` = `single` must specify `resources`." 
- .into(), - )); - } - } - BuildAllocationType::Multi => { - if input.resources.is_some() { - return Ok(Some("Cannot specify `resources` for actors with builds of `allocation_type` = `multi`.".into())); - } - - let build_resources = unwrap!(build.resources, "multi build should have resources"); - - ActorResources { - cpu_millicores: build_resources.cpu_millicores, - memory_mib: build_resources.memory_mib, - } - } - }; - - // Find any tier that has more CPU and memory than the requested resources - let has_tier = tiers - .iter() - .any(|t| t.cpu_millicores >= resources.cpu_millicores && t.memory >= resources.memory_mib); - - if !has_tier { - return Ok(Some("Too many resources allocated.".into())); - } - let Some(game_config) = game_config_res else { return Ok(Some("Environment not found.".into())); }; @@ -286,24 +253,27 @@ pub async fn disable_tls_ports( #[derive(Debug, Clone, Serialize, Deserialize, Hash)] struct InsertDbInput { - actor_id: util::Id, + actor_id: Uuid, env_id: Uuid, tags: util::serde::HashableMap, - resources: Option, + resources: ActorResources, lifecycle: ActorLifecycle, image_id: Uuid, args: Vec, network_mode: NetworkMode, environment: util::serde::HashableMap, + network_ports: util::serde::HashableMap, } #[activity(InsertDb)] async fn insert_db(ctx: &ActivityCtx, input: &InsertDbInput) -> GlobalResult { let pool = ctx.sqlite().await?; + let mut conn = pool.conn().await?; + let mut tx = conn.begin().await?; let create_ts = ctx.ts(); sql_execute!( - [ctx, &pool] + [ctx, @tx &mut tx] " INSERT INTO state ( env_id, @@ -322,8 +292,8 @@ async fn insert_db(ctx: &ActivityCtx, input: &InsertDbInput) -> GlobalResult GlobalResult, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -struct AllocateIngressPortsOutput { - ports: Vec<(GameGuardProtocol, Vec)>, -} - -#[activity(AllocateIngressPorts)] -async fn allocate_ingress_ports( - ctx: &ActivityCtx, - input: &AllocateIngressPortsInput, -) -> GlobalResult { // Count up ports per protocol let mut port_counts = Vec::new(); for (_, port) in &input.network_ports { @@ -373,8 +324,7 @@ async fn allocate_ingress_ports( } } - let gg_config = &ctx.config().server()?.rivet.guard; - + // TODO: Move this from an op to an activity, and move the sql queries after to their own activity // Choose which port to assign for a job's ingress port. // This is required because TCP and UDP do not have a `Host` header and thus cannot be re-routed by hostname. // @@ -383,186 +333,19 @@ async fn allocate_ingress_ports( // - HTTPS: 443 // - TCP/TLS: random // - UDP: random - let ports = ctx - .fdb() - .await? 
- .run(|tx, _mc| { - let port_counts = port_counts.clone(); - async move { - let mut results = Vec::new(); - - // TODO: Parallelize - for (protocol, count) in &port_counts { - // Determine port range per protocol - let port_range = match protocol { - GameGuardProtocol::Http | GameGuardProtocol::Https => { - return Err(fdb::FdbBindingError::CustomError( - "Dynamic allocation not implemented for http/https ports".into(), - )); - } - GameGuardProtocol::Tcp | GameGuardProtocol::TcpTls => { - gg_config.min_ingress_port_tcp()..=gg_config.max_ingress_port_tcp() - } - GameGuardProtocol::Udp => { - gg_config.min_ingress_port_udp()..=gg_config.max_ingress_port_udp() - } - }; - - let mut last_port = None; - let mut ports = Vec::new(); - - // Choose a random starting port for better spread and less cache hits - let mut start = { - // It is important that we don't start at the end of the range so that the logic with - // `last_port` works correctly - let exclusive_port_range = *port_range.start()..*port_range.end(); - rand::thread_rng().gen_range(exclusive_port_range) - }; - - // Build start and end keys for ingress ports subspace - let start_key = keys::subspace() - .subspace(&keys::port::IngressKey2::subspace(*protocol, start)) - .range() - .0; - let end_key = keys::subspace() - .subspace(&keys::port::IngressKey2::subspace( - *protocol, - *port_range.end(), - )) - .range() - .1; - let mut stream = tx.get_ranges_keyvalues( - fdb::RangeOption { - mode: StreamingMode::Iterator, - ..(start_key, end_key.clone()).into() - }, - // NOTE: This is not SERIALIZABLE because we don't want to conflict with all of the keys, - // just the one we choose - SNAPSHOT, - ); - - // Continue iterating over the same stream until all of the required ports are found - for _ in 0..*count { - // Iterate through the subspace range until a port is found - let port = loop { - let Some(entry) = stream.try_next().await? 
else { - match last_port { - Some(port) if port == *port_range.end() => { - // End of range reached, start a new range read from the beginning (wrap around) - if start != *port_range.start() { - last_port = None; - - let old_start = start; - start = *port_range.start(); - - let start_key = keys::subspace() - .subspace(&keys::port::IngressKey2::subspace( - *protocol, start, - )) - .range() - .0; - stream = tx.get_ranges_keyvalues( - fdb::RangeOption { - mode: StreamingMode::Iterator, - limit: Some(old_start as usize), - ..(start_key, end_key.clone()).into() - }, - // NOTE: This is not SERIALIZABLE because we don't want to conflict - // with all of the keys, just the one we choose - SNAPSHOT, - ); - - continue; - } else { - break None; - } - } - // Return port after last port - Some(last_port) => { - break Some(last_port + 1); - } - // No ports were returned (range is empty) - None => { - break Some(start); - } - } - }; - - let key = keys::subspace() - .unpack::(entry.key()) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - let current_port = key.port; - - if let Some(last_port) = last_port { - // Gap found - if current_port != last_port + 1 { - break Some(last_port + 1); - } - } - - last_port = Some(current_port); - }; - - let Some(port) = port else { - return Err(fdb::FdbBindingError::CustomError( - format!("not enough {protocol} ports available").into(), - )); - }; - - let ingress_port_key = - keys::port::IngressKey2::new(*protocol, port, input.actor_id); - let ingress_port_key_buf = keys::subspace().pack(&ingress_port_key); - - // Add read conflict only for this key - tx.add_conflict_range( - &ingress_port_key_buf, - &end_of_key_range(&ingress_port_key_buf), - ConflictRangeType::Read, - )?; - - // Set key - tx.set( - &ingress_port_key_buf, - &ingress_port_key - .serialize(()) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - - ports.push(port); - } - - results.push((*protocol, ports)); - } - - Ok(results) - } + let ingress_ports_res = ctx + .op(crate::ops::actor::v1::allocate_ingress_ports::Input { + actor_id: input.actor_id.into(), + ports: port_counts, }) - .custom_instrument(tracing::info_span!("allocate_ingress_ports_tx")) .await?; - - Ok(AllocateIngressPortsOutput { ports }) -} - -#[derive(Debug, Clone, Serialize, Deserialize, Hash)] -struct InsertPortsInput { - actor_id: util::Id, - network_ports: util::serde::HashableMap, - ingress_ports: Vec<(GameGuardProtocol, Vec)>, -} - -#[activity(InsertPorts)] -async fn insert_ports(ctx: &ActivityCtx, input: &InsertPortsInput) -> GlobalResult<()> { - let pool = ctx.sqlite().await?; - let mut conn = pool.conn().await?; - let mut tx = conn.begin().await?; - - let gg_config = &ctx.config().server()?.rivet.guard; - let mut ingress_ports = input - .ingress_ports - .iter() - .map(|(protocol, ports)| (protocol, ports.clone().into_iter())) + let mut ingress_ports = ingress_ports_res + .ports + .into_iter() + .map(|(protocol, ports)| (protocol, ports.into_iter())) .collect::>(); + let gg_config = &ctx.config().server()?.rivet.guard; for (name, port) in input.network_ports.iter() { match port.routing { Routing::GameGuard { protocol } => { @@ -585,7 +368,7 @@ async fn insert_ports(ctx: &ActivityCtx, input: &InsertPortsInput) -> GlobalResu GameGuardProtocol::Https => gg_config.https_port(), GameGuardProtocol::Tcp | GameGuardProtocol::TcpTls | GameGuardProtocol::Udp => { let (_, ports_iter) = unwrap!( - ingress_ports.iter_mut().find(|(p, _)| &&protocol == p) + ingress_ports.iter_mut().find(|(p, _)| &protocol == p) ); 
unwrap!(ports_iter.next(), "missing ingress port") }, @@ -615,12 +398,12 @@ async fn insert_ports(ctx: &ActivityCtx, input: &InsertPortsInput) -> GlobalResu tx.commit().await?; - Ok(()) + Ok(create_ts) } #[derive(Debug, Clone, Serialize, Deserialize, Hash)] struct InsertFdbInput { - actor_id: util::Id, + actor_id: Uuid, env_id: Uuid, tags: util::serde::HashableMap, create_ts: i64, @@ -631,7 +414,7 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> GlobalResult<( ctx.fdb() .await? .run(|tx, _mc| async move { - let create_ts_key = keys::actor2::CreateTsKey::new(input.actor_id); + let create_ts_key = keys::actor::CreateTsKey::new(input.actor_id); tx.set( &keys::subspace().pack(&create_ts_key), &create_ts_key @@ -639,7 +422,7 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> GlobalResult<( .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, ); - let workflow_id_key = keys::actor2::WorkflowIdKey::new(input.actor_id); + let workflow_id_key = keys::actor::WorkflowIdKey::new(input.actor_id); tx.set( &keys::subspace().pack(&workflow_id_key), &workflow_id_key @@ -649,8 +432,8 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> GlobalResult<( // Add env index key let env_actor_key = - keys::env::Actor2Key::new(input.env_id, input.create_ts, input.actor_id); - let data = keys::env::Actor2KeyData { + keys::env::ActorKey::new(input.env_id, input.create_ts, input.actor_id); + let data = keys::env::ActorKeyData { is_destroyed: false, tags: input.tags.clone().into_iter().collect(), }; @@ -670,9 +453,9 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> GlobalResult<( } #[derive(Debug, Serialize, Deserialize, Hash)] -struct GetMetaInput { - env_id: Uuid, - image_id: Uuid, +pub struct GetMetaInput { + pub env_id: Uuid, + pub image_id: Uuid, } #[derive(Clone, Debug, Serialize, Deserialize, Hash)] @@ -684,16 +467,13 @@ pub struct GetMetaOutput { pub build_file_name: String, pub build_kind: BuildKind, pub build_compression: BuildCompression, - pub build_allocation_type: BuildAllocationType, - pub build_allocation_total_slots: u32, - pub build_resources: Option, pub dc_name_id: String, pub dc_display_name: String, pub dc_build_delivery_method: BuildDeliveryMethod, } #[activity(GetMeta)] -async fn get_meta(ctx: &ActivityCtx, input: &GetMetaInput) -> GlobalResult { +pub async fn get_meta(ctx: &ActivityCtx, input: &GetMetaInput) -> GlobalResult { let dc_id = ctx.config().server()?.rivet.edge()?.datacenter_id; let (env_res, build_res, dc_res) = tokio::try_join!( @@ -727,9 +507,6 @@ async fn get_meta(ctx: &ActivityCtx, input: &GetMetaInput) -> GlobalResult GlobalResult GlobalResult build_compression = ?, root_user_enabled = ? 
", - input.project_id, - input.build_kind as i64, - input.build_compression as i64, + input.meta.project_id, + input.meta.build_kind as i64, + input.meta.build_compression as i64, input.root_user_enabled, ) .await?; @@ -782,6 +557,8 @@ pub struct ActorSetupCtx { pub image_id: Uuid, pub meta: GetMetaOutput, pub resources: protocol::Resources, + pub artifact_url_stub: String, + pub fallback_artifact_url: Option, } pub async fn setup( @@ -803,23 +580,10 @@ pub async fn setup( args: input.args.clone(), network_mode: input.network_mode, environment: input.environment.clone(), + network_ports, }) .await?; - let ingress_ports_res = ctx - .activity(AllocateIngressPortsInput { - actor_id: input.actor_id, - network_ports: network_ports.clone(), - }) - .await?; - - ctx.activity(InsertPortsInput { - actor_id: input.actor_id, - network_ports, - ingress_ports: ingress_ports_res.ports, - }) - .await?; - ctx.activity(InsertFdbInput { actor_id: input.actor_id, env_id: input.env_id, @@ -842,49 +606,36 @@ pub async fn setup( ctx.v(2) .activity(InsertMetaInput { - project_id: meta.project_id, - build_kind: meta.build_kind, - build_compression: meta.build_compression, + meta: meta.clone(), root_user_enabled: input.root_user_enabled, }) .await?; - // Use resources from build or from actor config - let resources = match meta.build_allocation_type { - BuildAllocationType::None => bail!("actors do not support old builds"), - BuildAllocationType::Single => unwrap!( - input.resources.clone(), - "single builds should have actor resources" - ), - BuildAllocationType::Multi => { - let build_resources = - unwrap_ref!(meta.build_resources, "multi builds should have resources"); - - ActorResources { - cpu_millicores: build_resources.cpu_millicores, - memory_mib: build_resources.memory_mib, - } - } - }; - - let resources = ctx - .activity(SelectResourcesInput { - cpu_millicores: resources.cpu_millicores, - memory_mib: resources.memory_mib, - }) + let (resources, artifacts_res) = ctx + .join(( + activity(SelectResourcesInput { + resources: input.resources.clone(), + }), + activity(ResolveArtifactsInput { + build_upload_id: meta.build_upload_id, + build_file_name: meta.build_file_name.clone(), + dc_build_delivery_method: meta.dc_build_delivery_method, + }), + )) .await?; Ok(ActorSetupCtx { image_id, meta, resources, + artifact_url_stub: artifacts_res.artifact_url_stub, + fallback_artifact_url: artifacts_res.fallback_artifact_url, }) } #[derive(Debug, Serialize, Deserialize, Hash)] struct SelectResourcesInput { - cpu_millicores: u32, - memory_mib: u32, + resources: ActorResources, } #[activity(SelectResources)] @@ -909,9 +660,10 @@ async fn select_resources( // Find the first tier that has more CPU and memory than the requested // resources let tier = unwrap!( - tiers - .iter() - .find(|t| { t.cpu_millicores >= input.cpu_millicores && t.memory >= input.memory_mib }), + tiers.iter().find(|t| { + t.cpu_millicores >= input.resources.cpu_millicores + && t.memory >= input.resources.memory_mib + }), "no suitable tier found" ); @@ -943,3 +695,64 @@ async fn select_resources( disk: tier.disk, }) } + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct ResolveArtifactsInput { + build_upload_id: Uuid, + build_file_name: String, + dc_build_delivery_method: BuildDeliveryMethod, +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct ResolveArtifactsOutput { + artifact_url_stub: String, + fallback_artifact_url: Option, +} + +#[activity(ResolveArtifacts)] +async fn resolve_artifacts( + ctx: &ActivityCtx, + input: 
&ResolveArtifactsInput, +) -> GlobalResult { + // Get the fallback URL + let fallback_artifact_url = { + tracing::debug!("using s3 direct delivery"); + + // Build client + let s3_client = s3_util::Client::with_bucket_and_endpoint( + ctx.config(), + "bucket-build", + s3_util::EndpointKind::EdgeInternal, + ) + .await?; + + let presigned_req = s3_client + .get_object() + .bucket(s3_client.bucket()) + .key(format!( + "{upload_id}/{file_name}", + upload_id = input.build_upload_id, + file_name = input.build_file_name, + )) + .presigned( + s3_util::aws_sdk_s3::presigning::PresigningConfig::builder() + .expires_in(std::time::Duration::from_secs(15 * 60)) + .build()?, + ) + .await?; + + let addr_str = presigned_req.uri().to_string(); + tracing::debug!(addr = %addr_str, "resolved artifact s3 presigned request"); + + addr_str + }; + + Ok(ResolveArtifactsOutput { + artifact_url_stub: crate::util::image_artifact_url_stub( + ctx.config(), + input.build_upload_id, + &input.build_file_name, + )?, + fallback_artifact_url: Some(fallback_artifact_url), + }) +} diff --git a/packages/edge/services/pegboard/src/workflows/actor2/destroy.rs b/packages/edge/services/pegboard/src/workflows/actor2/destroy.rs deleted file mode 100644 index fa08711755..0000000000 --- a/packages/edge/services/pegboard/src/workflows/actor2/destroy.rs +++ /dev/null @@ -1,536 +0,0 @@ -use build::types::BuildAllocationType; -use chirp_workflow::prelude::*; -use fdb_util::{end_of_key_range, FormalKey, SERIALIZABLE}; -use foundationdb::{self as fdb, options::ConflictRangeType}; -use nix::sys::signal::Signal; - -use super::{ - analytics::InsertClickHouseInput, runtime::ActorRunnerClickhouseRow, DestroyComplete, - DestroyStarted, -}; -use crate::{keys, protocol, types::GameGuardProtocol}; - -#[derive(Debug, Serialize, Deserialize)] -pub struct KillCtx { - pub kill_timeout_ms: i64, -} - -#[derive(Debug, Serialize, Deserialize)] -pub(crate) struct Input { - pub actor_id: util::Id, - pub generation: u32, - pub image_id: Uuid, - pub build_allocation_type: Option, - /// Whether or not to send signals to the pb actor. In the case that the actor was already stopped - /// or exited, signals are unnecessary. 
- pub kill: Option, -} - -#[workflow] -pub(crate) async fn pegboard_actor_destroy( - ctx: &mut WorkflowCtx, - input: &Input, -) -> GlobalResult<()> { - ctx.msg(DestroyStarted {}) - .tag("actor_id", input.actor_id) - .send() - .await?; - - let actor = ctx.activity(UpdateDbInput {}).await?; - - if let Some(actor) = actor { - if let (Some(start_ts), Some(runner_id)) = (actor.start_ts, actor.runner_id) { - ctx.activity(FinishRunnerClickhouseInput { - actor_id: input.actor_id, - generation: input.generation, - start_ts, - runner_id, - }) - .await?; - } - - let client_workflow_id = actor.client_workflow_id; - let runner_id = actor.runner_id; - - let res = ctx - .activity(UpdateFdbInput { - actor_id: input.actor_id, - image_id: input.image_id, - build_allocation_type: input.build_allocation_type, - actor, - }) - .await?; - - // Destroy actor - if let (Some(client_workflow_id), Some(kill_data)) = (client_workflow_id, &input.kill) { - kill( - ctx, - input.actor_id, - input.generation, - client_workflow_id, - kill_data.kill_timeout_ms, - false, - ) - .await?; - } - - // Destroy runner - if let (Some(client_workflow_id), Some(runner_id), true) = - (client_workflow_id, runner_id, res.destroy_runner) - { - ctx.signal(protocol::Command::SignalRunner { - runner_id, - signal: Signal::SIGKILL as i32, - }) - .to_workflow_id(client_workflow_id) - .send() - .await?; - } - } - - // Update ClickHouse analytics with destroyed timestamp - ctx.v(2) - .activity(InsertClickHouseInput { - actor_id: input.actor_id, - }) - .await?; - - ctx.msg(DestroyComplete {}) - .tag("actor_id", input.actor_id) - .send() - .await?; - - Ok(()) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -struct UpdateDbInput {} - -#[derive(Debug, Serialize, Deserialize, Hash, sqlx::FromRow)] -struct UpdateDbOutput { - env_id: Uuid, - selected_resources_memory_mib: Option, - selected_resources_cpu_millicores: Option, - tags: sqlx::types::Json>, - create_ts: i64, - start_ts: Option, - runner_id: Option, - client_id: Option, - client_workflow_id: Option, -} - -#[activity(UpdateDb)] -async fn update_db( - ctx: &ActivityCtx, - input: &UpdateDbInput, -) -> GlobalResult> { - let pool = ctx.sqlite().await?; - - // NOTE: Row might not exist if the workflow failed before insert_db - sql_fetch_optional!( - [ctx, UpdateDbOutput, pool] - " - UPDATE state - SET destroy_ts = ? 
- RETURNING - env_id, - selected_resources_memory_mib, - selected_resources_cpu_millicores, - json(tags) AS tags, - create_ts, - start_ts, - runner_id, - client_id, - client_workflow_id - ", - ctx.ts(), - ) - .await -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -struct FinishRunnerClickhouseInput { - actor_id: util::Id, - generation: u32, - start_ts: i64, - runner_id: Uuid, -} - -#[activity(FinishRunnerClickhouse)] -async fn finish_runner_clickhouse( - ctx: &ActivityCtx, - input: &FinishRunnerClickhouseInput, -) -> GlobalResult<()> { - let inserter = ctx.clickhouse_inserter().await?; - - // Set alloc as finished - inserter.insert( - "db_pegboard_runner", - "actor_runners", - ActorRunnerClickhouseRow { - actor_id: input.actor_id.to_string(), - generation: input.generation, - runner_id: input.runner_id, - started_at: input.start_ts * 1_000_000, // Convert ms to ns for ClickHouse DateTime64(9) - finished_at: util::timestamp::now() * 1_000_000, // Convert ms to ns for ClickHouse DateTime64(9) - }, - )?; - - Ok(()) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -pub struct UpdateFdbInput { - actor_id: util::Id, - image_id: Uuid, - build_allocation_type: Option, - actor: UpdateDbOutput, -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -pub struct UpdateFdbOutput { - destroy_runner: bool, -} - -#[activity(UpdateFdb)] -pub async fn update_fdb( - ctx: &ActivityCtx, - input: &UpdateFdbInput, -) -> GlobalResult { - let pool = ctx.sqlite().await?; - - let ingress_ports = sql_fetch_all!( - [ctx, (i64, i64), pool] - " - SELECT protocol, ingress_port_number - FROM ports_ingress - ", - ) - .await?; - - let destroy_runner = ctx - .fdb() - .await? - .run(|tx, _mc| { - let ingress_ports = ingress_ports.clone(); - async move { - // Update actor key index in env subspace - let actor_key = keys::env::Actor2Key::new( - input.actor.env_id, - input.actor.create_ts, - input.actor_id, - ); - let data = keys::env::Actor2KeyData { - is_destroyed: true, - tags: input.actor.tags.0.clone().into_iter().collect(), - }; - tx.set( - &keys::subspace().pack(&actor_key), - &actor_key - .serialize(data) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - - clear_ports_and_resources( - input.actor_id, - input.image_id, - input.build_allocation_type, - ingress_ports, - input.actor.runner_id, - input.actor.client_id, - input.actor.client_workflow_id, - input.actor.selected_resources_memory_mib, - input.actor.selected_resources_cpu_millicores, - &tx, - ) - .await - } - }) - .custom_instrument(tracing::info_span!("actor_destroy_tx")) - .await?; - - Ok(UpdateFdbOutput { destroy_runner }) -} - -// TODO: Clean up args -/// Clears allocated ports and resources (if they were allocated). 
-pub(crate) async fn clear_ports_and_resources( - actor_id: util::Id, - image_id: Uuid, - build_allocation_type: Option, - ingress_ports: Vec<(i64, i64)>, - runner_id: Option, - client_id: Option, - client_workflow_id: Option, - selected_resources_memory_mib: Option, - selected_resources_cpu_millicores: Option, - tx: &fdb::RetryableTransaction, -) -> Result { - // Remove all allocated ingress ports - for (protocol, port) in ingress_ports { - let ingress_port_key = keys::port::IngressKey2::new( - GameGuardProtocol::from_repr( - usize::try_from(protocol) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ) - .ok_or_else(|| { - fdb::FdbBindingError::CustomError( - format!("invalid protocol variant: {protocol}").into(), - ) - })?, - u16::try_from(port).map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - actor_id, - ); - - tx.clear(&keys::subspace().pack(&ingress_port_key)); - } - - // Remove proxied ports - let proxied_ports_key = keys::actor2::ProxiedPortsKey::new(actor_id); - tx.clear(&keys::subspace().pack(&proxied_ports_key)); - - if let Some(client_id) = client_id { - // This is cleared when the state changes as well as when the actor is destroyed to ensure - // consistency during rescheduling and forced deletion. - let actor_key = keys::client::Actor2Key::new(client_id, actor_id); - tx.clear(&keys::subspace().pack(&actor_key)); - } - - // Release client's resources and update allocation index - if let ( - Some(build_allocation_type), - Some(runner_id), - Some(client_id), - Some(client_workflow_id), - Some(selected_resources_memory_mib), - Some(selected_resources_cpu_millicores), - ) = ( - build_allocation_type, - runner_id, - client_id, - client_workflow_id, - selected_resources_memory_mib, - selected_resources_cpu_millicores, - ) { - let client_flavor = protocol::ClientFlavor::Multi; - - let runner_remaining_slots_key = keys::runner::RemainingSlotsKey::new(runner_id); - let runner_remaining_slots_key_buf = keys::subspace().pack(&runner_remaining_slots_key); - let runner_total_slots_key = keys::runner::TotalSlotsKey::new(runner_id); - let runner_total_slots_key_buf = keys::subspace().pack(&runner_total_slots_key); - let client_remaining_mem_key = keys::client::RemainingMemoryKey::new(client_id); - let client_remaining_mem_key_buf = keys::subspace().pack(&client_remaining_mem_key); - let client_remaining_cpu_key = keys::client::RemainingCpuKey::new(client_id); - let client_remaining_cpu_key_buf = keys::subspace().pack(&client_remaining_cpu_key); - let client_last_ping_ts_key = keys::client::LastPingTsKey::new(client_id); - let client_last_ping_ts_key_buf = keys::subspace().pack(&client_last_ping_ts_key); - - let ( - runner_remaining_slots_entry, - runner_total_slots_entry, - client_remaining_mem_entry, - client_remaining_cpu_entry, - client_last_ping_ts_entry, - ) = tokio::try_join!( - tx.get(&runner_remaining_slots_key_buf, SERIALIZABLE), - tx.get(&runner_total_slots_key_buf, SERIALIZABLE), - tx.get(&client_remaining_mem_key_buf, SERIALIZABLE), - tx.get(&client_remaining_cpu_key_buf, SERIALIZABLE), - tx.get(&client_last_ping_ts_key_buf, SERIALIZABLE), - )?; - - let runner_remaining_slots = runner_remaining_slots_key - .deserialize( - &runner_remaining_slots_entry.ok_or(fdb::FdbBindingError::CustomError( - format!("key should exist: {runner_remaining_slots_key:?}").into(), - ))?, - ) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - let runner_total_slots = runner_total_slots_key - .deserialize( - 
&runner_total_slots_entry.ok_or(fdb::FdbBindingError::CustomError( - format!("key should exist: {runner_total_slots_key:?}").into(), - ))?, - ) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - let client_remaining_mem = client_remaining_mem_key - .deserialize( - &client_remaining_mem_entry.ok_or(fdb::FdbBindingError::CustomError( - format!("key should exist: {client_remaining_mem_key:?}").into(), - ))?, - ) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - let client_remaining_cpu = client_remaining_cpu_key - .deserialize( - &client_remaining_cpu_entry.ok_or(fdb::FdbBindingError::CustomError( - format!("key should exist: {client_remaining_cpu_key:?}").into(), - ))?, - ) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - let client_last_ping_ts = client_last_ping_ts_key - .deserialize( - &client_last_ping_ts_entry.ok_or(fdb::FdbBindingError::CustomError( - format!("key should exist: {client_last_ping_ts_key:?}").into(), - ))?, - ) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - - let old_runner_allocation_key = keys::datacenter::RunnersByRemainingSlotsKey::new( - image_id, - runner_remaining_slots, - runner_id, - ); - let old_runner_allocation_key_buf = keys::subspace().pack(&old_runner_allocation_key); - - let old_client_allocation_key = keys::datacenter::ClientsByRemainingMemKey::new( - client_flavor, - client_remaining_mem, - client_last_ping_ts, - client_id, - ); - let old_client_allocation_key_buf = keys::subspace().pack(&old_client_allocation_key); - - let new_runner_remaining_slots = runner_remaining_slots + 1; - - // Write new remaining slots - tx.set( - &runner_remaining_slots_key_buf, - &runner_remaining_slots_key - .serialize(new_runner_remaining_slots) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - - // Clear old key - tx.clear(&old_runner_allocation_key_buf); - - // Add read conflict - tx.add_conflict_range( - &old_runner_allocation_key_buf, - &end_of_key_range(&old_runner_allocation_key_buf), - ConflictRangeType::Read, - )?; - - let destroy_runner = if new_runner_remaining_slots < runner_total_slots { - let new_runner_allocation_key = keys::datacenter::RunnersByRemainingSlotsKey::new( - image_id, - new_runner_remaining_slots, - runner_id, - ); - let new_runner_allocation_key_buf = keys::subspace().pack(&new_runner_allocation_key); - - tx.set( - &new_runner_allocation_key_buf, - &new_runner_allocation_key - .serialize(keys::datacenter::RunnersByRemainingSlotsKeyData { - client_id, - client_workflow_id, - }) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - - false - } - // Runner is now empty, release client resources - else { - let new_client_remaining_mem = client_remaining_mem - + u64::try_from(selected_resources_memory_mib) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - let new_client_remaining_cpu = client_remaining_cpu - + u64::try_from(selected_resources_cpu_millicores) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - - tracing::debug!( - old_mem=%client_remaining_mem, - old_cpu=%client_remaining_cpu, - new_mem=%new_client_remaining_mem, - new_cpu=%new_client_remaining_cpu, - "releasing resources" - ); - - // Write new memory - tx.set( - &client_remaining_mem_key_buf, - &client_remaining_mem_key - .serialize(new_client_remaining_mem) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - // Write new cpu - tx.set( - &client_remaining_cpu_key_buf, - &client_remaining_cpu_key - .serialize(new_client_remaining_cpu) - .map_err(|x| 
fdb::FdbBindingError::CustomError(x.into()))?, - ); - - // Only update allocation idx if it existed before - if tx - .get(&old_client_allocation_key_buf, SERIALIZABLE) - .await? - .is_some() - { - // Clear old key - tx.clear(&old_client_allocation_key_buf); - - let new_client_allocation_key = keys::datacenter::ClientsByRemainingMemKey::new( - client_flavor, - new_client_remaining_mem, - client_last_ping_ts, - client_id, - ); - let new_client_allocation_key_buf = - keys::subspace().pack(&new_client_allocation_key); - - tx.set( - &new_client_allocation_key_buf, - &new_client_allocation_key - .serialize(client_workflow_id) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - } - - // Single container per runner allocations don't require explicitly destroying the runner because - // it is already stopped; the container = the actor. - matches!(build_allocation_type, BuildAllocationType::Multi) - }; - - Ok(destroy_runner) - } else { - Ok(false) - } -} - -pub(crate) async fn kill( - ctx: &mut WorkflowCtx, - actor_id: util::Id, - generation: u32, - client_workflow_id: Uuid, - kill_timeout_ms: i64, - persist_storage: bool, -) -> GlobalResult<()> { - if kill_timeout_ms != 0 { - ctx.signal(protocol::Command::SignalActor { - actor_id, - generation, - signal: Signal::SIGTERM as i32, - persist_storage, - }) - .to_workflow_id(client_workflow_id) - .send() - .await?; - - // See `docs/packages/job/JOB_DRAINING_AND_KILL_TIMEOUTS.md` - ctx.sleep(kill_timeout_ms).await?; - } - - ctx.signal(protocol::Command::SignalActor { - actor_id, - generation, - signal: Signal::SIGKILL as i32, - persist_storage, - }) - .to_workflow_id(client_workflow_id) - .send() - .await?; - - Ok(()) -} diff --git a/packages/edge/services/pegboard/src/workflows/actor2/runtime.rs b/packages/edge/services/pegboard/src/workflows/actor2/runtime.rs deleted file mode 100644 index 8b19bf4a96..0000000000 --- a/packages/edge/services/pegboard/src/workflows/actor2/runtime.rs +++ /dev/null @@ -1,1476 +0,0 @@ -use std::time::Instant; - -use build::types::{BuildAllocationType, BuildKind}; -use chirp_workflow::prelude::*; -use cluster::types::BuildDeliveryMethod; -use fdb_util::{end_of_key_range, FormalKey, SERIALIZABLE, SNAPSHOT}; -use foundationdb::{ - self as fdb, - options::{ConflictRangeType, StreamingMode}, -}; -use futures_util::StreamExt; -use futures_util::{FutureExt, TryStreamExt}; -use nix::sys::signal::Signal; -use sqlx::Acquire; - -use super::{ - destroy::{self, KillCtx}, - setup, Allocate, Destroy, Input, PendingAllocation, ACTOR_START_THRESHOLD_MS, - BASE_RETRY_TIMEOUT_MS, RETRY_RESET_DURATION_MS, -}; -use crate::{ - keys, metrics, - ops::actor::get, - protocol, - types::{EndpointType, GameGuardProtocol, HostProtocol, NetworkMode, Port, Routing}, - workflows::client::CLIENT_ELIGIBLE_THRESHOLD_MS, -}; - -#[derive(Deserialize, Serialize)] -pub struct State { - pub generation: u32, - pub runner_id: Uuid, - - pub client_id: Uuid, - pub client_workflow_id: Uuid, - pub image_id: Uuid, - - pub drain_timeout_ts: Option, - pub gc_timeout_ts: Option, - - #[serde(default)] - reschedule_state: RescheduleState, -} - -impl State { - pub fn new(runner_id: Uuid, client_id: Uuid, client_workflow_id: Uuid, image_id: Uuid) -> Self { - State { - generation: 0, - client_id, - client_workflow_id, - runner_id, - image_id, - drain_timeout_ts: None, - gc_timeout_ts: Some(util::timestamp::now() + ACTOR_START_THRESHOLD_MS), - reschedule_state: RescheduleState::default(), - } - } -} - -#[derive(Serialize, Deserialize)] -pub struct LifecycleRes { - 
pub generation: u32, - pub image_id: Uuid, - pub kill: Option, -} - -#[derive(Serialize, Deserialize, Clone, Default)] -struct RescheduleState { - last_retry_ts: i64, - retry_count: usize, -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -struct UpdateClientAndRunnerInput { - client_id: Uuid, - client_workflow_id: Uuid, - runner_id: Uuid, -} - -#[activity(UpdateClientAndRunner)] -async fn update_client_and_runner( - ctx: &ActivityCtx, - input: &UpdateClientAndRunnerInput, -) -> GlobalResult<()> { - let client_pool = ctx.sqlite_for_workflow(input.client_workflow_id).await?; - let pool = ctx.sqlite().await?; - - let (client_wan_hostname,) = sql_fetch_one!( - [ctx, (String,), client_pool] - " - SELECT config->'network'->>'wan_hostname' AS wan_hostname - FROM state - ", - ) - .await?; - - sql_execute!( - [ctx, &pool] - " - UPDATE state - SET - pending_allocation_ts = NULL, - client_id = ?1, - client_workflow_id = ?2, - client_wan_hostname = ?3, - runner_id = ?4, - old_runner_id = runner_id - ", - input.client_id, - input.client_workflow_id, - &client_wan_hostname, - input.runner_id, - ) - .await?; - - Ok(()) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -struct ResolveArtifactsInput { - build_upload_id: Uuid, - build_file_name: String, - dc_build_delivery_method: BuildDeliveryMethod, -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -struct ResolveArtifactsOutput { - artifact_url_stub: String, - fallback_artifact_url: String, - /// Bytes. - artifact_size: u64, -} - -#[activity(ResolveArtifacts)] -async fn resolve_artifacts( - ctx: &ActivityCtx, - input: &ResolveArtifactsInput, -) -> GlobalResult { - // Get the fallback URL - let fallback_artifact_url = { - tracing::debug!("using s3 direct delivery"); - - // Build client - let s3_client = s3_util::Client::with_bucket_and_endpoint( - ctx.config(), - "bucket-build", - s3_util::EndpointKind::EdgeInternal, - ) - .await?; - - let presigned_req = s3_client - .get_object() - .bucket(s3_client.bucket()) - .key(format!( - "{upload_id}/{file_name}", - upload_id = input.build_upload_id, - file_name = input.build_file_name, - )) - .presigned( - s3_util::aws_sdk_s3::presigning::PresigningConfig::builder() - .expires_in(std::time::Duration::from_secs(15 * 60)) - .build()?, - ) - .await?; - - let addr_str = presigned_req.uri().to_string(); - tracing::debug!(addr = %addr_str, "resolved artifact s3 presigned request"); - - addr_str - }; - - // Get the artifact size - let uploads_res = op!([ctx] upload_get { - upload_ids: vec![input.build_upload_id.into()], - }) - .await?; - let upload = unwrap!(uploads_res.uploads.first()); - - Ok(ResolveArtifactsOutput { - artifact_url_stub: crate::util::image_artifact_url_stub( - ctx.config(), - input.build_upload_id, - &input.build_file_name, - )?, - fallback_artifact_url, - artifact_size: upload.content_length, - }) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -struct FetchPortsInput { - actor_id: util::Id, - endpoint_type: Option, -} - -#[derive(Debug, Serialize, Deserialize)] -struct FetchPortsOutput { - ports: Vec, -} - -#[derive(Debug, Serialize, Deserialize)] -struct FetchedPort { - name: String, - port_number: Option, - port: Port, -} - -#[activity(FetchPorts)] -async fn fetch_ports(ctx: &ActivityCtx, input: &FetchPortsInput) -> GlobalResult { - let pool = ctx.sqlite().await?; - - let dc_id = ctx.config().server()?.rivet.edge()?.datacenter_id; - - let ((wan_hostname,), port_ingress_rows, port_host_rows, dc_res) = tokio::try_join!( - sql_fetch_one!( - [ctx, (Option,), &pool] - " - SELECT 
client_wan_hostname - FROM state - ", - ), - sql_fetch_all!( - [ctx, get::PortIngress, &pool] - " - SELECT - port_name, - port_number, - ingress_port_number, - protocol - FROM ports_ingress - ", - ), - sql_fetch_all!( - [ctx, get::PortHost, &pool] - " - SELECT port_name, port_number, protocol - FROM ports_host - ", - ), - ctx.op(cluster::ops::datacenter::get::Input { - datacenter_ids: vec![dc_id], - }), - )?; - - let dc = unwrap!(dc_res.datacenters.first()); - - let endpoint_type = input.endpoint_type.unwrap_or_else(|| { - EndpointType::default_for_guard_public_hostname(&dc.guard_public_hostname) - }); - - let ports = port_ingress_rows - .into_iter() - .map(|row| { - let port = get::create_port_ingress( - input.actor_id, - &row, - unwrap!(GameGuardProtocol::from_repr(row.protocol.try_into()?)), - endpoint_type, - &dc.guard_public_hostname, - )?; - - Ok(FetchedPort { - name: row.port_name, - port_number: row.port_number.map(TryInto::try_into).transpose()?, - port, - }) - }) - .chain(port_host_rows.into_iter().map(|row| { - let port = get::create_port_host( - true, - wan_hostname.as_deref(), - &row, - // Placeholder, will be replaced by the manager when building metadata - Some(&get::PortProxied { - port_name: String::new(), - source: 0, - }), - )?; - - Ok(FetchedPort { - name: row.port_name, - port_number: row.port_number.map(TryInto::try_into).transpose()?, - port, - }) - })) - .collect::>>()?; - - Ok(FetchPortsOutput { ports }) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -struct AllocateActorInput { - actor_id: util::Id, - generation: u32, - image_id: Uuid, - build_allocation_type: BuildAllocationType, - build_allocation_total_slots: u32, - resources: protocol::Resources, -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct AllocateActorOutput { - pub runner_id: Uuid, - pub new_runner: bool, - pub client_id: Uuid, - pub client_workflow_id: Uuid, -} - -// If no availability, returns the timestamp of the actor's queue key -#[activity(AllocateActor)] -async fn allocate_actor( - ctx: &ActivityCtx, - input: &AllocateActorInput, -) -> GlobalResult> { - let client_flavor = protocol::ClientFlavor::Multi; - let memory_mib = input.resources.memory / 1024 / 1024; - - let start_instant = Instant::now(); - - // NOTE: This txn should closely resemble the one found in the allocate_pending_actors activity of the - // client wf - let res = ctx - .fdb() - .await? - .run(|tx, _mc| async move { - // Check for availability amongst existing runners - let image_queue_exists = if let BuildAllocationType::Multi = input.build_allocation_type - { - // Check if a queue for this image exists - let pending_actor_by_image_subspace = keys::subspace().subspace( - &keys::datacenter::PendingActorByImageIdKey::subspace(input.image_id), - ); - let queue_exists = tx - .get_ranges_keyvalues( - fdb::RangeOption { - mode: StreamingMode::Exact, - limit: Some(1), - ..(&pending_actor_by_image_subspace).into() - }, - // NOTE: This is not SERIALIZABLE because we don't want to conflict with other - // inserts/clears to this range - // queue - SNAPSHOT, - ) - .try_next() - .await? 
- .is_some(); - - if !queue_exists { - // Select a range that only includes runners that have enough remaining slots to allocate - // this actor - let start = keys::subspace().pack( - &keys::datacenter::RunnersByRemainingSlotsKey::subspace_with_slots( - input.image_id, - 1, - ), - ); - let runner_allocation_subspace = - keys::datacenter::RunnersByRemainingSlotsKey::subspace(input.image_id); - let end = keys::subspace() - .subspace(&runner_allocation_subspace) - .range() - .1; - - let mut stream = tx.get_ranges_keyvalues( - fdb::RangeOption { - mode: StreamingMode::Iterator, - // Containers bin pack so we reverse the order - reverse: true, - ..(start, end).into() - }, - // NOTE: This is not SERIALIZABLE because we don't want to conflict with all of the keys, just - // the one we choose - SNAPSHOT, - ); - - loop { - let Some(entry) = stream.try_next().await? else { - break; - }; - - let old_runner_allocation_key = keys::subspace() - .unpack::(entry.key()) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - - let data = old_runner_allocation_key - .deserialize(entry.value()) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - - // Add read conflict only for this key - tx.add_conflict_range( - entry.key(), - &end_of_key_range(entry.key()), - ConflictRangeType::Read, - )?; - - // Clear old entry - tx.clear(entry.key()); - - let new_remaining_slots = - old_runner_allocation_key.remaining_slots.saturating_sub(1); - - // Write new allocation key with 1 less slot - let new_allocation_key = keys::datacenter::RunnersByRemainingSlotsKey::new( - input.image_id, - new_remaining_slots, - old_runner_allocation_key.runner_id, - ); - tx.set(&keys::subspace().pack(&new_allocation_key), entry.value()); - - // Update runner record - let remaining_slots_key = keys::runner::RemainingSlotsKey::new( - old_runner_allocation_key.runner_id, - ); - tx.set( - &keys::subspace().pack(&remaining_slots_key), - &remaining_slots_key - .serialize(new_remaining_slots) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - - // Insert actor index key - let client_actor_key = - keys::client::Actor2Key::new(data.client_id, input.actor_id); - tx.set( - &keys::subspace().pack(&client_actor_key), - &client_actor_key - .serialize(input.generation) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - - return Ok(Ok(AllocateActorOutput { - runner_id: old_runner_allocation_key.runner_id, - new_runner: false, - client_id: data.client_id, - client_workflow_id: data.client_workflow_id, - })); - } - } - - queue_exists - } else { - false - }; - - // No available runner found, create a new one - - // Check if a queue exists - let pending_actor_subspace = - keys::subspace().subspace(&keys::datacenter::PendingActorKey::subspace()); - let queue_exists = if image_queue_exists { - // We don't have to check the range if the image queue exists, its guaranteed that this one - // exists too - true - } else { - tx.get_ranges_keyvalues( - fdb::RangeOption { - mode: StreamingMode::Exact, - limit: Some(1), - ..(&pending_actor_subspace).into() - }, - // NOTE: This is not SERIALIZABLE because we don't want to conflict with other - // inserts/clears to this range - // queue - SNAPSHOT, - ) - .next() - .await - .is_some() - }; - - if !queue_exists { - let runner_id = Uuid::new_v4(); - - let ping_threshold_ts = util::timestamp::now() - CLIENT_ELIGIBLE_THRESHOLD_MS; - - // Select a range that only includes clients that have enough remaining mem to allocate this actor - let start = keys::subspace().pack( - 
&keys::datacenter::ClientsByRemainingMemKey::subspace_with_mem( - client_flavor, - memory_mib, - ), - ); - let client_allocation_subspace = - keys::datacenter::ClientsByRemainingMemKey::subspace(client_flavor); - let end = keys::subspace() - .subspace(&client_allocation_subspace) - .range() - .1; - - let mut stream = tx.get_ranges_keyvalues( - fdb::RangeOption { - mode: StreamingMode::Iterator, - // Containers bin pack so we reverse the order - reverse: true, - ..(start, end).into() - }, - // NOTE: This is not SERIALIZABLE because we don't want to conflict with all of the keys, just - // the one we choose - SNAPSHOT, - ); - - loop { - let Some(entry) = stream.try_next().await? else { - break; - }; - - let old_client_allocation_key = keys::subspace() - .unpack::(entry.key()) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - - // Scan by last ping - if old_client_allocation_key.last_ping_ts < ping_threshold_ts { - continue; - } - - let client_workflow_id = - old_client_allocation_key - .deserialize(entry.value()) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - - // Add read conflict only for this key - tx.add_conflict_range( - entry.key(), - &end_of_key_range(entry.key()), - ConflictRangeType::Read, - )?; - - // Clear old entry - tx.clear(entry.key()); - - // Read old cpu - let remaining_cpu_key = - keys::client::RemainingCpuKey::new(old_client_allocation_key.client_id); - let remaining_cpu_key_buf = keys::subspace().pack(&remaining_cpu_key); - let remaining_cpu_entry = tx.get(&remaining_cpu_key_buf, SERIALIZABLE).await?; - let old_remaining_cpu = remaining_cpu_key - .deserialize(&remaining_cpu_entry.ok_or( - fdb::FdbBindingError::CustomError( - format!("key should exist: {remaining_cpu_key:?}").into(), - ), - )?) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?; - - // Update allocated amount - let new_remaining_mem = old_client_allocation_key.remaining_mem - memory_mib; - let new_remaining_cpu = old_remaining_cpu - input.resources.cpu; - let new_allocation_key = keys::datacenter::ClientsByRemainingMemKey::new( - client_flavor, - new_remaining_mem, - old_client_allocation_key.last_ping_ts, - old_client_allocation_key.client_id, - ); - tx.set(&keys::subspace().pack(&new_allocation_key), entry.value()); - - tracing::debug!( - old_mem=%old_client_allocation_key.remaining_mem, - old_cpu=%old_remaining_cpu, - new_mem=%new_remaining_mem, - new_cpu=%new_remaining_cpu, - "allocating runner resources" - ); - - // Update client record - let remaining_mem_key = - keys::client::RemainingMemoryKey::new(old_client_allocation_key.client_id); - tx.set( - &keys::subspace().pack(&remaining_mem_key), - &remaining_mem_key - .serialize(new_remaining_mem) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - - tx.set( - &remaining_cpu_key_buf, - &remaining_cpu_key - .serialize(new_remaining_cpu) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - - let remaining_slots = input.build_allocation_total_slots.saturating_sub(1); - let total_slots = input.build_allocation_total_slots; - - // Insert runner records - let remaining_slots_key = keys::runner::RemainingSlotsKey::new(runner_id); - tx.set( - &keys::subspace().pack(&remaining_slots_key), - &remaining_slots_key - .serialize(remaining_slots) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - - let total_slots_key = keys::runner::TotalSlotsKey::new(runner_id); - tx.set( - &keys::subspace().pack(&total_slots_key), - &total_slots_key - .serialize(total_slots) - .map_err(|x| 
fdb::FdbBindingError::CustomError(x.into()))?, - ); - - let image_id_key = keys::runner::ImageIdKey::new(runner_id); - tx.set( - &keys::subspace().pack(&image_id_key), - &image_id_key - .serialize(input.image_id) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - - // Insert runner index key if multi. Single allocation per container runners don't need to be - // in the alloc idx because they only have 1 slot - if let BuildAllocationType::Multi = input.build_allocation_type { - let runner_idx_key = keys::datacenter::RunnersByRemainingSlotsKey::new( - input.image_id, - remaining_slots, - runner_id, - ); - tx.set( - &keys::subspace().pack(&runner_idx_key), - &runner_idx_key - .serialize(keys::datacenter::RunnersByRemainingSlotsKeyData { - client_id: old_client_allocation_key.client_id, - client_workflow_id, - }) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - } - - // Insert actor index key - let client_actor_key = keys::client::Actor2Key::new( - old_client_allocation_key.client_id, - input.actor_id, - ); - tx.set( - &keys::subspace().pack(&client_actor_key), - &client_actor_key - .serialize(input.generation) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - - return Ok(Ok(AllocateActorOutput { - runner_id, - new_runner: true, - client_id: old_client_allocation_key.client_id, - client_workflow_id, - })); - } - } - - // At this point in the txn there is no availability. Write the actor to the alloc queue to wait. - - let pending_ts = util::timestamp::now(); - - // Write self to image alloc queue - if let BuildAllocationType::Multi = input.build_allocation_type { - let image_pending_alloc_key = keys::datacenter::PendingActorByImageIdKey::new( - input.image_id, - pending_ts, - input.actor_id, - ); - let image_pending_alloc_data = keys::datacenter::PendingActorByImageIdKeyData { - generation: input.generation, - build_allocation_type: input.build_allocation_type, - build_allocation_total_slots: input.build_allocation_total_slots, - cpu: input.resources.cpu, - memory: input.resources.memory, - }; - - // NOTE: This will conflict with serializable reads to the alloc queue, which is the behavior we - // want. If a client reads from the queue while this is being inserted, one of the two txns will - // retry and we ensure the actor does not end up in queue limbo. - tx.set( - &keys::subspace().pack(&image_pending_alloc_key), - &image_pending_alloc_key - .serialize(image_pending_alloc_data) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - } - - // Write self to global alloc queue - let pending_alloc_key = - keys::datacenter::PendingActorKey::new(pending_ts, input.actor_id); - let pending_alloc_data = keys::datacenter::PendingActorKeyData { - generation: input.generation, - image_id: input.image_id, - build_allocation_type: input.build_allocation_type, - build_allocation_total_slots: input.build_allocation_total_slots, - cpu: input.resources.cpu, - memory: input.resources.memory, - }; - - // NOTE: This will conflict with serializable reads to the alloc queue, which is the behavior we - // want. If a client reads from the queue while this is being inserted, one of the two txns will - // retry and we ensure the actor does not end up in queue limbo. 
- tx.set( - &keys::subspace().pack(&pending_alloc_key), - &pending_alloc_key - .serialize(pending_alloc_data) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - - return Ok(Err(pending_ts)); - }) - .custom_instrument(tracing::info_span!("actor_allocate_tx")) - .await?; - - let dt = start_instant.elapsed().as_secs_f64(); - metrics::ACTOR_ALLOCATE_DURATION - .with_label_values(&[&res.is_ok().to_string()]) - .observe(dt); - - Ok(res) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -pub struct UpdateFdbInput { - pub actor_id: util::Id, - pub client_id: Uuid, - pub state: protocol::ActorState, -} - -#[activity(UpdateFdb)] -pub async fn update_fdb(ctx: &ActivityCtx, input: &UpdateFdbInput) -> GlobalResult<()> { - use protocol::ActorState::*; - - match &input.state { - Starting | Running { .. } | Stopping => {} - Stopped | Lost | Exited { .. } => { - ctx.fdb() - .await? - .run(|tx, _mc| async move { - // Was inserted when the actor was allocated. This is cleared when the state changes as - // well as when the actor is destroyed to ensure consistency during rescheduling and - // forced deletion. - let actor_key = keys::client::Actor2Key::new(input.client_id, input.actor_id); - tx.clear(&keys::subspace().pack(&actor_key)); - - Ok(()) - }) - .custom_instrument(tracing::info_span!("actor_clear_tx")) - .await?; - } - } - - Ok(()) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -pub struct UpdateImageInput { - pub image_id: Uuid, -} - -#[activity(UpdateImage)] -pub async fn update_image(ctx: &ActivityCtx, input: &UpdateImageInput) -> GlobalResult<()> { - let pool = ctx.sqlite().await?; - - sql_execute!( - [ctx, pool] - " - UPDATE state - SET image_id = ? - ", - input.image_id, - ) - .await?; - - Ok(()) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -pub struct SetStartedInput { - pub actor_id: util::Id, - pub generation: u32, -} - -#[derive(Serialize)] -pub(crate) struct ActorRunnerClickhouseRow { - pub actor_id: String, - pub generation: u32, - pub runner_id: Uuid, - pub started_at: i64, - pub finished_at: i64, -} - -#[activity(SetStarted)] -pub async fn set_started(ctx: &ActivityCtx, input: &SetStartedInput) -> GlobalResult<()> { - let pool = ctx.sqlite().await?; - let start_ts = util::timestamp::now(); - - let (create_ts, old_start_ts, runner_id, old_runner_id) = sql_fetch_one!( - [ctx, (i64, Option, Uuid, Option), &pool] - " - SELECT create_ts, start_ts, runner_id, old_runner_id - FROM state - ", - start_ts, - ) - .await?; - - sql_execute!( - [ctx, &pool] - " - UPDATE state SET start_ts = ?1 - ", - start_ts, - ) - .await?; - - let inserter = ctx.clickhouse_inserter().await?; - - // Set old alloc as finished - if let (Some(old_start_ts), Some(old_runner_id)) = (old_start_ts, old_runner_id) { - inserter.insert( - "db_pegboard_runner", - "actor_runners", - ActorRunnerClickhouseRow { - actor_id: input.actor_id.to_string(), - generation: input.generation, - runner_id: old_runner_id, - started_at: old_start_ts * 1_000_000, // Convert ms to ns for ClickHouse DateTime64(9) - finished_at: start_ts * 1_000_000, // Convert ms to ns for ClickHouse DateTime64(9) - }, - )?; - } - - // Insert new alloc - inserter.insert( - "db_pegboard_runner", - "actor_runners", - ActorRunnerClickhouseRow { - actor_id: input.actor_id.to_string(), - generation: input.generation, - runner_id, - started_at: start_ts * 1_000_000, // Convert ms to ns for ClickHouse DateTime64(9) - finished_at: 0, - }, - )?; - - // Add start metric for first start - if old_start_ts.is_none() { - let dt = (start_ts - 
create_ts) as f64 / 1000.0; - metrics::ACTOR_START_DURATION - .with_label_values(&[]) - .observe(dt); - } - - Ok(()) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -pub struct SetConnectableInput { - pub connectable: bool, -} - -#[activity(SetConnectable)] -pub async fn set_connectable(ctx: &ActivityCtx, input: &SetConnectableInput) -> GlobalResult { - let pool = ctx.sqlite().await?; - - let res = sql_execute!( - [ctx, pool] - " - UPDATE state - SET connectable_ts = ? - WHERE - CASE WHEN ? - THEN connectable_ts IS NULL - ELSE connectable_ts IS NOT NULL - END - ", - input.connectable.then(util::timestamp::now), - input.connectable, - ) - .await?; - - Ok(res.rows_affected() > 0) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -pub struct InsertPortsInput { - pub ports: util::serde::HashableMap, -} - -#[activity(InsertPorts)] -pub async fn insert_ports(ctx: &ActivityCtx, input: &InsertPortsInput) -> GlobalResult<()> { - let pool = ctx.sqlite().await?; - let mut conn = pool.conn().await?; - let mut tx = conn.begin().await?; - - for (port_name, port) in &input.ports { - sql_execute!( - [ctx, @tx &mut tx] - " - INSERT INTO ports_proxied ( - port_name, - source, - ip - ) - VALUES (?, ?, ?) - ", - port_name, - port.source as i64, - &port.lan_hostname, - ) - .await?; - } - - tx.commit().await?; - - Ok(()) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -pub struct InsertPortsFdbInput { - pub actor_id: util::Id, - pub ports: util::serde::HashableMap, -} - -#[activity(InsertPortsFdb)] -pub async fn insert_ports_fdb(ctx: &ActivityCtx, input: &InsertPortsFdbInput) -> GlobalResult<()> { - let pool = &ctx.sqlite().await?; - - let ((create_ts,), ingress_ports) = tokio::try_join!( - sql_fetch_one!( - [ctx, (i64,), pool] - " - SELECT create_ts - FROM state - ", - ), - sql_fetch_all!( - [ctx, (String, i64, i64), pool] - " - SELECT port_name, ingress_port_number, protocol - FROM ports_ingress - ", - ), - )?; - - let proxied_ports = input - .ports - .iter() - // Match to ingress ports for GG - .filter_map(|(port_name, port)| { - if let Some((_, ingress_port_number, protocol)) = ingress_ports - .iter() - .find(|(ingress_port_name, _, _)| port_name == ingress_port_name) - { - Some((port_name, port, ingress_port_number, protocol)) - } else { - None - } - }) - .map(|(port_name, port, ingress_port_number, protocol)| { - let protocol = unwrap!(GameGuardProtocol::from_repr((*protocol).try_into()?)); - - Ok(keys::actor2::ProxiedPort { - port_name: port_name.clone(), - create_ts, - lan_hostname: port.lan_hostname.clone(), - source: port.source, - ingress_port_number: (*ingress_port_number).try_into()?, - protocol, - }) - }) - .collect::>>()?; - - // Write proxied ingress ports to fdb index - ctx.fdb() - .await? 
- .run(|tx, _mc| { - let proxied_ports = proxied_ports.clone(); - async move { - let proxied_ports_key = keys::actor2::ProxiedPortsKey::new(input.actor_id); - - tx.set( - &keys::subspace().pack(&proxied_ports_key), - &proxied_ports_key - .serialize(proxied_ports) - .map_err(|x| fdb::FdbBindingError::CustomError(x.into()))?, - ); - - Ok(()) - } - }) - .custom_instrument(tracing::info_span!("actor_insert_proxied_ports_tx")) - .await?; - - Ok(()) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -struct CompareRetryInput { - last_retry_ts: i64, -} - -#[activity(CompareRetry)] -async fn compare_retry(ctx: &ActivityCtx, input: &CompareRetryInput) -> GlobalResult<(i64, bool)> { - let now = util::timestamp::now(); - - // If the last retry ts is more than RETRY_RESET_DURATION_MS, reset retry count - Ok((now, input.last_retry_ts < now - RETRY_RESET_DURATION_MS)) -} - -/// Returns None if a destroy signal was received while pending for allocation. -pub async fn spawn_actor( - ctx: &mut WorkflowCtx, - input: &Input, - actor_setup: &setup::ActorSetupCtx, - generation: u32, -) -> GlobalResult> { - // Attempt allocation - let allocate_res = ctx - .activity(AllocateActorInput { - actor_id: input.actor_id, - generation, - image_id: actor_setup.image_id, - build_allocation_type: actor_setup.meta.build_allocation_type, - build_allocation_total_slots: actor_setup.meta.build_allocation_total_slots, - resources: actor_setup.resources.clone(), - }) - .await?; - - let allocate_res = match allocate_res { - Ok(x) => x, - Err(pending_allocation_ts) => { - tracing::warn!( - actor_id=?input.actor_id, - "failed to allocate (no availability), waiting for allocation", - ); - - ctx.activity(SetPendingAllocationInput { - pending_allocation_ts, - }) - .await?; - - // If allocation fails, the allocate txn already inserted this actor into the queue. Now we wait for - // an `Allocate` signal - match ctx.listen::().await? { - PendingAllocation::Allocate(sig) => AllocateActorOutput { - runner_id: sig.runner_id, - new_runner: sig.new_runner, - client_id: sig.client_id, - client_workflow_id: sig.client_workflow_id, - }, - // We ignore the signal's override_kill_timeout_ms because the actor isn't allocated - PendingAllocation::Destroy(_sig) => { - tracing::debug!("destroying before actor allocated"); - - let cleared = ctx - .activity(ClearPendingAllocationInput { - actor_id: input.actor_id, - pending_allocation_ts, - }) - .await?; - - // If this actor was no longer present in the queue it means it was allocated. We must now - // wait for the allocated signal to prevent a race condition. - if !cleared { - let sig = ctx.listen::().await?; - - ctx.activity(UpdateClientAndRunnerInput { - client_id: sig.client_id, - client_workflow_id: sig.client_workflow_id, - runner_id: sig.runner_id, - }) - .await?; - } - - return Ok(None); - } - } - } - }; - - let (_, artifacts_res, ports_res) = ctx - .join(( - activity(UpdateClientAndRunnerInput { - client_id: allocate_res.client_id, - client_workflow_id: allocate_res.client_workflow_id, - runner_id: allocate_res.runner_id, - }), - // NOTE: We resolve the artifacts here instead of in setup::setup because we don't know how - // long it will be after setup until an actor is allocated so the presigned artifact url might - // expire. 
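The destroy-while-pending branch above hinges on one invariant: ClearPendingAllocation reports whether the actor was still present in the FDB allocation queue at the moment it was cleared. If it was not, an allocator already claimed the actor, so an Allocate signal is in flight and must be consumed (to record the client and runner) before the workflow bails out. A minimal sketch of that decision rule, using hypothetical names rather than the workflow API:

// Illustrative only; `Next` and `on_destroy_while_pending` are hypothetical
// and stand in for the workflow logic shown in the diff above.
enum Next {
    // The actor was still queued, so nothing else references it yet.
    DestroyImmediately,
    // An allocator already popped the actor from the queue; consume the
    // pending Allocate signal first so client/runner bookkeeping stays
    // consistent, then destroy.
    WaitForAllocateThenDestroy,
}

fn on_destroy_while_pending(cleared_from_queue: bool) -> Next {
    if cleared_from_queue {
        Next::DestroyImmediately
    } else {
        Next::WaitForAllocateThenDestroy
    }
}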
- activity(ResolveArtifactsInput { - build_upload_id: actor_setup.meta.build_upload_id, - build_file_name: actor_setup.meta.build_file_name.clone(), - dc_build_delivery_method: actor_setup.meta.dc_build_delivery_method, - }), - activity(FetchPortsInput { - actor_id: input.actor_id, - endpoint_type: input.endpoint_type, - }), - )) - .await?; - - let cluster_id = ctx.config().server()?.rivet.edge()?.cluster_id; - - let image = protocol::Image { - id: actor_setup.image_id, - artifact_url_stub: artifacts_res.artifact_url_stub.clone(), - fallback_artifact_url: Some(artifacts_res.fallback_artifact_url.clone()), - artifact_size: artifacts_res.artifact_size, - kind: match actor_setup.meta.build_kind { - BuildKind::DockerImage => protocol::ImageKind::DockerImage, - BuildKind::OciBundle => protocol::ImageKind::OciBundle, - BuildKind::JavaScript => bail!("actors do not support js builds"), - }, - compression: actor_setup.meta.build_compression.into(), - allocation_type: match actor_setup.meta.build_allocation_type { - BuildAllocationType::None => bail!("actors do not support old builds"), - BuildAllocationType::Single => protocol::ImageAllocationType::Single, - BuildAllocationType::Multi => protocol::ImageAllocationType::Multi, - }, - }; - let ports = ports_res - .ports - .iter() - .map(|port| match port.port.routing { - Routing::GameGuard { protocol } => ( - crate::util::pegboard_normalize_port_name(&port.name), - protocol::Port { - target: port.port_number, - protocol: match protocol { - GameGuardProtocol::Http - | GameGuardProtocol::Https - | GameGuardProtocol::Tcp - | GameGuardProtocol::TcpTls => protocol::TransportProtocol::Tcp, - GameGuardProtocol::Udp => protocol::TransportProtocol::Udp, - }, - routing: protocol::PortRouting::GameGuard, - }, - ), - Routing::Host { protocol } => ( - crate::util::pegboard_normalize_port_name(&port.name), - protocol::Port { - target: port.port_number, - protocol: match protocol { - HostProtocol::Tcp => protocol::TransportProtocol::Tcp, - HostProtocol::Udp => protocol::TransportProtocol::Udp, - }, - routing: protocol::PortRouting::Host, - }, - ), - }) - .collect::>(); - let network_mode = match input.network_mode { - NetworkMode::Bridge => protocol::NetworkMode::Bridge, - NetworkMode::Host => protocol::NetworkMode::Host, - }; - - ctx.signal(protocol::Command::StartActor { - actor_id: input.actor_id, - generation, - config: Box::new(protocol::ActorConfig { - runner: if allocate_res.new_runner { - Some(protocol::ActorRunner::New { - runner_id: allocate_res.runner_id, - config: protocol::RunnerConfig { - image: image.clone(), - root_user_enabled: input.root_user_enabled, - resources: actor_setup.resources.clone(), - env: input.environment.clone(), - ports: ports.clone(), - network_mode, - }, - }) - } else { - Some(protocol::ActorRunner::Existing { - runner_id: allocate_res.runner_id, - }) - }, - env: input.environment.clone(), - metadata: util::serde::Raw::new(&protocol::ActorMetadata { - actor: protocol::ActorMetadataActor { - actor_id: input.actor_id, - tags: input.tags.clone(), - create_ts: ctx.ts(), - }, - network: Some(protocol::ActorMetadataNetwork { - ports: ports_res - .ports - .into_iter() - .map(|port| (port.name, port.port)) - .collect(), - }), - project: protocol::ActorMetadataProject { - project_id: actor_setup.meta.project_id, - slug: actor_setup.meta.project_slug.clone(), - }, - environment: protocol::ActorMetadataEnvironment { - env_id: input.env_id, - slug: actor_setup.meta.env_slug.clone(), - }, - datacenter: protocol::ActorMetadataDatacenter { - 
name_id: actor_setup.meta.dc_name_id.clone(), - display_name: actor_setup.meta.dc_display_name.clone(), - }, - cluster: protocol::ActorMetadataCluster { cluster_id }, - build: protocol::ActorMetadataBuild { - build_id: actor_setup.image_id, - }, - })?, - - // Deprecated - image, - root_user_enabled: input.root_user_enabled, - resources: actor_setup.resources.clone(), - ports, - network_mode, - }), - }) - .to_workflow_id(allocate_res.client_workflow_id) - .send() - .await?; - - Ok(Some(allocate_res)) -} - -/// Returns true if the actor should be destroyed. -pub async fn reschedule_actor( - ctx: &mut WorkflowCtx, - input: &Input, - state: &mut State, - image_id: Uuid, -) -> GlobalResult { - tracing::debug!(actor_id=?input.actor_id, "rescheduling actor"); - - let res = ctx - .activity(ClearPortsAndResourcesInput { - actor_id: input.actor_id, - image_id, - runner_id: state.runner_id, - client_id: state.client_id, - client_workflow_id: state.client_workflow_id, - }) - .await?; - - // `destroy_runner` is true when this was the last actor running on that runner, meaning we have to - // destroy it. - if res.destroy_runner { - ctx.signal(protocol::Command::SignalRunner { - runner_id: state.runner_id, - signal: Signal::SIGKILL as i32, - }) - .to_workflow_id(state.client_workflow_id) - .send() - .await?; - } - - let actor_setup = setup::setup(ctx, &input, setup::SetupCtx::Reschedule { image_id }).await?; - - let next_generation = state.generation + 1; - - // Waits for the actor to be ready (or destroyed) and automatically retries if failed to allocate. - let res = ctx - .loope(state.reschedule_state.clone(), |ctx, state| { - let input = input.clone(); - let actor_setup = actor_setup.clone(); - - async move { - // Determine next backoff sleep duration - let mut backoff = - util::Backoff::new_at(8, None, BASE_RETRY_TIMEOUT_MS, 500, state.retry_count); - - let (now, reset) = ctx - .v(2) - .activity(CompareRetryInput { - last_retry_ts: state.last_retry_ts, - }) - .await?; - - state.retry_count = if reset { 0 } else { state.retry_count + 1 }; - state.last_retry_ts = now; - - // Don't sleep for first retry - if state.retry_count > 0 { - let next = backoff.step().expect("should not have max retry"); - - // Sleep for backoff or destroy early - if let Some(_sig) = ctx - .listen_with_timeout::(Instant::from(next) - Instant::now()) - .await? - { - tracing::debug!("destroying before actor start"); - - return Ok(Loop::Break(None)); - } - } - - if let Some(res) = spawn_actor(ctx, &input, &actor_setup, next_generation).await? 
{ - Ok(Loop::Break(Some((state.clone(), res)))) - } else { - // Destroyed early - Ok(Loop::Break(None)) - } - } - .boxed() - }) - .await?; - - // Update loop state - if let Some((reschedule_state, res)) = res { - state.generation = next_generation; - state.runner_id = res.runner_id; - state.client_id = res.client_id; - state.client_workflow_id = res.client_workflow_id; - - // Save reschedule state in global state - state.reschedule_state = reschedule_state; - - // Reset gc timeout once allocated - state.gc_timeout_ts = Some(util::timestamp::now() + ACTOR_START_THRESHOLD_MS); - - Ok(false) - } else { - Ok(true) - } -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -pub struct SetPendingAllocationInput { - pending_allocation_ts: i64, -} - -#[activity(SetPendingAllocation)] -pub async fn set_pending_allocation( - ctx: &ActivityCtx, - input: &SetPendingAllocationInput, -) -> GlobalResult<()> { - let pool = ctx.sqlite().await?; - - sql_execute!( - [ctx, pool] - " - UPDATE state - SET pending_allocation_ts = ? - ", - input.pending_allocation_ts, - ) - .await?; - - Ok(()) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -pub struct ClearPendingAllocationInput { - actor_id: util::Id, - pending_allocation_ts: i64, -} - -#[activity(ClearPendingAllocation)] -pub async fn clear_pending_allocation( - ctx: &ActivityCtx, - input: &ClearPendingAllocationInput, -) -> GlobalResult { - // Clear self from alloc queue - let cleared = ctx - .fdb() - .await? - .run(|tx, _mc| async move { - let pending_alloc_key = keys::subspace().pack(&keys::datacenter::PendingActorKey::new( - input.pending_allocation_ts, - input.actor_id, - )); - - let exists = tx.get(&pending_alloc_key, SERIALIZABLE).await?.is_some(); - - tx.clear(&pending_alloc_key); - - Ok(exists) - }) - .await?; - - Ok(cleared) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -struct ClearPortsAndResourcesInput { - actor_id: util::Id, - image_id: Uuid, - runner_id: Uuid, - client_id: Uuid, - client_workflow_id: Uuid, -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -pub struct ClearPortsAndResourcesOutput { - destroy_runner: bool, -} - -#[activity(ClearPortsAndResources)] -async fn clear_ports_and_resources( - ctx: &ActivityCtx, - input: &ClearPortsAndResourcesInput, -) -> GlobalResult { - let pool = &ctx.sqlite().await?; - - let ( - build_res, - ingress_ports, - (selected_resources_cpu_millicores, selected_resources_memory_mib), - _, - ) = tokio::try_join!( - ctx.op(build::ops::get::Input { - build_ids: vec![input.image_id], - }), - sql_fetch_all!( - [ctx, (i64, i64), pool] - " - SELECT protocol, ingress_port_number - FROM ports_ingress - ", - ), - sql_fetch_one!( - [ctx, (Option, Option), pool] - " - SELECT selected_resources_cpu_millicores, selected_resources_memory_mib - FROM state - ", - ), - // Idempotent - sql_execute!( - [ctx, pool] - " - DELETE FROM ports_proxied - ", - ), - )?; - let build = unwrap_with!(build_res.builds.first(), BUILD_NOT_FOUND); - - let destroy_runner = ctx - .fdb() - .await? 
- .run(|tx, _mc| { - let ingress_ports = ingress_ports.clone(); - async move { - destroy::clear_ports_and_resources( - input.actor_id, - input.image_id, - Some(build.allocation_type), - ingress_ports, - Some(input.runner_id), - Some(input.client_id), - Some(input.client_workflow_id), - selected_resources_memory_mib, - selected_resources_cpu_millicores, - &tx, - ) - .await - } - }) - .custom_instrument(tracing::info_span!("actor_clear_ports_and_resources_tx")) - .await?; - - Ok(ClearPortsAndResourcesOutput { destroy_runner }) -} - -#[derive(Debug, Serialize, Deserialize, Hash)] -pub struct SetFinishedInput {} - -#[activity(SetFinished)] -pub async fn set_finished(ctx: &ActivityCtx, input: &SetFinishedInput) -> GlobalResult<()> { - let pool = ctx.sqlite().await?; - - sql_execute!( - [ctx, pool] - " - UPDATE state - SET finish_ts = ? - ", - util::timestamp::now(), - ) - .await?; - - Ok(()) -} diff --git a/packages/edge/services/pegboard/src/workflows/client/mod.rs b/packages/edge/services/pegboard/src/workflows/client/mod.rs index 90f2d1afe3..e629e9e4a8 100644 --- a/packages/edge/services/pegboard/src/workflows/client/mod.rs +++ b/packages/edge/services/pegboard/src/workflows/client/mod.rs @@ -21,7 +21,7 @@ use sqlx::Acquire; use crate::{ client_config, keys, metrics, protocol, protocol::ClientFlavor, system_info, - workflows::actor2::Allocate, + workflows::actor::Allocate, }; mod migrations; @@ -131,7 +131,7 @@ pub async fn pegboard_client(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu for alloc in res.allocations { ctx.v(2) .signal(alloc.signal) - .to_workflow::() + .to_workflow::() .tag("actor_id", alloc.actor_id) .send() .await?; @@ -166,11 +166,11 @@ pub async fn pegboard_client(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu { // Try actor2 first let res = ctx - .signal(crate::workflows::actor2::StateUpdate { + .signal(crate::workflows::actor::StateUpdate { generation, state: state.clone(), }) - .to_workflow::() + .to_workflow::() .tag("actor_id", actor_id) .send() .await; @@ -180,11 +180,11 @@ pub async fn pegboard_client(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu { // Try old actors let res = ctx - .signal(crate::workflows::actor::StateUpdate { + .signal(crate::workflows::actor::v1::StateUpdate { generation, state, }) - .to_workflow::() + .to_workflow::() .tag("actor_id", actor_id) .send() .await; @@ -272,8 +272,8 @@ pub async fn pegboard_client(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu for actor_id in actor_ids { // Try actor2 first let res = ctx - .signal(crate::workflows::actor2::Undrain {}) - .to_workflow::() + .signal(crate::workflows::actor::Undrain {}) + .to_workflow::() .tag("actor_id", actor_id) .send() .await; @@ -281,8 +281,8 @@ pub async fn pegboard_client(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu if let Some(WorkflowError::WorkflowNotFound) = res.as_workflow_error() { // Try old actors let res = ctx - .signal(crate::workflows::actor::Undrain {}) - .to_workflow::() + .signal(crate::workflows::actor::v1::Undrain {}) + .to_workflow::() .tag("actor_id", actor_id) .send() .await; @@ -307,7 +307,7 @@ pub async fn pegboard_client(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu for alloc in res.allocations { ctx.v(2) .signal(alloc.signal) - .to_workflow::() + .to_workflow::() .tag("actor_id", alloc.actor_id) .send() .await?; @@ -321,7 +321,7 @@ pub async fn pegboard_client(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu for alloc in res.allocations { ctx.v(2) .signal(alloc.signal) - .to_workflow::() + .to_workflow::() .tag("actor_id", 
alloc.actor_id) .send() .await?; @@ -364,11 +364,11 @@ pub async fn pegboard_client(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu for (actor_id, generation) in actors { // Try actor2 first let res = ctx - .signal(crate::workflows::actor2::StateUpdate { + .signal(crate::workflows::actor::StateUpdate { generation, state: protocol::ActorState::Lost, }) - .to_workflow::() + .to_workflow::() .tag("actor_id", actor_id) .send() .await; @@ -376,11 +376,11 @@ pub async fn pegboard_client(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResu if let Some(WorkflowError::WorkflowNotFound) = res.as_workflow_error() { // Try old actors let res = ctx - .signal(crate::workflows::actor::StateUpdate { + .signal(crate::workflows::actor::v1::StateUpdate { generation, state: protocol::ActorState::Lost, }) - .to_workflow::() + .to_workflow::() .tag("actor_id", actor_id) .send() .await; @@ -836,16 +836,16 @@ pub async fn handle_commands( if let Some(drain_timeout_ts) = drain_timeout_ts { // Try actor2 first let res = ctx - .signal(crate::workflows::actor2::Drain { drain_timeout_ts }) - .to_workflow::() + .signal(crate::workflows::actor::Drain { drain_timeout_ts }) + .to_workflow::() .tag("actor_id", actor_id) .send() .await; if let Some(WorkflowError::WorkflowNotFound) = res.as_workflow_error() { // Try old actors - ctx.signal(crate::workflows::actor::Drain { drain_timeout_ts }) - .to_workflow::() + ctx.signal(crate::workflows::actor::v1::Drain { drain_timeout_ts }) + .to_workflow::() .tag("actor_id", actor_id) .send() .await?; @@ -863,11 +863,11 @@ pub async fn handle_commands( if matches!(signal.try_into()?, Signal::SIGTERM | Signal::SIGKILL) { // Try actor2 first let res = ctx - .signal(crate::workflows::actor2::StateUpdate { + .signal(crate::workflows::actor::StateUpdate { generation, state: protocol::ActorState::Stopping, }) - .to_workflow::() + .to_workflow::() .tag("actor_id", actor_id) .send() .await; @@ -875,11 +875,11 @@ pub async fn handle_commands( if let Some(WorkflowError::WorkflowNotFound) = res.as_workflow_error() { // Try old actors let res = ctx - .signal(crate::workflows::actor::StateUpdate { + .signal(crate::workflows::actor::v1::StateUpdate { generation, state: protocol::ActorState::Stopping, }) - .to_workflow::() + .to_workflow::() .tag("actor_id", actor_id) .send() .await; diff --git a/packages/edge/services/pegboard/src/workflows/mod.rs b/packages/edge/services/pegboard/src/workflows/mod.rs index 9e896b1c34..69eb77c1dc 100644 --- a/packages/edge/services/pegboard/src/workflows/mod.rs +++ b/packages/edge/services/pegboard/src/workflows/mod.rs @@ -1,3 +1,2 @@ pub mod actor; -pub mod actor2; pub mod client; diff --git a/scripts/openapi/gen_rust.ts b/scripts/openapi/gen_rust.ts index 985054d286..7531428bb9 100755 --- a/scripts/openapi/gen_rust.ts +++ b/scripts/openapi/gen_rust.ts @@ -35,7 +35,7 @@ async function generateRustSdk() { console.log("Running OpenAPI generator"); // Delete existing directories - await Deno.remove(GEN_PATH_RUST, { recursive: true }).catch(() => {}); + await Deno.remove(GEN_PATH_RUST, { recursive: true }).catch(() => { }); const dockerCmd = new Deno.Command("docker", { args: [ @@ -73,6 +73,18 @@ async function fixOpenApiBugs() { "actors_logs_api.rs": [ [/ActorsQueryLogStream/g, "crate::models::ActorsQueryLogStream"], ], + "containers_api.rs": [ + [/ContainersEndpointType/g, "crate::models::ContainersEndpointType"], + ], + "containers_logs_api.rs": [ + [/ContainersQueryLogStream/g, "crate::models::ContainersQueryLogStream"], + ], + "actors_v1_api.rs": [ + 
[/ActorsV1EndpointType/g, "crate::models::ActorsV1EndpointType"], + ], + "actors_v1_logs_api.rs": [ + [/ActorsV1QueryLogStream/g, "crate::models::ActorsV1QueryLogStream"], + ], "servers_logs_api.rs": [ [/ServersLogStream/g, "crate::models::ServersLogStream"], ], diff --git a/sdks/api/fern/definition/actors/__package__.yml b/sdks/api/fern/definition/actors/__package__.yml index 7aa261f32f..0dacff00e5 100644 --- a/sdks/api/fern/definition/actors/__package__.yml +++ b/sdks/api/fern/definition/actors/__package__.yml @@ -6,7 +6,7 @@ imports: service: auth: true - base-path: /actors + base-path: /v2/actors audiences: - runtime endpoints: diff --git a/sdks/api/fern/definition/actors/common.yml b/sdks/api/fern/definition/actors/common.yml index 6a62bc8c57..4c31ef7dd9 100644 --- a/sdks/api/fern/definition/actors/common.yml +++ b/sdks/api/fern/definition/actors/common.yml @@ -11,7 +11,6 @@ types: tags: unknown runtime: Runtime network: Network - resources: optional lifecycle: Lifecycle created_at: commons.Timestamp started_at: optional @@ -37,18 +36,6 @@ types: datacenter failover. The actor will not reschedule if it exits successfully. type: optional - Resources: - properties: - cpu: - docs: | - The number of CPU cores in millicores, or 1/1000 of a core. For example, - 1/8 of a core would be 125 millicores, and 1 core would be 1000 - millicores. - type: integer - memory: - docs: The amount of memory in megabytes - type: integer - Network: properties: mode: NetworkMode diff --git a/sdks/api/fern/definition/actors/logs.yml b/sdks/api/fern/definition/actors/logs.yml index bea5b1d4e1..a938b07594 100644 --- a/sdks/api/fern/definition/actors/logs.yml +++ b/sdks/api/fern/definition/actors/logs.yml @@ -5,7 +5,7 @@ imports: service: auth: true - base-path: /actors + base-path: /v2/actors audiences: - runtime endpoints: @@ -48,7 +48,7 @@ types: properties: actor_ids: docs: List of actor IDs in these logs. The order of these correspond to the index in the log entry. - type: list + type: list lines: docs: Sorted old to new. type: list diff --git a/sdks/api/fern/definition/actors/metrics.yml b/sdks/api/fern/definition/actors/metrics.yml index 7bbbbcad72..d680dd18b4 100644 --- a/sdks/api/fern/definition/actors/metrics.yml +++ b/sdks/api/fern/definition/actors/metrics.yml @@ -5,7 +5,9 @@ imports: service: auth: true - base-path: /actors + base-path: /v2/actors + audiences: + - runtime endpoints: get: path: /{actor}/metrics/history @@ -15,7 +17,7 @@ service: path-parameters: actor: docs: The id of the actor to destroy - type: uuid + type: commons.Id request: name: GetActorMetricsRequestQuery query-parameters: diff --git a/sdks/api/fern/definition/actors/v1/__package__.yml b/sdks/api/fern/definition/actors/v1/__package__.yml new file mode 100644 index 0000000000..0fcb0734c1 --- /dev/null +++ b/sdks/api/fern/definition/actors/v1/__package__.yml @@ -0,0 +1,180 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/fern-api/fern/main/fern.schema.json + +imports: + commons: ../../common.yml + localCommons: common.yml + +service: + availability: deprecated + auth: true + base-path: /actors + audiences: + - internal + endpoints: + get: + path: /{actor} + method: GET + docs: Gets a actor. 
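The Resources schema removed from the v2 actors definition above (and retained in the v1 and containers definitions later in this patch) expresses cpu in millicores, i.e. thousandths of a core. A small unit-conversion sketch, purely illustrative (the helper name is hypothetical):

// Hypothetical helper mirroring the documented unit: `cpu` is in millicores,
// so 1/8 of a core is 125 and a full core is 1000.
fn cores_to_millicores(cores: f64) -> i64 {
    (cores * 1000.0).round() as i64
}

For example, cores_to_millicores(0.125) == 125 and cores_to_millicores(1.0) == 1000, matching the examples in the field docs.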
+ path-parameters: + actor: + docs: The id of the actor to destroy + type: uuid + request: + name: ListActorsRequestQuery + query-parameters: + project: optional + environment: optional + endpoint_type: optional + response: GetActorResponse + + list: + path: "" + method: GET + docs: >- + Lists all actors associated with the token used. Can be filtered by + tags in the query string. + request: + name: GetActorsRequestQuery + query-parameters: + project: optional + environment: optional + endpoint_type: optional + tags_json: optional + include_destroyed: optional + cursor: optional + response: ListActorsResponse + + create: + path: "" + method: POST + docs: Create a new actor. + request: + name: CreateActorRequestQuery + body: CreateActorRequest + query-parameters: + project: optional + environment: optional + endpoint_type: optional + response: CreateActorResponse + + destroy: + path: /{actor} + method: DELETE + docs: Destroy a actor. + path-parameters: + actor: + docs: The id of the actor to destroy + type: uuid + request: + name: DestroyActorRequestQuery + query-parameters: + project: optional + environment: optional + override_kill_timeout: + docs: >- + The duration to wait for in milliseconds before killing the actor. + This should be used to override the default kill timeout if a faster + time is needed, say for ignoring a graceful shutdown. + type: optional + response: DestroyActorResponse + + upgrade: + path: /{actor}/upgrade + method: POST + docs: Upgrades a actor. + path-parameters: + actor: + docs: The id of the actor to upgrade + type: uuid + request: + name: UpgradeActorRequestQuery + query-parameters: + project: optional + environment: optional + body: UpgradeActorRequest + response: UpgradeActorResponse + + upgradeAll: + path: /upgrade + method: POST + docs: Upgrades all actors matching the given tags. + request: + name: UpgradeAllActorsRequestQuery + query-parameters: + project: optional + environment: optional + body: UpgradeAllActorsRequest + response: UpgradeAllActorsResponse + +types: + GetActorResponse: + properties: + actor: localCommons.Actor + + CreateActorRequest: + properties: + region: optional + tags: unknown + build: optional + build_tags: optional + runtime: optional + network: optional + resources: optional + lifecycle: optional + + CreateActorRuntimeRequest: + properties: + # arguments: optional> + environment: optional> + network: optional + + CreateActorRuntimeNetworkRequest: + properties: + endpoint_type: localCommons.EndpointType + + CreateActorNetworkRequest: + properties: + mode: optional + ports: optional> + wait_ready: optional + + CreateActorPortRequest: + properties: + protocol: localCommons.PortProtocol + internal_port: optional + routing: optional + + CreateActorResponse: + properties: + actor: + docs: The actor that was created + type: localCommons.Actor + + DestroyActorResponse: + properties: {} + + UpgradeActorRequest: + properties: + build: optional + build_tags: optional + + UpgradeActorResponse: + properties: {} + + UpgradeAllActorsRequest: + properties: + tags: unknown + build: optional + build_tags: optional + + UpgradeAllActorsResponse: + properties: + count: long + + ListActorsResponse: + properties: + actors: + docs: A list of actors for the project associated with the token. 
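The list endpoints defined here, like the containers list endpoint further down, share the same cursor scheme: the request takes an optional cursor query parameter and the response carries a pagination object. A rough sketch of how a caller pages through results, written against a hypothetical fetch closure rather than the generated SDK types:

// Illustrative pagination loop; `ListPage` and `collect_all` are hypothetical
// stand-ins for the generated list responses and client calls.
trait ListPage {
    fn count(&self) -> usize;
    fn next_cursor(&self) -> Option<String>;
}

fn collect_all<P: ListPage>(mut fetch: impl FnMut(Option<String>) -> P) -> usize {
    let mut cursor: Option<String> = None;
    let mut total = 0;
    loop {
        let page = fetch(cursor.clone());
        total += page.count();
        match page.next_cursor() {
            // Keep paging while the server returns a cursor and a non-empty page.
            Some(next) if page.count() > 0 => cursor = Some(next),
            _ => break,
        }
    }
    total
}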
+ type: list + pagination: commons.Pagination + diff --git a/sdks/api/fern/definition/actors/v1/common.yml b/sdks/api/fern/definition/actors/v1/common.yml new file mode 100644 index 0000000000..1798bc79f1 --- /dev/null +++ b/sdks/api/fern/definition/actors/v1/common.yml @@ -0,0 +1,98 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/fern-api/fern/main/fern.schema.json + +imports: + commons: ../../common.yml + +types: + Actor: + properties: + id: uuid + region: string + tags: unknown + runtime: Runtime + network: Network + resources: optional + lifecycle: Lifecycle + created_at: commons.Timestamp + started_at: optional + destroyed_at: optional + + Runtime: + properties: + build: uuid + arguments: optional> + environment: optional> + + Lifecycle: + properties: + kill_timeout: + docs: >- + The duration to wait for in milliseconds before killing the actor. + This should be set to a safe default, and can be overridden during a + DELETE request if needed. + type: optional + durable: + docs: >- + If true, the actor will try to reschedule itself automatically in the event of a crash or a + datacenter failover. The actor will not reschedule if it exits successfully. + type: optional + + Resources: + properties: + cpu: + docs: | + The number of CPU cores in millicores, or 1/1000 of a core. For example, + 1/8 of a core would be 125 millicores, and 1 core would be 1000 + millicores. + type: integer + memory: + docs: The amount of memory in megabytes + type: integer + + Network: + properties: + mode: NetworkMode + ports: map + + NetworkMode: + enum: + - bridge + - host + + Port: + properties: + protocol: PortProtocol + internal_port: optional + hostname: optional + port: optional + path: optional + url: + docs: | + Fully formed connection URL including protocol, hostname, port, and path, if applicable. + type: optional + routing: PortRouting + + PortProtocol: + enum: + - http + - https + - tcp + - tcp_tls + - udp + + PortRouting: + properties: + guard: optional + host: optional + + GuardRouting: + properties: {} + + HostRouting: + properties: {} + + EndpointType: + enum: + - hostname + - path + diff --git a/sdks/api/fern/definition/actors/v1/logs.yml b/sdks/api/fern/definition/actors/v1/logs.yml new file mode 100644 index 0000000000..c39590b881 --- /dev/null +++ b/sdks/api/fern/definition/actors/v1/logs.yml @@ -0,0 +1,61 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/fern-api/fern/main/fern.schema.json + +imports: + commons: ../../common.yml + +service: + availability: deprecated + auth: true + base-path: /actors + audiences: + - internal + endpoints: + get: + path: /logs + method: GET + docs: >- + Returns the logs for a given actor. + request: + name: GetActorLogsRequestQuery + query-parameters: + project: optional + environment: optional + stream: QueryLogStream + actor_ids_json: string + search_text: optional + search_case_sensitive: optional + search_enable_regex: optional + watch_index: + docs: A query parameter denoting the requests watch index. + type: optional + response: GetActorLogsResponse + +types: + GetActorLogsResponse: + properties: + actor_ids: + docs: List of actor IDs in these logs. The order of these correspond to the index in the log entry. + type: list + lines: + docs: Sorted old to new. + type: list + timestamps: + docs: Sorted old to new. + type: list + streams: + docs: | + Streams the logs came from. + + 0 = stdout + 1 = stderr + type: list + actor_indices: + docs: Index of the actor that this log was for. 
Use this index to look the full ID in `actor_ids`. + type: list + watch: commons.WatchResponse + + QueryLogStream: + enum: + - std_out + - std_err + - all diff --git a/sdks/api/fern/definition/actors/v1/metrics.yml b/sdks/api/fern/definition/actors/v1/metrics.yml new file mode 100644 index 0000000000..3bd55c4e5e --- /dev/null +++ b/sdks/api/fern/definition/actors/v1/metrics.yml @@ -0,0 +1,39 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/fern-api/fern/main/fern.schema.json + +imports: + commons: ../../common.yml + +service: + availability: deprecated + auth: true + base-path: /actors + audiences: + - internal + endpoints: + get: + path: /{actor}/metrics/history + method: GET + docs: >- + Returns the metrics for a given actor. + path-parameters: + actor: + docs: The id of the actor to destroy + type: uuid + request: + name: GetActorMetricsRequestQuery + query-parameters: + project: optional + environment: optional + start: integer + end: integer + interval: integer + response: GetActorMetricsResponse + +types: + GetActorMetricsResponse: + properties: + actor_ids: list + metric_names: list + metric_attributes: list> + metric_types: list + metric_values: list> diff --git a/sdks/api/fern/definition/api.yml b/sdks/api/fern/definition/api.yml index d4ae8fa259..9bbc57bae5 100644 --- a/sdks/api/fern/definition/api.yml +++ b/sdks/api/fern/definition/api.yml @@ -18,6 +18,7 @@ audiences: # API library. All other API endpoints are not essential for # using Rivet at runtime (e.g. cloud, identity, etc). - runtime + - internal errors: - commons.InternalError - commons.RateLimitError diff --git a/sdks/api/fern/definition/builds/common.yml b/sdks/api/fern/definition/builds/common.yml index 6dc10eb188..f559b871fd 100644 --- a/sdks/api/fern/definition/builds/common.yml +++ b/sdks/api/fern/definition/builds/common.yml @@ -25,7 +25,9 @@ types: - value: oci_bundle docs: OCI-compliant bundle. - value: javascript - docs: A JavaScript file. + docs: | + **Deprecated** + A JavaScript file. Compression: enum: diff --git a/sdks/api/fern/definition/containers/__package__.yml b/sdks/api/fern/definition/containers/__package__.yml new file mode 100644 index 0000000000..510978619a --- /dev/null +++ b/sdks/api/fern/definition/containers/__package__.yml @@ -0,0 +1,179 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/fern-api/fern/main/fern.schema.json + +imports: + commons: ../common.yml + localCommons: common.yml + +service: + auth: true + base-path: /v1/containers + audiences: + - runtime + endpoints: + get: + path: /{container} + method: GET + docs: Gets a container. + path-parameters: + container: + docs: The id of the container to destroy + type: commons.Id + request: + name: ListContainersRequestQuery + query-parameters: + project: optional + environment: optional + endpoint_type: optional + response: GetContainerResponse + + list: + path: "" + method: GET + docs: >- + Lists all containers associated with the token used. Can be filtered by + tags in the query string. + request: + name: GetContainersRequestQuery + query-parameters: + project: optional + environment: optional + endpoint_type: optional + tags_json: optional + include_destroyed: optional + cursor: optional + response: ListContainersResponse + + create: + path: "" + method: POST + docs: Create a new container. 
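Both log responses in this patch (the deprecated actors v1 logs above and the containers logs below) return parallel arrays: lines, timestamps, streams, and an index array that points back into the ID list. A rough sketch of how a consumer stitches them together; the function and its signature are hypothetical, not part of the SDK:

// Illustrative only: attribute each log line to its owning actor/container
// by following `actor_indices[i]` (or `container_indices[i]`) into the ID list.
fn attribute_lines(
    ids: &[String],
    lines: &[String],
    indices: &[usize],
) -> Vec<(String, String)> {
    lines
        .iter()
        .zip(indices)
        .filter_map(|(line, &idx)| ids.get(idx).map(|id| (id.clone(), line.clone())))
        .collect()
}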
+ request: + name: CreateContainerRequestQuery + body: CreateContainerRequest + query-parameters: + project: optional + environment: optional + endpoint_type: optional + response: CreateContainerResponse + + destroy: + path: /{container} + method: DELETE + docs: Destroy a container. + path-parameters: + container: + docs: The id of the container to destroy + type: commons.Id + request: + name: DestroyContainerRequestQuery + query-parameters: + project: optional + environment: optional + override_kill_timeout: + docs: >- + The duration to wait for in milliseconds before killing the container. + This should be used to override the default kill timeout if a faster + time is needed, say for ignoring a graceful shutdown. + type: optional + response: DestroyContainerResponse + + upgrade: + path: /{container}/upgrade + method: POST + docs: Upgrades a container. + path-parameters: + container: + docs: The id of the container to upgrade + type: commons.Id + request: + name: UpgradeContainerRequestQuery + query-parameters: + project: optional + environment: optional + body: UpgradeContainerRequest + response: UpgradeContainerResponse + + upgradeAll: + path: /upgrade + method: POST + docs: Upgrades all containers matching the given tags. + request: + name: UpgradeAllContainersRequestQuery + query-parameters: + project: optional + environment: optional + body: UpgradeAllContainersRequest + response: UpgradeAllContainersResponse + +types: + GetContainerResponse: + properties: + container: localCommons.Container + + CreateContainerRequest: + properties: + region: optional + tags: unknown + build: optional + build_tags: optional + runtime: optional + network: optional + resources: localCommons.Resources + lifecycle: optional + + CreateContainerRuntimeRequest: + properties: + # arguments: optional> + environment: optional> + network: optional + + CreateContainerRuntimeNetworkRequest: + properties: + endpoint_type: localCommons.EndpointType + + CreateContainerNetworkRequest: + properties: + mode: optional + ports: optional> + wait_ready: optional + + CreateContainerPortRequest: + properties: + protocol: localCommons.PortProtocol + internal_port: optional + routing: optional + + CreateContainerResponse: + properties: + container: + docs: The container that was created + type: localCommons.Container + + DestroyContainerResponse: + properties: {} + + UpgradeContainerRequest: + properties: + build: optional + build_tags: optional + + UpgradeContainerResponse: + properties: {} + + UpgradeAllContainersRequest: + properties: + tags: unknown + build: optional + build_tags: optional + + UpgradeAllContainersResponse: + properties: + count: long + + ListContainersResponse: + properties: + containers: + docs: A list of containers for the project associated with the token. 
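The containers destroy endpoint above mirrors the v1 actors one: override_kill_timeout (milliseconds) takes precedence over the kill_timeout configured in the lifecycle, for example to skip a graceful shutdown. A hypothetical helper expressing that precedence; the fallback of 0 when neither value is set is an assumption, not stated in the definitions:

// Illustrative only; precedence follows the field docs, the default of 0
// (kill immediately) is an assumption.
fn effective_kill_timeout_ms(lifecycle_ms: Option<i64>, override_ms: Option<i64>) -> i64 {
    override_ms.or(lifecycle_ms).unwrap_or(0)
}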
+ type: list + pagination: commons.Pagination + diff --git a/sdks/api/fern/definition/containers/common.yml b/sdks/api/fern/definition/containers/common.yml new file mode 100644 index 0000000000..6a15afac2c --- /dev/null +++ b/sdks/api/fern/definition/containers/common.yml @@ -0,0 +1,98 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/fern-api/fern/main/fern.schema.json + +imports: + commons: ../common.yml + +types: + Container: + properties: + id: commons.Id + region: string + tags: unknown + runtime: Runtime + network: Network + resources: Resources + lifecycle: Lifecycle + created_at: commons.Timestamp + started_at: optional + destroyed_at: optional + + Runtime: + properties: + build: uuid + arguments: optional> + environment: optional> + + Lifecycle: + properties: + kill_timeout: + docs: >- + The duration to wait for in milliseconds before killing the container. + This should be set to a safe default, and can be overridden during a + DELETE request if needed. + type: optional + durable: + docs: >- + If true, the container will try to reschedule itself automatically in the event of a crash or a + datacenter failover. The container will not reschedule if it exits successfully. + type: optional + + Resources: + properties: + cpu: + docs: | + The number of CPU cores in millicores, or 1/1000 of a core. For example, + 1/8 of a core would be 125 millicores, and 1 core would be 1000 + millicores. + type: integer + memory: + docs: The amount of memory in megabytes + type: integer + + Network: + properties: + mode: NetworkMode + ports: map + + NetworkMode: + enum: + - bridge + - host + + Port: + properties: + protocol: PortProtocol + internal_port: optional + hostname: optional + port: optional + path: optional + url: + docs: | + Fully formed connection URL including protocol, hostname, port, and path, if applicable. + type: optional + routing: PortRouting + + PortProtocol: + enum: + - http + - https + - tcp + - tcp_tls + - udp + + PortRouting: + properties: + guard: optional + host: optional + + GuardRouting: + properties: {} + + HostRouting: + properties: {} + + EndpointType: + enum: + - hostname + - path + diff --git a/sdks/api/fern/definition/containers/logs.yml b/sdks/api/fern/definition/containers/logs.yml new file mode 100644 index 0000000000..481b8dcda4 --- /dev/null +++ b/sdks/api/fern/definition/containers/logs.yml @@ -0,0 +1,63 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/fern-api/fern/main/fern.schema.json + +imports: + commons: ../common.yml + +service: + auth: true + base-path: /v1/containers + audiences: + - runtime + endpoints: + get: + path: /logs + method: GET + docs: >- + Returns the logs for a given container. + request: + name: GetContainerLogsRequestQuery + query-parameters: + project: optional + environment: optional + stream: QueryLogStream + container_ids_json: string + search_text: optional + search_case_sensitive: optional + search_enable_regex: optional + watch_index: + docs: A query parameter denoting the requests watch index. + type: optional + response: GetContainerLogsResponse + +types: + GetContainerLogsResponse: + properties: + container_ids: + docs: List of container IDs in these logs. The order of these correspond to the index in the log entry. + type: list + lines: + docs: Sorted old to new. + type: list + timestamps: + docs: Sorted old to new. + type: list + streams: + docs: | + Streams the logs came from. 
+ + 0 = stdout + 1 = stderr + type: list + foreigns: + docs: List of flags denoting if this log is not directly from the container. + type: list + container_indices: + docs: Index of the container that this log was for. Use this index to look the full ID in `container_ids`. + type: list + watch: commons.WatchResponse + + QueryLogStream: + enum: + - std_out + - std_err + - all diff --git a/sdks/api/fern/definition/containers/metrics.yml b/sdks/api/fern/definition/containers/metrics.yml new file mode 100644 index 0000000000..40d136b185 --- /dev/null +++ b/sdks/api/fern/definition/containers/metrics.yml @@ -0,0 +1,38 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/fern-api/fern/main/fern.schema.json + +imports: + commons: ../common.yml + +service: + auth: true + base-path: /v1/containers + audiences: + - runtime + endpoints: + get: + path: /{container}/metrics/history + method: GET + docs: >- + Returns the metrics for a given container. + path-parameters: + container: + docs: The id of the container to destroy + type: commons.Id + request: + name: GetContainerMetricsRequestQuery + query-parameters: + project: optional + environment: optional + start: integer + end: integer + interval: integer + response: GetContainerMetricsResponse + +types: + GetContainerMetricsResponse: + properties: + container_ids: list + metric_names: list + metric_attributes: list> + metric_types: list + metric_values: list> diff --git a/sdks/api/full/go/actors/actors.go b/sdks/api/full/go/actors/actors.go index 353a82855a..bab2fcb7b2 100644 --- a/sdks/api/full/go/actors/actors.go +++ b/sdks/api/full/go/actors/actors.go @@ -17,7 +17,6 @@ type CreateActorRequest struct { BuildTags interface{} `json:"build_tags,omitempty"` Runtime *CreateActorRuntimeRequest `json:"runtime,omitempty"` Network *CreateActorNetworkRequest `json:"network,omitempty"` - Resources *Resources `json:"resources,omitempty"` Lifecycle *Lifecycle `json:"lifecycle,omitempty"` _rawJSON json.RawMessage diff --git a/sdks/api/full/go/actors/client/client.go b/sdks/api/full/go/actors/client/client.go index e50e8077b7..37461e8fd2 100644 --- a/sdks/api/full/go/actors/client/client.go +++ b/sdks/api/full/go/actors/client/client.go @@ -15,6 +15,7 @@ import ( actors "sdk/actors" logs "sdk/actors/logs" metrics "sdk/actors/metrics" + v1client "sdk/actors/v1/client" core "sdk/core" ) @@ -23,6 +24,7 @@ type Client struct { caller *core.Caller header http.Header + V1 *v1client.Client Logs *logs.Client Metrics *metrics.Client } @@ -36,6 +38,7 @@ func NewClient(opts ...core.ClientOption) *Client { baseURL: options.BaseURL, caller: core.NewCaller(options.HTTPClient), header: options.ToHeader(), + V1: v1client.NewClient(opts...), Logs: logs.NewClient(opts...), Metrics: metrics.NewClient(opts...), } @@ -49,7 +52,7 @@ func (c *Client) Get(ctx context.Context, actor sdk.Id, request *actors.ListActo if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"actors/%v", actor) + endpointURL := fmt.Sprintf(baseURL+"/"+"v2/actors/%v", actor) queryParams := make(url.Values) if request.Project != nil { @@ -141,7 +144,7 @@ func (c *Client) List(ctx context.Context, request *actors.GetActorsRequestQuery if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "actors" + endpointURL := baseURL + "/" + "v2/actors" queryParams := make(url.Values) if request.Project != nil { @@ -242,7 +245,7 @@ func (c *Client) Create(ctx context.Context, request *actors.CreateActorRequestQ if c.baseURL != "" { baseURL = c.baseURL } - 
endpointURL := baseURL + "/" + "actors" + endpointURL := baseURL + "/" + "v2/actors" queryParams := make(url.Values) if request.Project != nil { @@ -337,7 +340,7 @@ func (c *Client) Destroy(ctx context.Context, actor sdk.Id, request *actors.Dest if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"actors/%v", actor) + endpointURL := fmt.Sprintf(baseURL+"/"+"v2/actors/%v", actor) queryParams := make(url.Values) if request.Project != nil { @@ -431,7 +434,7 @@ func (c *Client) Upgrade(ctx context.Context, actor sdk.Id, request *actors.Upgr if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"actors/%v/upgrade", actor) + endpointURL := fmt.Sprintf(baseURL+"/"+"v2/actors/%v/upgrade", actor) queryParams := make(url.Values) if request.Project != nil { @@ -521,7 +524,7 @@ func (c *Client) UpgradeAll(ctx context.Context, request *actors.UpgradeAllActor if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "actors/upgrade" + endpointURL := baseURL + "/" + "v2/actors/upgrade" queryParams := make(url.Values) if request.Project != nil { diff --git a/sdks/api/full/go/actors/logs.go b/sdks/api/full/go/actors/logs.go index a2de673b79..0b026abb28 100644 --- a/sdks/api/full/go/actors/logs.go +++ b/sdks/api/full/go/actors/logs.go @@ -57,7 +57,7 @@ func (e *ExportActorLogsResponse) String() string { type GetActorLogsResponse struct { // List of actor IDs in these logs. The order of these correspond to the index in the log entry. - ActorIds []sdk.Id `json:"actor_ids,omitempty"` + ActorIds []string `json:"actor_ids,omitempty"` // Sorted old to new. Lines []string `json:"lines,omitempty"` // Sorted old to new. diff --git a/sdks/api/full/go/actors/logs/client.go b/sdks/api/full/go/actors/logs/client.go index 7906455cb4..64ecec339b 100644 --- a/sdks/api/full/go/actors/logs/client.go +++ b/sdks/api/full/go/actors/logs/client.go @@ -40,7 +40,7 @@ func (c *Client) Get(ctx context.Context, request *actors.GetActorLogsRequestQue if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "actors/logs" + endpointURL := baseURL + "/" + "v2/actors/logs" queryParams := make(url.Values) if request.Project != nil { diff --git a/sdks/api/full/go/actors/metrics/client.go b/sdks/api/full/go/actors/metrics/client.go index 01ec21a217..54509951b0 100644 --- a/sdks/api/full/go/actors/metrics/client.go +++ b/sdks/api/full/go/actors/metrics/client.go @@ -8,7 +8,6 @@ import ( json "encoding/json" errors "errors" fmt "fmt" - uuid "github.com/google/uuid" io "io" http "net/http" url "net/url" @@ -38,12 +37,12 @@ func NewClient(opts ...core.ClientOption) *Client { // Returns the metrics for a given actor. 
// // The id of the actor to destroy -func (c *Client) Get(ctx context.Context, actor uuid.UUID, request *actors.GetActorMetricsRequestQuery) (*actors.GetActorMetricsResponse, error) { +func (c *Client) Get(ctx context.Context, actor sdk.Id, request *actors.GetActorMetricsRequestQuery) (*actors.GetActorMetricsResponse, error) { baseURL := "https://api.rivet.gg" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"actors/%v/metrics/history", actor) + endpointURL := fmt.Sprintf(baseURL+"/"+"v2/actors/%v/metrics/history", actor) queryParams := make(url.Values) if request.Project != nil { diff --git a/sdks/api/full/go/actors/types.go b/sdks/api/full/go/actors/types.go index c6795304b1..b5af2044e9 100644 --- a/sdks/api/full/go/actors/types.go +++ b/sdks/api/full/go/actors/types.go @@ -67,7 +67,6 @@ type Actor struct { Tags interface{} `json:"tags,omitempty"` Runtime *Runtime `json:"runtime,omitempty"` Network *Network `json:"network,omitempty"` - Resources *Resources `json:"resources,omitempty"` Lifecycle *Lifecycle `json:"lifecycle,omitempty"` CreatedAt sdk.Timestamp `json:"created_at"` StartedAt *sdk.Timestamp `json:"started_at,omitempty"` @@ -356,40 +355,6 @@ func (p *PortRouting) String() string { return fmt.Sprintf("%#v", p) } -type Resources struct { - // The number of CPU cores in millicores, or 1/1000 of a core. For example, - // 1/8 of a core would be 125 millicores, and 1 core would be 1000 - // millicores. - Cpu int `json:"cpu"` - // The amount of memory in megabytes - Memory int `json:"memory"` - - _rawJSON json.RawMessage -} - -func (r *Resources) UnmarshalJSON(data []byte) error { - type unmarshaler Resources - var value unmarshaler - if err := json.Unmarshal(data, &value); err != nil { - return err - } - *r = Resources(value) - r._rawJSON = json.RawMessage(data) - return nil -} - -func (r *Resources) String() string { - if len(r._rawJSON) > 0 { - if value, err := core.StringifyJSON(r._rawJSON); err == nil { - return value - } - } - if value, err := core.StringifyJSON(r); err == nil { - return value - } - return fmt.Sprintf("%#v", r) -} - type Runtime struct { Build uuid.UUID `json:"build"` Arguments []string `json:"arguments,omitempty"` diff --git a/sdks/api/full/go/actors/v1/client/client.go b/sdks/api/full/go/actors/v1/client/client.go new file mode 100644 index 0000000000..2f945d94b3 --- /dev/null +++ b/sdks/api/full/go/actors/v1/client/client.go @@ -0,0 +1,607 @@ +// This file was auto-generated by Fern from our API Definition. + +package client + +import ( + bytes "bytes" + context "context" + json "encoding/json" + errors "errors" + fmt "fmt" + uuid "github.com/google/uuid" + io "io" + http "net/http" + url "net/url" + sdk "sdk" + v1 "sdk/actors/v1" + logs "sdk/actors/v1/logs" + metrics "sdk/actors/v1/metrics" + core "sdk/core" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header + + Logs *logs.Client + Metrics *metrics.Client +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + Logs: logs.NewClient(opts...), + Metrics: metrics.NewClient(opts...), + } +} + +// Gets a actor. 
+// +// The id of the actor to destroy +func (c *Client) Get(ctx context.Context, actor uuid.UUID, request *v1.ListActorsRequestQuery) (*v1.GetActorResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"actors/%v", actor) + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if request.EndpointType != nil { + queryParams.Add("endpoint_type", fmt.Sprintf("%v", *request.EndpointType)) + } + if len(queryParams) > 0 { + endpointURL += "?" + queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *v1.GetActorResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Lists all actors associated with the token used. Can be filtered by tags in the query string. +func (c *Client) List(ctx context.Context, request *v1.GetActorsRequestQuery) (*v1.ListActorsResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "actors" + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if request.EndpointType != nil { + queryParams.Add("endpoint_type", fmt.Sprintf("%v", *request.EndpointType)) + } + if request.TagsJson != nil { + queryParams.Add("tags_json", fmt.Sprintf("%v", *request.TagsJson)) + } + if request.IncludeDestroyed != nil { + queryParams.Add("include_destroyed", fmt.Sprintf("%v", *request.IncludeDestroyed)) + } + if request.Cursor != nil { + queryParams.Add("cursor", fmt.Sprintf("%v", *request.Cursor)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *v1.ListActorsResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Create a new actor. +func (c *Client) Create(ctx context.Context, request *v1.CreateActorRequestQuery) (*v1.CreateActorResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "actors" + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if request.EndpointType != nil { + queryParams.Add("endpoint_type", fmt.Sprintf("%v", *request.EndpointType)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *v1.CreateActorResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodPost, + Headers: c.header, + Request: request, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Destroy a actor. +// +// The id of the actor to destroy +func (c *Client) Destroy(ctx context.Context, actor uuid.UUID, request *v1.DestroyActorRequestQuery) (*v1.DestroyActorResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"actors/%v", actor) + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if request.OverrideKillTimeout != nil { + queryParams.Add("override_kill_timeout", fmt.Sprintf("%v", *request.OverrideKillTimeout)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *v1.DestroyActorResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodDelete, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Upgrades a actor. +// +// The id of the actor to upgrade +func (c *Client) Upgrade(ctx context.Context, actor uuid.UUID, request *v1.UpgradeActorRequestQuery) (*v1.UpgradeActorResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"actors/%v/upgrade", actor) + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *v1.UpgradeActorResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodPost, + Headers: c.header, + Request: request, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Upgrades all actors matching the given tags. +func (c *Client) UpgradeAll(ctx context.Context, request *v1.UpgradeAllActorsRequestQuery) (*v1.UpgradeAllActorsResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "actors/upgrade" + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *v1.UpgradeAllActorsResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodPost, + Headers: c.header, + Request: request, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} diff --git a/sdks/api/full/go/actors/v1/logs.go b/sdks/api/full/go/actors/v1/logs.go new file mode 100644 index 0000000000..55d3c1727c --- /dev/null +++ b/sdks/api/full/go/actors/v1/logs.go @@ -0,0 +1,89 @@ +// This file was auto-generated by Fern from our API Definition. + +package v1 + +import ( + json "encoding/json" + fmt "fmt" + sdk "sdk" + core "sdk/core" +) + +type GetActorLogsRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + Stream QueryLogStream `json:"-"` + ActorIdsJson string `json:"-"` + SearchText *string `json:"-"` + SearchCaseSensitive *bool `json:"-"` + SearchEnableRegex *bool `json:"-"` + // A query parameter denoting the requests watch index. + WatchIndex *string `json:"-"` +} + +type GetActorLogsResponse struct { + // List of actor IDs in these logs. The order of these correspond to the index in the log entry. + ActorIds []string `json:"actor_ids,omitempty"` + // Sorted old to new. + Lines []string `json:"lines,omitempty"` + // Sorted old to new. + Timestamps []sdk.Timestamp `json:"timestamps,omitempty"` + // Streams the logs came from. + // + // 0 = stdout + // 1 = stderr + Streams []int `json:"streams,omitempty"` + // Index of the actor that this log was for. Use this index to look the full ID in `actor_ids`. 
+ ActorIndices []int `json:"actor_indices,omitempty"` + Watch *sdk.WatchResponse `json:"watch,omitempty"` + + _rawJSON json.RawMessage +} + +func (g *GetActorLogsResponse) UnmarshalJSON(data []byte) error { + type unmarshaler GetActorLogsResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GetActorLogsResponse(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GetActorLogsResponse) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} + +type QueryLogStream string + +const ( + QueryLogStreamStdOut QueryLogStream = "std_out" + QueryLogStreamStdErr QueryLogStream = "std_err" + QueryLogStreamAll QueryLogStream = "all" +) + +func NewQueryLogStreamFromString(s string) (QueryLogStream, error) { + switch s { + case "std_out": + return QueryLogStreamStdOut, nil + case "std_err": + return QueryLogStreamStdErr, nil + case "all": + return QueryLogStreamAll, nil + } + var t QueryLogStream + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (q QueryLogStream) Ptr() *QueryLogStream { + return &q +} diff --git a/sdks/api/full/go/actors/v1/logs/client.go b/sdks/api/full/go/actors/v1/logs/client.go new file mode 100644 index 0000000000..c8a6cbdf44 --- /dev/null +++ b/sdks/api/full/go/actors/v1/logs/client.go @@ -0,0 +1,138 @@ +// This file was auto-generated by Fern from our API Definition. + +package logs + +import ( + bytes "bytes" + context "context" + json "encoding/json" + errors "errors" + fmt "fmt" + io "io" + http "net/http" + url "net/url" + sdk "sdk" + v1 "sdk/actors/v1" + core "sdk/core" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + } +} + +// Returns the logs for a given actor. +func (c *Client) Get(ctx context.Context, request *v1.GetActorLogsRequestQuery) (*v1.GetActorLogsResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "actors/logs" + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + queryParams.Add("stream", fmt.Sprintf("%v", request.Stream)) + queryParams.Add("actor_ids_json", fmt.Sprintf("%v", request.ActorIdsJson)) + if request.SearchText != nil { + queryParams.Add("search_text", fmt.Sprintf("%v", *request.SearchText)) + } + if request.SearchCaseSensitive != nil { + queryParams.Add("search_case_sensitive", fmt.Sprintf("%v", *request.SearchCaseSensitive)) + } + if request.SearchEnableRegex != nil { + queryParams.Add("search_enable_regex", fmt.Sprintf("%v", *request.SearchEnableRegex)) + } + if request.WatchIndex != nil { + queryParams.Add("watch_index", fmt.Sprintf("%v", *request.WatchIndex)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *v1.GetActorLogsResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} diff --git a/sdks/api/full/go/actors/v1/metrics.go b/sdks/api/full/go/actors/v1/metrics.go new file mode 100644 index 0000000000..d3d536cec6 --- /dev/null +++ b/sdks/api/full/go/actors/v1/metrics.go @@ -0,0 +1,50 @@ +// This file was auto-generated by Fern from our API Definition. + +package v1 + +import ( + json "encoding/json" + fmt "fmt" + core "sdk/core" +) + +type GetActorMetricsRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + Start int `json:"-"` + End int `json:"-"` + Interval int `json:"-"` +} + +type GetActorMetricsResponse struct { + ActorIds []string `json:"actor_ids,omitempty"` + MetricNames []string `json:"metric_names,omitempty"` + MetricAttributes []map[string]string `json:"metric_attributes,omitempty"` + MetricTypes []string `json:"metric_types,omitempty"` + MetricValues [][]float64 `json:"metric_values,omitempty"` + + _rawJSON json.RawMessage +} + +func (g *GetActorMetricsResponse) UnmarshalJSON(data []byte) error { + type unmarshaler GetActorMetricsResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GetActorMetricsResponse(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GetActorMetricsResponse) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} diff --git a/sdks/api/full/go/actors/v1/metrics/client.go b/sdks/api/full/go/actors/v1/metrics/client.go new file mode 100644 index 0000000000..b5960404a0 --- /dev/null +++ b/sdks/api/full/go/actors/v1/metrics/client.go @@ -0,0 +1,130 @@ +// This file was auto-generated by Fern from our API Definition. 
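For reference, a minimal usage sketch (not part of the generated files) of the v1 actors logs client shown above. Import paths follow the module layout visible in this patch ("sdk/actors/v1" and "sdk/actors/v1/logs"); auth and base-URL options are omitted, and `actor_ids_json` is assumed to be a JSON-encoded array of actor IDs.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	v1 "sdk/actors/v1"
	logsclient "sdk/actors/v1/logs"
)

func main() {
	ctx := context.Background()
	client := logsclient.NewClient() // client options (auth, base URL) omitted

	// Assumption: actor_ids_json is a JSON-encoded array of actor IDs.
	ids, _ := json.Marshal([]string{"00000000-0000-0000-0000-000000000000"})

	env := "prod"
	resp, err := client.Get(ctx, &v1.GetActorLogsRequestQuery{
		Environment:  &env,
		Stream:       v1.QueryLogStreamAll,
		ActorIdsJson: string(ids),
	})
	if err != nil {
		log.Fatal(err)
	}
	for i, line := range resp.Lines {
		// ActorIndices maps each log line back to an entry in ActorIds.
		fmt.Println(resp.ActorIds[resp.ActorIndices[i]], line)
	}
}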
+ +package metrics + +import ( + bytes "bytes" + context "context" + json "encoding/json" + errors "errors" + fmt "fmt" + uuid "github.com/google/uuid" + io "io" + http "net/http" + url "net/url" + sdk "sdk" + v1 "sdk/actors/v1" + core "sdk/core" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + } +} + +// Returns the metrics for a given actor. +// +// The id of the actor to destroy +func (c *Client) Get(ctx context.Context, actor uuid.UUID, request *v1.GetActorMetricsRequestQuery) (*v1.GetActorMetricsResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"actors/%v/metrics/history", actor) + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + queryParams.Add("start", fmt.Sprintf("%v", request.Start)) + queryParams.Add("end", fmt.Sprintf("%v", request.End)) + queryParams.Add("interval", fmt.Sprintf("%v", request.Interval)) + if len(queryParams) > 0 { + endpointURL += "?" + queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *v1.GetActorMetricsResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} diff --git a/sdks/api/full/go/actors/v1/types.go b/sdks/api/full/go/actors/v1/types.go new file mode 100644 index 0000000000..e6332de451 --- /dev/null +++ b/sdks/api/full/go/actors/v1/types.go @@ -0,0 +1,572 @@ +// This file was auto-generated by Fern from our API Definition. 
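A similar sketch for the v1 metrics client above (illustrative only). The Start/End/Interval fields are plain ints in the generated request type; the assumption that they are millisecond timestamps and a millisecond bucket width is not stated by this patch and is marked as such in the comments.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	uuid "github.com/google/uuid"
	v1 "sdk/actors/v1"
	metricsclient "sdk/actors/v1/metrics"
)

func main() {
	ctx := context.Background()
	client := metricsclient.NewClient() // client options (auth, base URL) omitted

	actorID := uuid.MustParse("00000000-0000-0000-0000-000000000000")
	end := int(time.Now().UnixMilli())           // assumption: millisecond timestamp
	start := end - int(time.Hour.Milliseconds()) // assumption: last hour of data

	resp, err := client.Get(ctx, actorID, &v1.GetActorMetricsRequestQuery{
		Start:    start,
		End:      end,
		Interval: 60_000, // assumption: bucket width in milliseconds
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d metrics, %d series of values\n", len(resp.MetricNames), len(resp.MetricValues))
}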
+ +package v1 + +import ( + json "encoding/json" + fmt "fmt" + uuid "github.com/google/uuid" + sdk "sdk" + core "sdk/core" +) + +type CreateActorRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + EndpointType *EndpointType `json:"-"` + Body *CreateActorRequest `json:"-"` +} + +func (c *CreateActorRequestQuery) UnmarshalJSON(data []byte) error { + body := new(CreateActorRequest) + if err := json.Unmarshal(data, &body); err != nil { + return err + } + c.Body = body + return nil +} + +func (c *CreateActorRequestQuery) MarshalJSON() ([]byte, error) { + return json.Marshal(c.Body) +} + +type DestroyActorRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + // The duration to wait for in milliseconds before killing the actor. This should be used to override the default kill timeout if a faster time is needed, say for ignoring a graceful shutdown. + OverrideKillTimeout *int64 `json:"-"` +} + +type ListActorsRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + EndpointType *EndpointType `json:"-"` +} + +type GetActorsRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + EndpointType *EndpointType `json:"-"` + TagsJson *string `json:"-"` + IncludeDestroyed *bool `json:"-"` + Cursor *string `json:"-"` +} + +type Actor struct { + Id uuid.UUID `json:"id"` + Region string `json:"region"` + Tags interface{} `json:"tags,omitempty"` + Runtime *Runtime `json:"runtime,omitempty"` + Network *Network `json:"network,omitempty"` + Resources *Resources `json:"resources,omitempty"` + Lifecycle *Lifecycle `json:"lifecycle,omitempty"` + CreatedAt sdk.Timestamp `json:"created_at"` + StartedAt *sdk.Timestamp `json:"started_at,omitempty"` + DestroyedAt *sdk.Timestamp `json:"destroyed_at,omitempty"` + + _rawJSON json.RawMessage +} + +func (a *Actor) UnmarshalJSON(data []byte) error { + type unmarshaler Actor + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *a = Actor(value) + a._rawJSON = json.RawMessage(data) + return nil +} + +func (a *Actor) String() string { + if len(a._rawJSON) > 0 { + if value, err := core.StringifyJSON(a._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(a); err == nil { + return value + } + return fmt.Sprintf("%#v", a) +} + +type EndpointType string + +const ( + EndpointTypeHostname EndpointType = "hostname" + EndpointTypePath EndpointType = "path" +) + +func NewEndpointTypeFromString(s string) (EndpointType, error) { + switch s { + case "hostname": + return EndpointTypeHostname, nil + case "path": + return EndpointTypePath, nil + } + var t EndpointType + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (e EndpointType) Ptr() *EndpointType { + return &e +} + +type GuardRouting struct { + _rawJSON json.RawMessage +} + +func (g *GuardRouting) UnmarshalJSON(data []byte) error { + type unmarshaler GuardRouting + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GuardRouting(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GuardRouting) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} + +type HostRouting struct { + _rawJSON json.RawMessage +} + +func (h *HostRouting) UnmarshalJSON(data []byte) error { + type 
unmarshaler HostRouting + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *h = HostRouting(value) + h._rawJSON = json.RawMessage(data) + return nil +} + +func (h *HostRouting) String() string { + if len(h._rawJSON) > 0 { + if value, err := core.StringifyJSON(h._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(h); err == nil { + return value + } + return fmt.Sprintf("%#v", h) +} + +type Lifecycle struct { + // The duration to wait for in milliseconds before killing the actor. This should be set to a safe default, and can be overridden during a DELETE request if needed. + KillTimeout *int64 `json:"kill_timeout,omitempty"` + // If true, the actor will try to reschedule itself automatically in the event of a crash or a datacenter failover. The actor will not reschedule if it exits successfully. + Durable *bool `json:"durable,omitempty"` + + _rawJSON json.RawMessage +} + +func (l *Lifecycle) UnmarshalJSON(data []byte) error { + type unmarshaler Lifecycle + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *l = Lifecycle(value) + l._rawJSON = json.RawMessage(data) + return nil +} + +func (l *Lifecycle) String() string { + if len(l._rawJSON) > 0 { + if value, err := core.StringifyJSON(l._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(l); err == nil { + return value + } + return fmt.Sprintf("%#v", l) +} + +type Network struct { + Mode NetworkMode `json:"mode,omitempty"` + Ports map[string]*Port `json:"ports,omitempty"` + + _rawJSON json.RawMessage +} + +func (n *Network) UnmarshalJSON(data []byte) error { + type unmarshaler Network + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *n = Network(value) + n._rawJSON = json.RawMessage(data) + return nil +} + +func (n *Network) String() string { + if len(n._rawJSON) > 0 { + if value, err := core.StringifyJSON(n._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(n); err == nil { + return value + } + return fmt.Sprintf("%#v", n) +} + +type NetworkMode string + +const ( + NetworkModeBridge NetworkMode = "bridge" + NetworkModeHost NetworkMode = "host" +) + +func NewNetworkModeFromString(s string) (NetworkMode, error) { + switch s { + case "bridge": + return NetworkModeBridge, nil + case "host": + return NetworkModeHost, nil + } + var t NetworkMode + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (n NetworkMode) Ptr() *NetworkMode { + return &n +} + +type Port struct { + Protocol PortProtocol `json:"protocol,omitempty"` + InternalPort *int `json:"internal_port,omitempty"` + Hostname *string `json:"hostname,omitempty"` + Port *int `json:"port,omitempty"` + Path *string `json:"path,omitempty"` + // Fully formed connection URL including protocol, hostname, port, and path, if applicable. 
+ Url *string `json:"url,omitempty"` + Routing *PortRouting `json:"routing,omitempty"` + + _rawJSON json.RawMessage +} + +func (p *Port) UnmarshalJSON(data []byte) error { + type unmarshaler Port + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *p = Port(value) + p._rawJSON = json.RawMessage(data) + return nil +} + +func (p *Port) String() string { + if len(p._rawJSON) > 0 { + if value, err := core.StringifyJSON(p._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(p); err == nil { + return value + } + return fmt.Sprintf("%#v", p) +} + +type PortProtocol string + +const ( + PortProtocolHttp PortProtocol = "http" + PortProtocolHttps PortProtocol = "https" + PortProtocolTcp PortProtocol = "tcp" + PortProtocolTcpTls PortProtocol = "tcp_tls" + PortProtocolUdp PortProtocol = "udp" +) + +func NewPortProtocolFromString(s string) (PortProtocol, error) { + switch s { + case "http": + return PortProtocolHttp, nil + case "https": + return PortProtocolHttps, nil + case "tcp": + return PortProtocolTcp, nil + case "tcp_tls": + return PortProtocolTcpTls, nil + case "udp": + return PortProtocolUdp, nil + } + var t PortProtocol + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (p PortProtocol) Ptr() *PortProtocol { + return &p +} + +type PortRouting struct { + Guard *GuardRouting `json:"guard,omitempty"` + Host *HostRouting `json:"host,omitempty"` + + _rawJSON json.RawMessage +} + +func (p *PortRouting) UnmarshalJSON(data []byte) error { + type unmarshaler PortRouting + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *p = PortRouting(value) + p._rawJSON = json.RawMessage(data) + return nil +} + +func (p *PortRouting) String() string { + if len(p._rawJSON) > 0 { + if value, err := core.StringifyJSON(p._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(p); err == nil { + return value + } + return fmt.Sprintf("%#v", p) +} + +type Resources struct { + // The number of CPU cores in millicores, or 1/1000 of a core. For example, + // 1/8 of a core would be 125 millicores, and 1 core would be 1000 + // millicores. 
+ Cpu int `json:"cpu"` + // The amount of memory in megabytes + Memory int `json:"memory"` + + _rawJSON json.RawMessage +} + +func (r *Resources) UnmarshalJSON(data []byte) error { + type unmarshaler Resources + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *r = Resources(value) + r._rawJSON = json.RawMessage(data) + return nil +} + +func (r *Resources) String() string { + if len(r._rawJSON) > 0 { + if value, err := core.StringifyJSON(r._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(r); err == nil { + return value + } + return fmt.Sprintf("%#v", r) +} + +type Runtime struct { + Build uuid.UUID `json:"build"` + Arguments []string `json:"arguments,omitempty"` + Environment map[string]string `json:"environment,omitempty"` + + _rawJSON json.RawMessage +} + +func (r *Runtime) UnmarshalJSON(data []byte) error { + type unmarshaler Runtime + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *r = Runtime(value) + r._rawJSON = json.RawMessage(data) + return nil +} + +func (r *Runtime) String() string { + if len(r._rawJSON) > 0 { + if value, err := core.StringifyJSON(r._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(r); err == nil { + return value + } + return fmt.Sprintf("%#v", r) +} + +type CreateActorNetworkRequest struct { + Mode *NetworkMode `json:"mode,omitempty"` + Ports map[string]*CreateActorPortRequest `json:"ports,omitempty"` + WaitReady *bool `json:"wait_ready,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateActorNetworkRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateActorNetworkRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateActorNetworkRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateActorNetworkRequest) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type CreateActorPortRequest struct { + Protocol PortProtocol `json:"protocol,omitempty"` + InternalPort *int `json:"internal_port,omitempty"` + Routing *PortRouting `json:"routing,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateActorPortRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateActorPortRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateActorPortRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateActorPortRequest) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type CreateActorRuntimeNetworkRequest struct { + EndpointType EndpointType `json:"endpoint_type,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateActorRuntimeNetworkRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateActorRuntimeNetworkRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateActorRuntimeNetworkRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateActorRuntimeNetworkRequest) String() string { + if len(c._rawJSON) > 0 { + 
if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type CreateActorRuntimeRequest struct { + Environment map[string]string `json:"environment,omitempty"` + Network *CreateActorRuntimeNetworkRequest `json:"network,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateActorRuntimeRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateActorRuntimeRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateActorRuntimeRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateActorRuntimeRequest) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type UpgradeActorRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + Body *UpgradeActorRequest `json:"-"` +} + +func (u *UpgradeActorRequestQuery) UnmarshalJSON(data []byte) error { + body := new(UpgradeActorRequest) + if err := json.Unmarshal(data, &body); err != nil { + return err + } + u.Body = body + return nil +} + +func (u *UpgradeActorRequestQuery) MarshalJSON() ([]byte, error) { + return json.Marshal(u.Body) +} + +type UpgradeAllActorsRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + Body *UpgradeAllActorsRequest `json:"-"` +} + +func (u *UpgradeAllActorsRequestQuery) UnmarshalJSON(data []byte) error { + body := new(UpgradeAllActorsRequest) + if err := json.Unmarshal(data, &body); err != nil { + return err + } + u.Body = body + return nil +} + +func (u *UpgradeAllActorsRequestQuery) MarshalJSON() ([]byte, error) { + return json.Marshal(u.Body) +} diff --git a/sdks/api/full/go/actors/v1/v_1.go b/sdks/api/full/go/actors/v1/v_1.go new file mode 100644 index 0000000000..5914e7d587 --- /dev/null +++ b/sdks/api/full/go/actors/v1/v_1.go @@ -0,0 +1,281 @@ +// This file was auto-generated by Fern from our API Definition. 
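To illustrate how the v1 request wrappers above behave: the *RequestQuery types carry project/environment as query parameters (tagged `json:"-"`), while their MarshalJSON forwards only Body, so the JSON request body is exactly the CreateActorRequest defined in v_1.go below. A hedged sketch follows; the import path "sdk/actors/v1/client" and the assumption that its Create method mirrors the Destroy/Upgrade pattern shown earlier are not confirmed by this hunk.

package main

import (
	"context"
	"log"

	uuid "github.com/google/uuid"
	v1 "sdk/actors/v1"
	actorsv1 "sdk/actors/v1/client" // assumed import path for the v1 actors client
)

func main() {
	ctx := context.Background()
	client := actorsv1.NewClient() // client options (auth, base URL) omitted

	env := "prod"
	region := "atl" // hypothetical region slug
	build := uuid.MustParse("00000000-0000-0000-0000-000000000000")

	req := &v1.CreateActorRequestQuery{
		// Sent as a query parameter (json:"-"), not in the body.
		Environment: &env,
		// MarshalJSON forwards only Body, so this struct becomes the JSON body.
		Body: &v1.CreateActorRequest{
			Region: &region,
			Build:  &build,
			Tags:   map[string]string{"name": "example"},
			Network: &v1.CreateActorNetworkRequest{
				Ports: map[string]*v1.CreateActorPortRequest{
					"http": {
						Protocol: v1.PortProtocolHttps,
						Routing:  &v1.PortRouting{Guard: &v1.GuardRouting{}},
					},
				},
			},
			Resources: &v1.Resources{Cpu: 125, Memory: 128}, // 1/8 core, 128 MB
			Lifecycle: &v1.Lifecycle{Durable: boolPtr(true)},
		},
	}

	// Assumption: Create follows the same shape as Destroy/Upgrade above.
	if _, err := client.Create(ctx, req); err != nil {
		log.Fatal(err)
	}
}

func boolPtr(b bool) *bool { return &b }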
+ +package v1 + +import ( + json "encoding/json" + fmt "fmt" + uuid "github.com/google/uuid" + sdk "sdk" + core "sdk/core" +) + +type CreateActorRequest struct { + Region *string `json:"region,omitempty"` + Tags interface{} `json:"tags,omitempty"` + Build *uuid.UUID `json:"build,omitempty"` + BuildTags interface{} `json:"build_tags,omitempty"` + Runtime *CreateActorRuntimeRequest `json:"runtime,omitempty"` + Network *CreateActorNetworkRequest `json:"network,omitempty"` + Resources *Resources `json:"resources,omitempty"` + Lifecycle *Lifecycle `json:"lifecycle,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateActorRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateActorRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateActorRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateActorRequest) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type CreateActorResponse struct { + // The actor that was created + Actor *Actor `json:"actor,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateActorResponse) UnmarshalJSON(data []byte) error { + type unmarshaler CreateActorResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateActorResponse(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateActorResponse) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type DestroyActorResponse struct { + _rawJSON json.RawMessage +} + +func (d *DestroyActorResponse) UnmarshalJSON(data []byte) error { + type unmarshaler DestroyActorResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *d = DestroyActorResponse(value) + d._rawJSON = json.RawMessage(data) + return nil +} + +func (d *DestroyActorResponse) String() string { + if len(d._rawJSON) > 0 { + if value, err := core.StringifyJSON(d._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(d); err == nil { + return value + } + return fmt.Sprintf("%#v", d) +} + +type GetActorResponse struct { + Actor *Actor `json:"actor,omitempty"` + + _rawJSON json.RawMessage +} + +func (g *GetActorResponse) UnmarshalJSON(data []byte) error { + type unmarshaler GetActorResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GetActorResponse(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GetActorResponse) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} + +type ListActorsResponse struct { + // A list of actors for the project associated with the token. 
+ Actors []*Actor `json:"actors,omitempty"` + Pagination *sdk.Pagination `json:"pagination,omitempty"` + + _rawJSON json.RawMessage +} + +func (l *ListActorsResponse) UnmarshalJSON(data []byte) error { + type unmarshaler ListActorsResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *l = ListActorsResponse(value) + l._rawJSON = json.RawMessage(data) + return nil +} + +func (l *ListActorsResponse) String() string { + if len(l._rawJSON) > 0 { + if value, err := core.StringifyJSON(l._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(l); err == nil { + return value + } + return fmt.Sprintf("%#v", l) +} + +type UpgradeActorRequest struct { + Build *uuid.UUID `json:"build,omitempty"` + BuildTags interface{} `json:"build_tags,omitempty"` + + _rawJSON json.RawMessage +} + +func (u *UpgradeActorRequest) UnmarshalJSON(data []byte) error { + type unmarshaler UpgradeActorRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *u = UpgradeActorRequest(value) + u._rawJSON = json.RawMessage(data) + return nil +} + +func (u *UpgradeActorRequest) String() string { + if len(u._rawJSON) > 0 { + if value, err := core.StringifyJSON(u._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(u); err == nil { + return value + } + return fmt.Sprintf("%#v", u) +} + +type UpgradeActorResponse struct { + _rawJSON json.RawMessage +} + +func (u *UpgradeActorResponse) UnmarshalJSON(data []byte) error { + type unmarshaler UpgradeActorResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *u = UpgradeActorResponse(value) + u._rawJSON = json.RawMessage(data) + return nil +} + +func (u *UpgradeActorResponse) String() string { + if len(u._rawJSON) > 0 { + if value, err := core.StringifyJSON(u._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(u); err == nil { + return value + } + return fmt.Sprintf("%#v", u) +} + +type UpgradeAllActorsRequest struct { + Tags interface{} `json:"tags,omitempty"` + Build *uuid.UUID `json:"build,omitempty"` + BuildTags interface{} `json:"build_tags,omitempty"` + + _rawJSON json.RawMessage +} + +func (u *UpgradeAllActorsRequest) UnmarshalJSON(data []byte) error { + type unmarshaler UpgradeAllActorsRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *u = UpgradeAllActorsRequest(value) + u._rawJSON = json.RawMessage(data) + return nil +} + +func (u *UpgradeAllActorsRequest) String() string { + if len(u._rawJSON) > 0 { + if value, err := core.StringifyJSON(u._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(u); err == nil { + return value + } + return fmt.Sprintf("%#v", u) +} + +type UpgradeAllActorsResponse struct { + Count int64 `json:"count"` + + _rawJSON json.RawMessage +} + +func (u *UpgradeAllActorsResponse) UnmarshalJSON(data []byte) error { + type unmarshaler UpgradeAllActorsResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *u = UpgradeAllActorsResponse(value) + u._rawJSON = json.RawMessage(data) + return nil +} + +func (u *UpgradeAllActorsResponse) String() string { + if len(u._rawJSON) > 0 { + if value, err := core.StringifyJSON(u._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(u); err == nil { + return value + } + return fmt.Sprintf("%#v", u) +} diff --git 
a/sdks/api/full/go/client/client.go b/sdks/api/full/go/client/client.go index 9e2c8be8d5..917cf299ae 100644 --- a/sdks/api/full/go/client/client.go +++ b/sdks/api/full/go/client/client.go @@ -8,6 +8,7 @@ import ( authclient "sdk/auth/client" buildsclient "sdk/builds/client" cloudclient "sdk/cloud/client" + containersclient "sdk/containers/client" core "sdk/core" coreintercomclient "sdk/coreintercom/client" edgeintercomclient "sdk/edgeintercom/client" @@ -31,6 +32,7 @@ type Client struct { Actors *actorsclient.Client Builds *buildsclient.Client Cloud *cloudclient.Client + Containers *containersclient.Client CoreIntercom *coreintercomclient.Client EdgeIntercom *edgeintercomclient.Client Group *groupclient.Client @@ -58,6 +60,7 @@ func NewClient(opts ...core.ClientOption) *Client { Actors: actorsclient.NewClient(opts...), Builds: buildsclient.NewClient(opts...), Cloud: cloudclient.NewClient(opts...), + Containers: containersclient.NewClient(opts...), CoreIntercom: coreintercomclient.NewClient(opts...), EdgeIntercom: edgeintercomclient.NewClient(opts...), Group: groupclient.NewClient(opts...), diff --git a/sdks/api/full/go/containers/client/client.go b/sdks/api/full/go/containers/client/client.go new file mode 100644 index 0000000000..4e6244b72a --- /dev/null +++ b/sdks/api/full/go/containers/client/client.go @@ -0,0 +1,606 @@ +// This file was auto-generated by Fern from our API Definition. + +package client + +import ( + bytes "bytes" + context "context" + json "encoding/json" + errors "errors" + fmt "fmt" + io "io" + http "net/http" + url "net/url" + sdk "sdk" + containers "sdk/containers" + logs "sdk/containers/logs" + metrics "sdk/containers/metrics" + core "sdk/core" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header + + Logs *logs.Client + Metrics *metrics.Client +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + Logs: logs.NewClient(opts...), + Metrics: metrics.NewClient(opts...), + } +} + +// Gets a container. +// +// The id of the container to destroy +func (c *Client) Get(ctx context.Context, container sdk.Id, request *containers.ListContainersRequestQuery) (*containers.GetContainerResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/containers/%v", container) + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if request.EndpointType != nil { + queryParams.Add("endpoint_type", fmt.Sprintf("%v", *request.EndpointType)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.GetContainerResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Lists all containers associated with the token used. Can be filtered by tags in the query string. +func (c *Client) List(ctx context.Context, request *containers.GetContainersRequestQuery) (*containers.ListContainersResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "v1/containers" + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if request.EndpointType != nil { + queryParams.Add("endpoint_type", fmt.Sprintf("%v", *request.EndpointType)) + } + if request.TagsJson != nil { + queryParams.Add("tags_json", fmt.Sprintf("%v", *request.TagsJson)) + } + if request.IncludeDestroyed != nil { + queryParams.Add("include_destroyed", fmt.Sprintf("%v", *request.IncludeDestroyed)) + } + if request.Cursor != nil { + queryParams.Add("cursor", fmt.Sprintf("%v", *request.Cursor)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.ListContainersResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Create a new container. +func (c *Client) Create(ctx context.Context, request *containers.CreateContainerRequestQuery) (*containers.CreateContainerResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "v1/containers" + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if request.EndpointType != nil { + queryParams.Add("endpoint_type", fmt.Sprintf("%v", *request.EndpointType)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.CreateContainerResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodPost, + Headers: c.header, + Request: request, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Destroy a container. +// +// The id of the container to destroy +func (c *Client) Destroy(ctx context.Context, container sdk.Id, request *containers.DestroyContainerRequestQuery) (*containers.DestroyContainerResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/containers/%v", container) + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if request.OverrideKillTimeout != nil { + queryParams.Add("override_kill_timeout", fmt.Sprintf("%v", *request.OverrideKillTimeout)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.DestroyContainerResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodDelete, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Upgrades a container. +// +// The id of the container to upgrade +func (c *Client) Upgrade(ctx context.Context, container sdk.Id, request *containers.UpgradeContainerRequestQuery) (*containers.UpgradeContainerResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/containers/%v/upgrade", container) + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.UpgradeContainerResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodPost, + Headers: c.header, + Request: request, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Upgrades all containers matching the given tags. +func (c *Client) UpgradeAll(ctx context.Context, request *containers.UpgradeAllContainersRequestQuery) (*containers.UpgradeAllContainersResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "v1/containers/upgrade" + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.UpgradeAllContainersResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodPost, + Headers: c.header, + Request: request, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} diff --git a/sdks/api/full/go/containers/containers.go b/sdks/api/full/go/containers/containers.go new file mode 100644 index 0000000000..63b15b5a86 --- /dev/null +++ b/sdks/api/full/go/containers/containers.go @@ -0,0 +1,281 @@ +// This file was auto-generated by Fern from our API Definition. 
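A short sketch of the containers client above, reached through the root client wired up in client/client.go earlier in this patch (illustrative only). GetContainersRequestQuery itself lives in containers/types.go, outside this hunk; `tags_json` is assumed to be a JSON-encoded tag map.

package main

import (
	"context"
	"fmt"
	"log"

	sdkclient "sdk/client"
	containers "sdk/containers"
)

func main() {
	ctx := context.Background()
	c := sdkclient.NewClient() // client options (auth, base URL) omitted

	env := "prod"
	tags := `{"name":"example"}` // assumption: JSON-encoded tag map
	includeDestroyed := false

	list, err := c.Containers.List(ctx, &containers.GetContainersRequestQuery{
		Environment:      &env,
		TagsJson:         &tags,
		IncludeDestroyed: &includeDestroyed,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d containers\n", len(list.Containers))
}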
+ +package containers + +import ( + json "encoding/json" + fmt "fmt" + uuid "github.com/google/uuid" + sdk "sdk" + core "sdk/core" +) + +type CreateContainerRequest struct { + Region *string `json:"region,omitempty"` + Tags interface{} `json:"tags,omitempty"` + Build *uuid.UUID `json:"build,omitempty"` + BuildTags interface{} `json:"build_tags,omitempty"` + Runtime *CreateContainerRuntimeRequest `json:"runtime,omitempty"` + Network *CreateContainerNetworkRequest `json:"network,omitempty"` + Resources *Resources `json:"resources,omitempty"` + Lifecycle *Lifecycle `json:"lifecycle,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateContainerRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateContainerRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateContainerRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateContainerRequest) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type CreateContainerResponse struct { + // The container that was created + Container *Container `json:"container,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateContainerResponse) UnmarshalJSON(data []byte) error { + type unmarshaler CreateContainerResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateContainerResponse(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateContainerResponse) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type DestroyContainerResponse struct { + _rawJSON json.RawMessage +} + +func (d *DestroyContainerResponse) UnmarshalJSON(data []byte) error { + type unmarshaler DestroyContainerResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *d = DestroyContainerResponse(value) + d._rawJSON = json.RawMessage(data) + return nil +} + +func (d *DestroyContainerResponse) String() string { + if len(d._rawJSON) > 0 { + if value, err := core.StringifyJSON(d._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(d); err == nil { + return value + } + return fmt.Sprintf("%#v", d) +} + +type GetContainerResponse struct { + Container *Container `json:"container,omitempty"` + + _rawJSON json.RawMessage +} + +func (g *GetContainerResponse) UnmarshalJSON(data []byte) error { + type unmarshaler GetContainerResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GetContainerResponse(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GetContainerResponse) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} + +type ListContainersResponse struct { + // A list of containers for the project associated with the token. 
+ Containers []*Container `json:"containers,omitempty"` + Pagination *sdk.Pagination `json:"pagination,omitempty"` + + _rawJSON json.RawMessage +} + +func (l *ListContainersResponse) UnmarshalJSON(data []byte) error { + type unmarshaler ListContainersResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *l = ListContainersResponse(value) + l._rawJSON = json.RawMessage(data) + return nil +} + +func (l *ListContainersResponse) String() string { + if len(l._rawJSON) > 0 { + if value, err := core.StringifyJSON(l._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(l); err == nil { + return value + } + return fmt.Sprintf("%#v", l) +} + +type UpgradeAllContainersRequest struct { + Tags interface{} `json:"tags,omitempty"` + Build *uuid.UUID `json:"build,omitempty"` + BuildTags interface{} `json:"build_tags,omitempty"` + + _rawJSON json.RawMessage +} + +func (u *UpgradeAllContainersRequest) UnmarshalJSON(data []byte) error { + type unmarshaler UpgradeAllContainersRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *u = UpgradeAllContainersRequest(value) + u._rawJSON = json.RawMessage(data) + return nil +} + +func (u *UpgradeAllContainersRequest) String() string { + if len(u._rawJSON) > 0 { + if value, err := core.StringifyJSON(u._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(u); err == nil { + return value + } + return fmt.Sprintf("%#v", u) +} + +type UpgradeAllContainersResponse struct { + Count int64 `json:"count"` + + _rawJSON json.RawMessage +} + +func (u *UpgradeAllContainersResponse) UnmarshalJSON(data []byte) error { + type unmarshaler UpgradeAllContainersResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *u = UpgradeAllContainersResponse(value) + u._rawJSON = json.RawMessage(data) + return nil +} + +func (u *UpgradeAllContainersResponse) String() string { + if len(u._rawJSON) > 0 { + if value, err := core.StringifyJSON(u._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(u); err == nil { + return value + } + return fmt.Sprintf("%#v", u) +} + +type UpgradeContainerRequest struct { + Build *uuid.UUID `json:"build,omitempty"` + BuildTags interface{} `json:"build_tags,omitempty"` + + _rawJSON json.RawMessage +} + +func (u *UpgradeContainerRequest) UnmarshalJSON(data []byte) error { + type unmarshaler UpgradeContainerRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *u = UpgradeContainerRequest(value) + u._rawJSON = json.RawMessage(data) + return nil +} + +func (u *UpgradeContainerRequest) String() string { + if len(u._rawJSON) > 0 { + if value, err := core.StringifyJSON(u._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(u); err == nil { + return value + } + return fmt.Sprintf("%#v", u) +} + +type UpgradeContainerResponse struct { + _rawJSON json.RawMessage +} + +func (u *UpgradeContainerResponse) UnmarshalJSON(data []byte) error { + type unmarshaler UpgradeContainerResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *u = UpgradeContainerResponse(value) + u._rawJSON = json.RawMessage(data) + return nil +} + +func (u *UpgradeContainerResponse) String() string { + if len(u._rawJSON) > 0 { + if value, err := core.StringifyJSON(u._rawJSON); err == nil { + return value + } + } + if value, err 
:= core.StringifyJSON(u); err == nil { + return value + } + return fmt.Sprintf("%#v", u) +} diff --git a/sdks/api/full/go/containers/logs.go b/sdks/api/full/go/containers/logs.go new file mode 100644 index 0000000000..3eec84bace --- /dev/null +++ b/sdks/api/full/go/containers/logs.go @@ -0,0 +1,91 @@ +// This file was auto-generated by Fern from our API Definition. + +package containers + +import ( + json "encoding/json" + fmt "fmt" + sdk "sdk" + core "sdk/core" +) + +type GetContainerLogsRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + Stream QueryLogStream `json:"-"` + ContainerIdsJson string `json:"-"` + SearchText *string `json:"-"` + SearchCaseSensitive *bool `json:"-"` + SearchEnableRegex *bool `json:"-"` + // A query parameter denoting the requests watch index. + WatchIndex *string `json:"-"` +} + +type GetContainerLogsResponse struct { + // List of container IDs in these logs. The order of these corresponds to the index in the log entry. + ContainerIds []sdk.Id `json:"container_ids,omitempty"` + // Sorted old to new. + Lines []string `json:"lines,omitempty"` + // Sorted old to new. + Timestamps []sdk.Timestamp `json:"timestamps,omitempty"` + // Streams the logs came from. + // + // 0 = stdout + // 1 = stderr + Streams []int `json:"streams,omitempty"` + // List of flags denoting if this log is not directly from the container. + Foreigns []bool `json:"foreigns,omitempty"` + // Index of the container that this log was for. Use this index to look up the full ID in `container_ids`. + ContainerIndices []int `json:"container_indices,omitempty"` + Watch *sdk.WatchResponse `json:"watch,omitempty"` + + _rawJSON json.RawMessage +} + +func (g *GetContainerLogsResponse) UnmarshalJSON(data []byte) error { + type unmarshaler GetContainerLogsResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GetContainerLogsResponse(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GetContainerLogsResponse) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} + +type QueryLogStream string + +const ( + QueryLogStreamStdOut QueryLogStream = "std_out" + QueryLogStreamStdErr QueryLogStream = "std_err" + QueryLogStreamAll QueryLogStream = "all" +) + +func NewQueryLogStreamFromString(s string) (QueryLogStream, error) { + switch s { + case "std_out": + return QueryLogStreamStdOut, nil + case "std_err": + return QueryLogStreamStdErr, nil + case "all": + return QueryLogStreamAll, nil + } + var t QueryLogStream + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (q QueryLogStream) Ptr() *QueryLogStream { + return &q +} diff --git a/sdks/api/full/go/containers/logs/client.go b/sdks/api/full/go/containers/logs/client.go new file mode 100644 index 0000000000..be5ec19067 --- /dev/null +++ b/sdks/api/full/go/containers/logs/client.go @@ -0,0 +1,138 @@ +// This file was auto-generated by Fern from our API Definition.
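+//
+// Usage sketch (illustrative only, not emitted by the generator; `ctx` and the
+// container ID below are placeholders): fetch combined stdout/stderr logs for a
+// set of containers and walk the parallel arrays in the response.
+//
+//	client := logs.NewClient()
+//	resp, err := client.Get(ctx, &containers.GetContainerLogsRequestQuery{
+//		Stream:           containers.QueryLogStreamAll,
+//		ContainerIdsJson: `["00000000-0000-0000-0000-000000000000"]`,
+//	})
+//	if err != nil {
+//		// handle *sdk.NotFoundError, *sdk.BadRequestError, etc.
+//	}
+//	for i, line := range resp.Lines {
+//		_ = resp.Timestamps[i] // same index as the line, sorted old to new
+//		_ = line
+//	}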
+ +package logs + +import ( + bytes "bytes" + context "context" + json "encoding/json" + errors "errors" + fmt "fmt" + io "io" + http "net/http" + url "net/url" + sdk "sdk" + containers "sdk/containers" + core "sdk/core" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + } +} + +// Returns the logs for a given container. +func (c *Client) Get(ctx context.Context, request *containers.GetContainerLogsRequestQuery) (*containers.GetContainerLogsResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "v1/containers/logs" + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + queryParams.Add("stream", fmt.Sprintf("%v", request.Stream)) + queryParams.Add("container_ids_json", fmt.Sprintf("%v", request.ContainerIdsJson)) + if request.SearchText != nil { + queryParams.Add("search_text", fmt.Sprintf("%v", *request.SearchText)) + } + if request.SearchCaseSensitive != nil { + queryParams.Add("search_case_sensitive", fmt.Sprintf("%v", *request.SearchCaseSensitive)) + } + if request.SearchEnableRegex != nil { + queryParams.Add("search_enable_regex", fmt.Sprintf("%v", *request.SearchEnableRegex)) + } + if request.WatchIndex != nil { + queryParams.Add("watch_index", fmt.Sprintf("%v", *request.WatchIndex)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.GetContainerLogsResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} diff --git a/sdks/api/full/go/containers/metrics.go b/sdks/api/full/go/containers/metrics.go new file mode 100644 index 0000000000..ba8e18bec0 --- /dev/null +++ b/sdks/api/full/go/containers/metrics.go @@ -0,0 +1,50 @@ +// This file was auto-generated by Fern from our API Definition. + +package containers + +import ( + json "encoding/json" + fmt "fmt" + core "sdk/core" +) + +type GetContainerMetricsRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + Start int `json:"-"` + End int `json:"-"` + Interval int `json:"-"` +} + +type GetContainerMetricsResponse struct { + ContainerIds []string `json:"container_ids,omitempty"` + MetricNames []string `json:"metric_names,omitempty"` + MetricAttributes []map[string]string `json:"metric_attributes,omitempty"` + MetricTypes []string `json:"metric_types,omitempty"` + MetricValues [][]float64 `json:"metric_values,omitempty"` + + _rawJSON json.RawMessage +} + +func (g *GetContainerMetricsResponse) UnmarshalJSON(data []byte) error { + type unmarshaler GetContainerMetricsResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GetContainerMetricsResponse(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GetContainerMetricsResponse) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} diff --git a/sdks/api/full/go/containers/metrics/client.go b/sdks/api/full/go/containers/metrics/client.go new file mode 100644 index 0000000000..d891ab58d9 --- /dev/null +++ b/sdks/api/full/go/containers/metrics/client.go @@ -0,0 +1,129 @@ +// This file was auto-generated by Fern from our API Definition. 
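+//
+// Usage sketch (illustrative only, not emitted by the generator; `ctx`,
+// `containerID`, and the timestamps are placeholders): request the recorded
+// metrics for one container over a time window at a fixed sampling interval.
+//
+//	client := metrics.NewClient()
+//	resp, err := client.Get(ctx, containerID, &containers.GetContainerMetricsRequestQuery{
+//		Start:    1735689600000,
+//		End:      1735693200000,
+//		Interval: 60000,
+//	})
+//	if err != nil {
+//		// handle *sdk.NotFoundError, *sdk.BadRequestError, etc.
+//	}
+//	for i, name := range resp.MetricNames {
+//		_ = name
+//		_ = resp.MetricValues[i] // one []float64 series per metric name
+//	}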
+ +package metrics + +import ( + bytes "bytes" + context "context" + json "encoding/json" + errors "errors" + fmt "fmt" + io "io" + http "net/http" + url "net/url" + sdk "sdk" + containers "sdk/containers" + core "sdk/core" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + } +} + +// Returns the metrics for a given container. +// +// The id of the container to get metrics for +func (c *Client) Get(ctx context.Context, container sdk.Id, request *containers.GetContainerMetricsRequestQuery) (*containers.GetContainerMetricsResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/containers/%v/metrics/history", container) + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + queryParams.Add("start", fmt.Sprintf("%v", request.Start)) + queryParams.Add("end", fmt.Sprintf("%v", request.End)) + queryParams.Add("interval", fmt.Sprintf("%v", request.Interval)) + if len(queryParams) > 0 { + endpointURL += "?" + queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.GetContainerMetricsResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} diff --git a/sdks/api/full/go/containers/types.go b/sdks/api/full/go/containers/types.go new file mode 100644 index 0000000000..383dcf1cc9 --- /dev/null +++ b/sdks/api/full/go/containers/types.go @@ -0,0 +1,572 @@ +// This file was auto-generated by Fern from our API Definition.
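+//
+// Usage sketch (illustrative only, not emitted by the generator): parse an
+// endpoint type from a raw query value and carry it in a list query.
+//
+//	et, err := containers.NewEndpointTypeFromString("hostname")
+//	if err != nil {
+//		// the input was neither "hostname" nor "path"
+//	}
+//	q := &containers.ListContainersRequestQuery{EndpointType: et.Ptr()}
+//	_ = q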
+ +package containers + +import ( + json "encoding/json" + fmt "fmt" + uuid "github.com/google/uuid" + sdk "sdk" + core "sdk/core" +) + +type CreateContainerRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + EndpointType *EndpointType `json:"-"` + Body *CreateContainerRequest `json:"-"` +} + +func (c *CreateContainerRequestQuery) UnmarshalJSON(data []byte) error { + body := new(CreateContainerRequest) + if err := json.Unmarshal(data, &body); err != nil { + return err + } + c.Body = body + return nil +} + +func (c *CreateContainerRequestQuery) MarshalJSON() ([]byte, error) { + return json.Marshal(c.Body) +} + +type DestroyContainerRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + // The duration to wait for in milliseconds before killing the container. This should be used to override the default kill timeout if a faster time is needed, say for ignoring a graceful shutdown. + OverrideKillTimeout *int64 `json:"-"` +} + +type ListContainersRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + EndpointType *EndpointType `json:"-"` +} + +type GetContainersRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + EndpointType *EndpointType `json:"-"` + TagsJson *string `json:"-"` + IncludeDestroyed *bool `json:"-"` + Cursor *string `json:"-"` +} + +type Container struct { + Id sdk.Id `json:"id"` + Region string `json:"region"` + Tags interface{} `json:"tags,omitempty"` + Runtime *Runtime `json:"runtime,omitempty"` + Network *Network `json:"network,omitempty"` + Resources *Resources `json:"resources,omitempty"` + Lifecycle *Lifecycle `json:"lifecycle,omitempty"` + CreatedAt sdk.Timestamp `json:"created_at"` + StartedAt *sdk.Timestamp `json:"started_at,omitempty"` + DestroyedAt *sdk.Timestamp `json:"destroyed_at,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *Container) UnmarshalJSON(data []byte) error { + type unmarshaler Container + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = Container(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *Container) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type EndpointType string + +const ( + EndpointTypeHostname EndpointType = "hostname" + EndpointTypePath EndpointType = "path" +) + +func NewEndpointTypeFromString(s string) (EndpointType, error) { + switch s { + case "hostname": + return EndpointTypeHostname, nil + case "path": + return EndpointTypePath, nil + } + var t EndpointType + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (e EndpointType) Ptr() *EndpointType { + return &e +} + +type GuardRouting struct { + _rawJSON json.RawMessage +} + +func (g *GuardRouting) UnmarshalJSON(data []byte) error { + type unmarshaler GuardRouting + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GuardRouting(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GuardRouting) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} + +type HostRouting struct { + _rawJSON json.RawMessage +} + +func (h 
*HostRouting) UnmarshalJSON(data []byte) error { + type unmarshaler HostRouting + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *h = HostRouting(value) + h._rawJSON = json.RawMessage(data) + return nil +} + +func (h *HostRouting) String() string { + if len(h._rawJSON) > 0 { + if value, err := core.StringifyJSON(h._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(h); err == nil { + return value + } + return fmt.Sprintf("%#v", h) +} + +type Lifecycle struct { + // The duration to wait for in milliseconds before killing the container. This should be set to a safe default, and can be overridden during a DELETE request if needed. + KillTimeout *int64 `json:"kill_timeout,omitempty"` + // If true, the container will try to reschedule itself automatically in the event of a crash or a datacenter failover. The container will not reschedule if it exits successfully. + Durable *bool `json:"durable,omitempty"` + + _rawJSON json.RawMessage +} + +func (l *Lifecycle) UnmarshalJSON(data []byte) error { + type unmarshaler Lifecycle + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *l = Lifecycle(value) + l._rawJSON = json.RawMessage(data) + return nil +} + +func (l *Lifecycle) String() string { + if len(l._rawJSON) > 0 { + if value, err := core.StringifyJSON(l._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(l); err == nil { + return value + } + return fmt.Sprintf("%#v", l) +} + +type Network struct { + Mode NetworkMode `json:"mode,omitempty"` + Ports map[string]*Port `json:"ports,omitempty"` + + _rawJSON json.RawMessage +} + +func (n *Network) UnmarshalJSON(data []byte) error { + type unmarshaler Network + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *n = Network(value) + n._rawJSON = json.RawMessage(data) + return nil +} + +func (n *Network) String() string { + if len(n._rawJSON) > 0 { + if value, err := core.StringifyJSON(n._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(n); err == nil { + return value + } + return fmt.Sprintf("%#v", n) +} + +type NetworkMode string + +const ( + NetworkModeBridge NetworkMode = "bridge" + NetworkModeHost NetworkMode = "host" +) + +func NewNetworkModeFromString(s string) (NetworkMode, error) { + switch s { + case "bridge": + return NetworkModeBridge, nil + case "host": + return NetworkModeHost, nil + } + var t NetworkMode + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (n NetworkMode) Ptr() *NetworkMode { + return &n +} + +type Port struct { + Protocol PortProtocol `json:"protocol,omitempty"` + InternalPort *int `json:"internal_port,omitempty"` + Hostname *string `json:"hostname,omitempty"` + Port *int `json:"port,omitempty"` + Path *string `json:"path,omitempty"` + // Fully formed connection URL including protocol, hostname, port, and path, if applicable. 
+ Url *string `json:"url,omitempty"` + Routing *PortRouting `json:"routing,omitempty"` + + _rawJSON json.RawMessage +} + +func (p *Port) UnmarshalJSON(data []byte) error { + type unmarshaler Port + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *p = Port(value) + p._rawJSON = json.RawMessage(data) + return nil +} + +func (p *Port) String() string { + if len(p._rawJSON) > 0 { + if value, err := core.StringifyJSON(p._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(p); err == nil { + return value + } + return fmt.Sprintf("%#v", p) +} + +type PortProtocol string + +const ( + PortProtocolHttp PortProtocol = "http" + PortProtocolHttps PortProtocol = "https" + PortProtocolTcp PortProtocol = "tcp" + PortProtocolTcpTls PortProtocol = "tcp_tls" + PortProtocolUdp PortProtocol = "udp" +) + +func NewPortProtocolFromString(s string) (PortProtocol, error) { + switch s { + case "http": + return PortProtocolHttp, nil + case "https": + return PortProtocolHttps, nil + case "tcp": + return PortProtocolTcp, nil + case "tcp_tls": + return PortProtocolTcpTls, nil + case "udp": + return PortProtocolUdp, nil + } + var t PortProtocol + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (p PortProtocol) Ptr() *PortProtocol { + return &p +} + +type PortRouting struct { + Guard *GuardRouting `json:"guard,omitempty"` + Host *HostRouting `json:"host,omitempty"` + + _rawJSON json.RawMessage +} + +func (p *PortRouting) UnmarshalJSON(data []byte) error { + type unmarshaler PortRouting + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *p = PortRouting(value) + p._rawJSON = json.RawMessage(data) + return nil +} + +func (p *PortRouting) String() string { + if len(p._rawJSON) > 0 { + if value, err := core.StringifyJSON(p._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(p); err == nil { + return value + } + return fmt.Sprintf("%#v", p) +} + +type Resources struct { + // The number of CPU cores in millicores, or 1/1000 of a core. For example, + // 1/8 of a core would be 125 millicores, and 1 core would be 1000 + // millicores. 
+ Cpu int `json:"cpu"` + // The amount of memory in megabytes + Memory int `json:"memory"` + + _rawJSON json.RawMessage +} + +func (r *Resources) UnmarshalJSON(data []byte) error { + type unmarshaler Resources + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *r = Resources(value) + r._rawJSON = json.RawMessage(data) + return nil +} + +func (r *Resources) String() string { + if len(r._rawJSON) > 0 { + if value, err := core.StringifyJSON(r._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(r); err == nil { + return value + } + return fmt.Sprintf("%#v", r) +} + +type Runtime struct { + Build uuid.UUID `json:"build"` + Arguments []string `json:"arguments,omitempty"` + Environment map[string]string `json:"environment,omitempty"` + + _rawJSON json.RawMessage +} + +func (r *Runtime) UnmarshalJSON(data []byte) error { + type unmarshaler Runtime + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *r = Runtime(value) + r._rawJSON = json.RawMessage(data) + return nil +} + +func (r *Runtime) String() string { + if len(r._rawJSON) > 0 { + if value, err := core.StringifyJSON(r._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(r); err == nil { + return value + } + return fmt.Sprintf("%#v", r) +} + +type CreateContainerNetworkRequest struct { + Mode *NetworkMode `json:"mode,omitempty"` + Ports map[string]*CreateContainerPortRequest `json:"ports,omitempty"` + WaitReady *bool `json:"wait_ready,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateContainerNetworkRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateContainerNetworkRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateContainerNetworkRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateContainerNetworkRequest) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type CreateContainerPortRequest struct { + Protocol PortProtocol `json:"protocol,omitempty"` + InternalPort *int `json:"internal_port,omitempty"` + Routing *PortRouting `json:"routing,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateContainerPortRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateContainerPortRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateContainerPortRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateContainerPortRequest) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type CreateContainerRuntimeNetworkRequest struct { + EndpointType EndpointType `json:"endpoint_type,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateContainerRuntimeNetworkRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateContainerRuntimeNetworkRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateContainerRuntimeNetworkRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c 
*CreateContainerRuntimeNetworkRequest) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type CreateContainerRuntimeRequest struct { + Environment map[string]string `json:"environment,omitempty"` + Network *CreateContainerRuntimeNetworkRequest `json:"network,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateContainerRuntimeRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateContainerRuntimeRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateContainerRuntimeRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateContainerRuntimeRequest) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type UpgradeContainerRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + Body *UpgradeContainerRequest `json:"-"` +} + +func (u *UpgradeContainerRequestQuery) UnmarshalJSON(data []byte) error { + body := new(UpgradeContainerRequest) + if err := json.Unmarshal(data, &body); err != nil { + return err + } + u.Body = body + return nil +} + +func (u *UpgradeContainerRequestQuery) MarshalJSON() ([]byte, error) { + return json.Marshal(u.Body) +} + +type UpgradeAllContainersRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + Body *UpgradeAllContainersRequest `json:"-"` +} + +func (u *UpgradeAllContainersRequestQuery) UnmarshalJSON(data []byte) error { + body := new(UpgradeAllContainersRequest) + if err := json.Unmarshal(data, &body); err != nil { + return err + } + u.Body = body + return nil +} + +func (u *UpgradeAllContainersRequestQuery) MarshalJSON() ([]byte, error) { + return json.Marshal(u.Body) +} diff --git a/sdks/api/full/openapi/openapi.yml b/sdks/api/full/openapi/openapi.yml index 7131c62a0f..63d95642ab 100644 --- a/sdks/api/full/openapi/openapi.yml +++ b/sdks/api/full/openapi/openapi.yml @@ -3,7 +3,7 @@ info: title: Rivet API version: '' paths: - /actors/{actor}: + /v2/actors/{actor}: get: description: Gets a actor. operationId: actors_get @@ -152,7 +152,7 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /actors: + /v2/actors: get: description: >- Lists all actors associated with the token used. Can be filtered by tags @@ -306,7 +306,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ActorsCreateActorRequest' - /actors/{actor}/upgrade: + /v2/actors/{actor}/upgrade: post: description: Upgrades a actor. operationId: actors_upgrade @@ -379,7 +379,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ActorsUpgradeActorRequest' - /actors/upgrade: + /v2/actors/upgrade: post: description: Upgrades all actors matching the given tags. operationId: actors_upgradeAll @@ -446,6 +446,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ActorsUpgradeAllActorsRequest' +<<<<<<< HEAD /actors/usage: get: description: >- @@ -455,6 +456,22 @@ paths: tags: - Actors parameters: +======= + /actors/{actor}: + get: + description: Gets a actor. 
+ operationId: actors_v1_get + tags: + - ActorsV1 + parameters: + - name: actor + in: path + description: The id of the actor to destroy + required: true + schema: + type: string + format: uuid +>>>>>>> 43e5048bc (fix: api changes) - name: project in: query required: false @@ -465,6 +482,7 @@ paths: required: false schema: type: string +<<<<<<< HEAD - name: start in: query description: Start timestamp in milliseconds @@ -498,13 +516,19 @@ paths: required: false schema: type: string +======= + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ActorsV1EndpointType' responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ActorsGetActorUsageResponse' + $ref: '#/components/schemas/ActorsV1GetActorResponse' '400': description: '' content: @@ -542,44 +566,51 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /actors/query: - get: - description: >- - Queries actors using a JSON-encoded query expression. Supports - pagination with cursor-based navigation. - operationId: actors_query + delete: + description: Destroy a actor. + operationId: actors_v1_destroy tags: - - Actors + - ActorsV1 parameters: - - name: project - in: query - required: false + - name: actor + in: path + description: The id of the actor to destroy + required: true schema: type: string - - name: environment + format: uuid + - name: project in: query required: false schema: type: string - - name: query_json + - name: environment in: query - description: JSON-encoded query expression for filtering actors required: false schema: type: string - - name: cursor + - name: override_kill_timeout in: query - description: Cursor for pagination + description: >- + The duration to wait for in milliseconds before killing the actor. + This should be used to override the default kill timeout if a faster + time is needed, say for ignoring a graceful shutdown. required: false schema: - type: string + type: integer + format: int64 +>>>>>>> 43e5048bc (fix: api changes) responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ActorsQueryActorsResponse' +<<<<<<< HEAD + $ref: '#/components/schemas/ActorsGetActorUsageResponse' +======= + $ref: '#/components/schemas/ActorsV1DestroyActorResponse' +>>>>>>> 43e5048bc (fix: api changes) '400': description: '' content: @@ -617,19 +648,26 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /builds/{build}: +<<<<<<< HEAD + /actors/query: get: - description: Get a build. - operationId: builds_get + description: >- + Queries actors using a JSON-encoded query expression. Supports + pagination with cursor-based navigation. + operationId: actors_query tags: - - Builds + - Actors +======= + /actors: + get: + description: >- + Lists all actors associated with the token used. Can be filtered by tags + in the query string. 
+ operationId: actors_v1_list + tags: + - ActorsV1 +>>>>>>> 43e5048bc (fix: api changes) parameters: - - name: build - in: path - required: true - schema: - type: string - format: uuid - name: project in: query required: false @@ -640,13 +678,49 @@ paths: required: false schema: type: string +<<<<<<< HEAD + - name: query_json + in: query + description: JSON-encoded query expression for filtering actors + required: false + schema: + type: string + - name: cursor + in: query + description: Cursor for pagination +======= + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ActorsV1EndpointType' + - name: tags_json + in: query + required: false + schema: + type: string + - name: include_destroyed + in: query + required: false + schema: + type: boolean + - name: cursor + in: query +>>>>>>> 43e5048bc (fix: api changes) + required: false + schema: + type: string responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/BuildsGetBuildResponse' +<<<<<<< HEAD + $ref: '#/components/schemas/ActorsQueryActorsResponse' +======= + $ref: '#/components/schemas/ActorsV1ListActorsResponse' +>>>>>>> 43e5048bc (fix: api changes) '400': description: '' content: @@ -684,14 +758,13 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /builds: - get: - description: >- - Lists all builds of the project associated with the token used. Can be - filtered by tags in the query string. - operationId: builds_list +<<<<<<< HEAD +======= + post: + description: Create a new actor. + operationId: actors_v1_create tags: - - Builds + - ActorsV1 parameters: - name: project in: query @@ -703,18 +776,18 @@ paths: required: false schema: type: string - - name: tags_json + - name: endpoint_type in: query required: false schema: - type: string + $ref: '#/components/schemas/ActorsV1EndpointType' responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/BuildsListBuildsResponse' + $ref: '#/components/schemas/ActorsV1CreateActorResponse' '400': description: '' content: @@ -752,14 +825,22 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /builds/{build}/tags: - patch: - operationId: builds_patchTags + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ActorsV1CreateActorRequest' + /actors/{actor}/upgrade: + post: + description: Upgrades a actor. + operationId: actors_v1_upgrade tags: - - Builds + - ActorsV1 parameters: - - name: build + - name: actor in: path + description: The id of the actor to upgrade required: true schema: type: string @@ -780,7 +861,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/BuildsPatchBuildTagsResponse' + $ref: '#/components/schemas/ActorsV1UpgradeActorResponse' '400': description: '' content: @@ -823,13 +904,13 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/BuildsPatchBuildTagsRequest' - /builds/prepare: + $ref: '#/components/schemas/ActorsV1UpgradeActorRequest' + /actors/upgrade: post: - description: Creates a new project build for the given project. - operationId: builds_prepare + description: Upgrades all actors matching the given tags. 
+ operationId: actors_v1_upgradeAll tags: - - Builds + - ActorsV1 parameters: - name: project in: query @@ -847,7 +928,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/BuildsPrepareBuildResponse' + $ref: '#/components/schemas/ActorsV1UpgradeAllActorsResponse' '400': description: '' content: @@ -890,11 +971,12 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/BuildsPrepareBuildRequest' - /builds/{build}/complete: - post: - description: Marks an upload as complete. - operationId: builds_complete + $ref: '#/components/schemas/ActorsV1UpgradeAllActorsRequest' +>>>>>>> 43e5048bc (fix: api changes) + /builds/{build}: + get: + description: Get a build. + operationId: builds_get tags: - Builds parameters: @@ -915,8 +997,12 @@ paths: schema: type: string responses: - '204': + '200': description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/BuildsGetBuildResponse' '400': description: '' content: @@ -954,20 +1040,37 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /cloud/bootstrap: + /builds: get: - description: Returns the basic information required to use the cloud APIs. - operationId: cloud_bootstrap + description: >- + Lists all builds of the project associated with the token used. Can be + filtered by tags in the query string. + operationId: builds_list tags: - - Cloud - parameters: [] + - Builds + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: tags_json + in: query + required: false + schema: + type: string responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/CloudBootstrapResponse' + $ref: '#/components/schemas/BuildsListBuildsResponse' '400': description: '' content: @@ -1005,18 +1108,25 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /cloud/games: - get: - description: >- - Returns a list of games in which the current identity is a group member - of its development team. - operationId: cloud_games_getGames + /builds/{build}/tags: + patch: + operationId: builds_patchTags tags: - - CloudGames + - Builds parameters: - - name: watch_index + - name: build + in: path + required: true + schema: + type: string + format: uuid + - name: project + in: query + required: false + schema: + type: string + - name: environment in: query - description: A query parameter denoting the requests watch index. required: false schema: type: string @@ -1026,7 +1136,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CloudGamesGetGamesResponse' + $ref: '#/components/schemas/BuildsPatchBuildTagsResponse' '400': description: '' content: @@ -1064,19 +1174,36 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/BuildsPatchBuildTagsRequest' + /builds/prepare: post: - description: Creates a new game. - operationId: cloud_games_createGame + description: Creates a new project build for the given project. 
+ operationId: builds_prepare tags: - - CloudGames - parameters: [] + - Builds + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/CloudGamesCreateGameResponse' + $ref: '#/components/schemas/BuildsPrepareBuildResponse' '400': description: '' content: @@ -1119,18 +1246,247 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CloudGamesCreateGameRequest' - /cloud/games/validate: + $ref: '#/components/schemas/BuildsPrepareBuildRequest' + /builds/{build}/complete: post: - description: Validates information used to create a new game. - operationId: cloud_games_validateGame + description: Marks an upload as complete. + operationId: builds_complete tags: - - CloudGames - parameters: [] - responses: - '200': - description: '' - content: + - Builds + parameters: + - name: build + in: path + required: true + schema: + type: string + format: uuid + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + responses: + '204': + description: '' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + /cloud/bootstrap: + get: + description: Returns the basic information required to use the cloud APIs. + operationId: cloud_bootstrap + tags: + - Cloud + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/CloudBootstrapResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + /cloud/games: + get: + description: >- + Returns a list of games in which the current identity is a group member + of its development team. + operationId: cloud_games_getGames + tags: + - CloudGames + parameters: + - name: watch_index + in: query + description: A query parameter denoting the requests watch index. 
+ required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/CloudGamesGetGamesResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + post: + description: Creates a new game. + operationId: cloud_games_createGame + tags: + - CloudGames + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/CloudGamesCreateGameResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CloudGamesCreateGameRequest' + /cloud/games/validate: + post: + description: Validates information used to create a new game. + operationId: cloud_games_validateGame + tags: + - CloudGames + parameters: [] + responses: + '200': + description: '' + content: application/json: schema: $ref: '#/components/schemas/CloudGamesValidateGameResponse' @@ -2567,18 +2923,460 @@ paths: schema: $ref: >- #/components/schemas/CloudGamesNamespacesUpdateGameNamespaceVersionRequest - /pegboard/client/{client_id}/registered: - post: - operationId: coreIntercom_pegboard_markClientRegistered + /v1/containers/{container}: + get: + description: Gets a container. 
+ operationId: containers_get tags: - - CoreIntercomPegboard + - Containers parameters: - - name: client_id + - name: container in: path + description: The id of the container to destroy required: true + schema: + $ref: '#/components/schemas/Id' + - name: project + in: query + required: false schema: type: string - format: uuid + - name: environment + in: query + required: false + schema: + type: string + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ContainersEndpointType' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersGetContainerResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + delete: + description: Destroy a container. + operationId: containers_destroy + tags: + - Containers + parameters: + - name: container + in: path + description: The id of the container to destroy + required: true + schema: + $ref: '#/components/schemas/Id' + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: override_kill_timeout + in: query + description: >- + The duration to wait for in milliseconds before killing the + container. This should be used to override the default kill timeout + if a faster time is needed, say for ignoring a graceful shutdown. + required: false + schema: + type: integer + format: int64 + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersDestroyContainerResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + /v1/containers: + get: + description: >- + Lists all containers associated with the token used. Can be filtered by + tags in the query string. 
+ operationId: containers_list + tags: + - Containers + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ContainersEndpointType' + - name: tags_json + in: query + required: false + schema: + type: string + - name: include_destroyed + in: query + required: false + schema: + type: boolean + - name: cursor + in: query + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersListContainersResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + post: + description: Create a new container. + operationId: containers_create + tags: + - Containers + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ContainersEndpointType' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersCreateContainerResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersCreateContainerRequest' + /v1/containers/{container}/upgrade: + post: + description: Upgrades a container. 
+ operationId: containers_upgrade + tags: + - Containers + parameters: + - name: container + in: path + description: The id of the container to upgrade + required: true + schema: + $ref: '#/components/schemas/Id' + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersUpgradeContainerResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersUpgradeContainerRequest' + /v1/containers/upgrade: + post: + description: Upgrades all containers matching the given tags. + operationId: containers_upgradeAll + tags: + - Containers + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersUpgradeAllContainersResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersUpgradeAllContainersRequest' + /pegboard/client/{client_id}/registered: + post: + operationId: coreIntercom_pegboard_markClientRegistered + tags: + - CoreIntercomPegboard + parameters: + - name: client_id + in: path + required: true + schema: + type: string + format: uuid responses: '204': description: '' @@ -5437,30 +6235,171 @@ paths: required: true schema: type: string - format: uuid - - name: tags_json + format: uuid + - name: tags_json + in: query + required: false + schema: + type: string + - name: include_destroyed + in: query + required: false + schema: + type: boolean + - name: cursor + in: query + required: false + schema: + type: string + format: uuid + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ServersListServersResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + 
application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + post: + description: Create a new dynamic server. + operationId: servers_create + tags: + - Servers + parameters: + - name: game_id + in: path + required: true + schema: + type: string + format: uuid + - name: environment_id + in: path + required: true + schema: + type: string + format: uuid + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ServersCreateServerResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ServersCreateServerRequest' + /v2/actors/logs: + get: + description: Returns the logs for a given actor. + operationId: actors_logs_get + tags: + - ActorsLogs + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment in: query required: false schema: type: string - - name: include_destroyed + - name: query_json in: query + description: JSON-encoded query expression for filtering logs required: false schema: - type: boolean - - name: cursor + type: string + - name: watch_index in: query + description: A query parameter denoting the requests watch index. required: false schema: type: string - format: uuid responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ServersListServersResponse' + $ref: '#/components/schemas/ActorsGetActorLogsResponse' '400': description: '' content: @@ -5498,31 +6437,23 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 +<<<<<<< HEAD + /actors/logs/export: post: - description: Create a new dynamic server. - operationId: servers_create + description: >- + Exports logs for the given actors to an S3 bucket and returns a + presigned URL to download. 
+ operationId: actors_logs_export tags: - - Servers - parameters: - - name: game_id - in: path - required: true - schema: - type: string - format: uuid - - name: environment_id - in: path - required: true - schema: - type: string - format: uuid + - ActorsLogs + parameters: [] responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ServersCreateServerResponse' + $ref: '#/components/schemas/ActorsExportActorLogsResponse' '400': description: '' content: @@ -5565,14 +6496,31 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/ServersCreateServerRequest' - /actors/logs: + type: object + properties: + project: + type: string + environment: + type: string + query_json: + type: string + description: JSON-encoded query expression for filtering logs + /actors/{actor}/metrics/history: +======= + /v2/actors/{actor}/metrics/history: +>>>>>>> 43e5048bc (fix: api changes) get: - description: Returns the logs for a given actor. - operationId: actors_logs_get + description: Returns the metrics for a given actor. + operationId: actors_metrics_get tags: - - ActorsLogs + - ActorsMetrics parameters: + - name: actor + in: path + description: The id of the actor to destroy + required: true + schema: + $ref: '#/components/schemas/Id' - name: project in: query required: false @@ -5583,25 +6531,28 @@ paths: required: false schema: type: string - - name: query_json + - name: start in: query - description: JSON-encoded query expression for filtering logs - required: false + required: true schema: - type: string - - name: watch_index + type: integer + - name: end in: query - description: A query parameter denoting the requests watch index. - required: false + required: true schema: - type: string + type: integer + - name: interval + in: query + required: true + schema: + type: integer responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ActorsGetActorLogsResponse' + $ref: '#/components/schemas/ActorsGetActorMetricsResponse' '400': description: '' content: @@ -5639,22 +6590,61 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /actors/logs/export: - post: - description: >- - Exports logs for the given actors to an S3 bucket and returns a - presigned URL to download. - operationId: actors_logs_export + /actors/logs: + get: + description: Returns the logs for a given actor. + operationId: actors_v1_logs_get tags: - - ActorsLogs - parameters: [] + - ActorsV1Logs + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: stream + in: query + required: true + schema: + $ref: '#/components/schemas/ActorsV1QueryLogStream' + - name: actor_ids_json + in: query + required: true + schema: + type: string + - name: search_text + in: query + required: false + schema: + type: string + - name: search_case_sensitive + in: query + required: false + schema: + type: boolean + - name: search_enable_regex + in: query + required: false + schema: + type: boolean + - name: watch_index + in: query + description: A query parameter denoting the requests watch index. 
+ required: false + schema: + type: string responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ActorsExportActorLogsResponse' + $ref: '#/components/schemas/ActorsV1GetActorLogsResponse' '400': description: '' content: @@ -5692,26 +6682,12 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - project: - type: string - environment: - type: string - query_json: - type: string - description: JSON-encoded query expression for filtering logs /actors/{actor}/metrics/history: get: description: Returns the metrics for a given actor. - operationId: actors_metrics_get + operationId: actors_v1_metrics_get tags: - - ActorsMetrics + - ActorsV1Metrics parameters: - name: actor in: path @@ -5751,7 +6727,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/ActorsGetActorMetricsResponse' + $ref: '#/components/schemas/ActorsV1GetActorMetricsResponse' '400': description: '' content: @@ -7502,7 +8478,152 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CloudGetRegionTiersResponse' + $ref: '#/components/schemas/CloudGetRegionTiersResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + /cloud/uploads/{upload_id}/complete: + post: + description: Marks an upload as complete. + operationId: cloud_uploads_completeUpload + tags: + - CloudUploads + parameters: + - name: upload_id + in: path + required: true + schema: + type: string + format: uuid + responses: + '204': + description: '' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + /v1/containers/logs: + get: + description: Returns the logs for a given container. 
+ operationId: containers_logs_get + tags: + - ContainersLogs + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: stream + in: query + required: true + schema: + $ref: '#/components/schemas/ContainersQueryLogStream' + - name: container_ids_json + in: query + required: true + schema: + type: string + - name: search_text + in: query + required: false + schema: + type: string + - name: search_case_sensitive + in: query + required: false + schema: + type: boolean + - name: search_enable_regex + in: query + required: false + schema: + type: boolean + - name: watch_index + in: query + description: A query parameter denoting the requests watch index. + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersGetContainerLogsResponse' '400': description: '' content: @@ -7540,22 +8661,51 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /cloud/uploads/{upload_id}/complete: - post: - description: Marks an upload as complete. - operationId: cloud_uploads_completeUpload + /v1/containers/{container}/metrics/history: + get: + description: Returns the metrics for a given container. + operationId: containers_metrics_get tags: - - CloudUploads + - ContainersMetrics parameters: - - name: upload_id + - name: container in: path + description: The id of the container to destroy required: true + schema: + $ref: '#/components/schemas/Id' + - name: project + in: query + required: false schema: type: string - format: uuid + - name: environment + in: query + required: false + schema: + type: string + - name: start + in: query + required: true + schema: + type: integer + - name: end + in: query + required: true + schema: + type: integer + - name: interval + in: query + required: true + schema: + type: integer responses: - '204': + '200': description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersGetContainerMetricsResponse' '400': description: '' content: @@ -9634,8 +10784,6 @@ components: $ref: '#/components/schemas/ActorsCreateActorRuntimeRequest' network: $ref: '#/components/schemas/ActorsCreateActorNetworkRequest' - resources: - $ref: '#/components/schemas/ActorsResources' lifecycle: $ref: '#/components/schemas/ActorsLifecycle' required: @@ -9730,6 +10878,7 @@ components: required: - actors - pagination +<<<<<<< HEAD ActorsGetActorUsageResponse: type: object properties: @@ -9760,13 +10909,125 @@ components: - metric_types - metric_values ActorsQueryActorsResponse: +======= + ActorsV1GetActorResponse: + type: object + properties: + actor: + $ref: '#/components/schemas/ActorsV1Actor' + required: + - actor + ActorsV1CreateActorRequest: + type: object + properties: + region: + type: string + tags: {} + build: + type: string + format: uuid + build_tags: {} + runtime: + $ref: '#/components/schemas/ActorsV1CreateActorRuntimeRequest' + network: + $ref: '#/components/schemas/ActorsV1CreateActorNetworkRequest' + resources: + $ref: '#/components/schemas/ActorsV1Resources' + lifecycle: + $ref: '#/components/schemas/ActorsV1Lifecycle' + required: + - tags + ActorsV1CreateActorRuntimeRequest: + type: object + properties: + environment: + type: object + additionalProperties: + type: string + network: + $ref: '#/components/schemas/ActorsV1CreateActorRuntimeNetworkRequest' + ActorsV1CreateActorRuntimeNetworkRequest: + type: object + 
properties: + endpoint_type: + $ref: '#/components/schemas/ActorsV1EndpointType' + required: + - endpoint_type + ActorsV1CreateActorNetworkRequest: + type: object + properties: + mode: + $ref: '#/components/schemas/ActorsV1NetworkMode' + ports: + type: object + additionalProperties: + $ref: '#/components/schemas/ActorsV1CreateActorPortRequest' + wait_ready: + type: boolean + ActorsV1CreateActorPortRequest: + type: object + properties: + protocol: + $ref: '#/components/schemas/ActorsV1PortProtocol' + internal_port: + type: integer + routing: + $ref: '#/components/schemas/ActorsV1PortRouting' + required: + - protocol + ActorsV1CreateActorResponse: + type: object + properties: + actor: + $ref: '#/components/schemas/ActorsV1Actor' + description: The actor that was created + required: + - actor + ActorsV1DestroyActorResponse: + type: object + properties: {} + ActorsV1UpgradeActorRequest: + type: object + properties: + build: + type: string + format: uuid + build_tags: {} + ActorsV1UpgradeActorResponse: + type: object + properties: {} + ActorsV1UpgradeAllActorsRequest: + type: object + properties: + tags: {} + build: + type: string + format: uuid + build_tags: {} + required: + - tags + ActorsV1UpgradeAllActorsResponse: + type: object + properties: + count: + type: integer + format: int64 + required: + - count + ActorsV1ListActorsResponse: +>>>>>>> 43e5048bc (fix: api changes) type: object properties: actors: type: array items: +<<<<<<< HEAD $ref: '#/components/schemas/ActorsActor' description: A list of actors matching the query +======= + $ref: '#/components/schemas/ActorsV1Actor' + description: A list of actors for the project associated with the token. +>>>>>>> 43e5048bc (fix: api changes) pagination: $ref: '#/components/schemas/Pagination' required: @@ -10449,41 +11710,159 @@ components: properties: {} CloudVersionMatchmakerMatchmakerConfig: type: object - description: Matchmaker configuration for a given version. + description: Matchmaker configuration for a given version. + properties: + game_modes: + type: object + additionalProperties: + $ref: '#/components/schemas/CloudVersionMatchmakerGameMode' + description: A list of game modes. + captcha: + $ref: '#/components/schemas/CloudVersionMatchmakerCaptcha' + dev_hostname: + type: string + description: _Configures Rivet CLI behavior. Has no effect on server behavior._ + regions: + type: object + additionalProperties: + $ref: '#/components/schemas/CloudVersionMatchmakerGameModeRegion' + max_players: + type: integer + max_players_direct: + type: integer + max_players_party: + type: integer + docker: + $ref: '#/components/schemas/CloudVersionMatchmakerGameModeRuntimeDocker' + tier: + type: string + idle_lobbies: + $ref: '#/components/schemas/CloudVersionMatchmakerGameModeIdleLobbiesConfig' + lobby_groups: + type: array + items: + $ref: '#/components/schemas/CloudVersionMatchmakerLobbyGroup' + description: |- + **Deprecated: use `game_modes` instead** + A list of game modes. 
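To make the `ActorsV1CreateActor*` schemas above concrete, here is a minimal sketch of assembling an `ActorsV1CreateActorRequest` body and sending it to the v1 `POST /actors` operation (`actors_v1_create`). The base URL, bearer token, region slug, and `project`/`environment` values are placeholders; per the schema, only `tags` is required.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// ActorsV1CreateActorRequest: only `tags` is required; all other fields are optional.
	body := map[string]any{
		"tags":   map[string]string{"name": "lobby"}, // placeholder tags
		"region": "atl",                              // placeholder region slug
		"runtime": map[string]any{
			"environment": map[string]string{"PORT": "8080"},
		},
		"network": map[string]any{
			"mode": "bridge",
			"ports": map[string]any{
				"http": map[string]any{"protocol": "https", "internal_port": 8080},
			},
		},
		// cpu is in millicores and memory in megabytes per ActorsV1Resources.
		"resources": map[string]int{"cpu": 1000, "memory": 1024},
		"lifecycle": map[string]any{"kill_timeout": 30000, "durable": true},
	}

	buf, err := json.Marshal(body)
	if err != nil {
		panic(err)
	}
	// Base URL, token, project, and environment are placeholders.
	req, err := http.NewRequest("POST",
		"https://api.example.com/actors?project=my-project&environment=prod",
		bytes.NewReader(buf))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer RIVET_TOKEN")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```

A `200` response carries `ActorsV1CreateActorResponse`, whose single required field is the created `actor`.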
+ ContainersGetContainerResponse: + type: object + properties: + container: + $ref: '#/components/schemas/ContainersContainer' + required: + - container + ContainersCreateContainerRequest: + type: object + properties: + region: + type: string + tags: {} + build: + type: string + format: uuid + build_tags: {} + runtime: + $ref: '#/components/schemas/ContainersCreateContainerRuntimeRequest' + network: + $ref: '#/components/schemas/ContainersCreateContainerNetworkRequest' + resources: + $ref: '#/components/schemas/ContainersResources' + lifecycle: + $ref: '#/components/schemas/ContainersLifecycle' + required: + - tags + - resources + ContainersCreateContainerRuntimeRequest: + type: object + properties: + environment: + type: object + additionalProperties: + type: string + network: + $ref: '#/components/schemas/ContainersCreateContainerRuntimeNetworkRequest' + ContainersCreateContainerRuntimeNetworkRequest: + type: object + properties: + endpoint_type: + $ref: '#/components/schemas/ContainersEndpointType' + required: + - endpoint_type + ContainersCreateContainerNetworkRequest: + type: object + properties: + mode: + $ref: '#/components/schemas/ContainersNetworkMode' + ports: + type: object + additionalProperties: + $ref: '#/components/schemas/ContainersCreateContainerPortRequest' + wait_ready: + type: boolean + ContainersCreateContainerPortRequest: + type: object + properties: + protocol: + $ref: '#/components/schemas/ContainersPortProtocol' + internal_port: + type: integer + routing: + $ref: '#/components/schemas/ContainersPortRouting' + required: + - protocol + ContainersCreateContainerResponse: + type: object + properties: + container: + $ref: '#/components/schemas/ContainersContainer' + description: The container that was created + required: + - container + ContainersDestroyContainerResponse: + type: object + properties: {} + ContainersUpgradeContainerRequest: + type: object + properties: + build: + type: string + format: uuid + build_tags: {} + ContainersUpgradeContainerResponse: + type: object + properties: {} + ContainersUpgradeAllContainersRequest: + type: object + properties: + tags: {} + build: + type: string + format: uuid + build_tags: {} + required: + - tags + ContainersUpgradeAllContainersResponse: + type: object properties: - game_modes: - type: object - additionalProperties: - $ref: '#/components/schemas/CloudVersionMatchmakerGameMode' - description: A list of game modes. - captcha: - $ref: '#/components/schemas/CloudVersionMatchmakerCaptcha' - dev_hostname: - type: string - description: _Configures Rivet CLI behavior. Has no effect on server behavior._ - regions: - type: object - additionalProperties: - $ref: '#/components/schemas/CloudVersionMatchmakerGameModeRegion' - max_players: - type: integer - max_players_direct: - type: integer - max_players_party: + count: type: integer - docker: - $ref: '#/components/schemas/CloudVersionMatchmakerGameModeRuntimeDocker' - tier: - type: string - idle_lobbies: - $ref: '#/components/schemas/CloudVersionMatchmakerGameModeIdleLobbiesConfig' - lobby_groups: + format: int64 + required: + - count + ContainersListContainersResponse: + type: object + properties: + containers: type: array items: - $ref: '#/components/schemas/CloudVersionMatchmakerLobbyGroup' - description: |- - **Deprecated: use `game_modes` instead** - A list of game modes. + $ref: '#/components/schemas/ContainersContainer' + description: A list of containers for the project associated with the token. 
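The `ContainersListContainersResponse` being defined here pairs a `containers` array with a `Pagination` object, so listing proceeds by echoing `pagination.cursor` back as the `cursor` query parameter of `GET /v1/containers`. A rough sketch, assuming a placeholder base URL and token, and assuming the server omits the cursor once the last page has been returned:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// Minimal view of ContainersListContainersResponse.
type page struct {
	Containers []json.RawMessage `json:"containers"`
	Pagination struct {
		Cursor string `json:"cursor"`
	} `json:"pagination"`
}

func main() {
	base := "https://api.example.com/v1/containers" // placeholder base URL
	cursor := ""
	for {
		q := url.Values{"project": {"my-project"}, "environment": {"prod"}}
		q.Set("tags_json", `{"name":"lobby"}`) // tags filter is optional
		if cursor != "" {
			q.Set("cursor", cursor)
		}
		req, _ := http.NewRequest("GET", base+"?"+q.Encode(), nil)
		req.Header.Set("Authorization", "Bearer RIVET_TOKEN") // placeholder token

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		var p page
		if err := json.NewDecoder(resp.Body).Decode(&p); err != nil {
			panic(err)
		}
		resp.Body.Close()

		fmt.Printf("fetched %d containers\n", len(p.Containers))
		if p.Pagination.Cursor == "" {
			break // assumed stopping condition: no cursor means no further pages
		}
		cursor = p.Pagination.Cursor
	}
}
```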
+ pagination: + $ref: '#/components/schemas/Pagination' + required: + - containers + - pagination CoreIntercomPegboardMarkClientRegisteredRequest: type: object properties: @@ -10996,26 +12375,246 @@ components: servers: type: array items: - $ref: '#/components/schemas/ServersServer' - description: A list of servers for the game associated with the token. + $ref: '#/components/schemas/ServersServer' + description: A list of servers for the game associated with the token. + required: + - servers + ActorsActor: + type: object + properties: + id: + $ref: '#/components/schemas/Id' + region: + type: string + tags: {} + runtime: + $ref: '#/components/schemas/ActorsRuntime' + network: + $ref: '#/components/schemas/ActorsNetwork' + lifecycle: + $ref: '#/components/schemas/ActorsLifecycle' + created_at: + $ref: '#/components/schemas/Timestamp' + started_at: + $ref: '#/components/schemas/Timestamp' + destroyed_at: + $ref: '#/components/schemas/Timestamp' + required: + - id + - region + - tags + - runtime + - network + - lifecycle + - created_at + ActorsRuntime: + type: object + properties: + build: + type: string + format: uuid + arguments: + type: array + items: + type: string + environment: + type: object + additionalProperties: + type: string + required: + - build + ActorsLifecycle: + type: object + properties: + kill_timeout: + type: integer + format: int64 + description: >- + The duration to wait for in milliseconds before killing the actor. + This should be set to a safe default, and can be overridden during a + DELETE request if needed. + durable: + type: boolean + description: >- + If true, the actor will try to reschedule itself automatically in + the event of a crash or a datacenter failover. The actor will not + reschedule if it exits successfully. + ActorsNetwork: + type: object + properties: + mode: + $ref: '#/components/schemas/ActorsNetworkMode' + ports: + type: object + additionalProperties: + $ref: '#/components/schemas/ActorsPort' + required: + - mode + - ports + ActorsNetworkMode: + type: string + enum: + - bridge + - host + ActorsPort: + type: object + properties: + protocol: + $ref: '#/components/schemas/ActorsPortProtocol' + internal_port: + type: integer + hostname: + type: string + port: + type: integer + path: + type: string + url: + type: string + description: >- + Fully formed connection URL including protocol, hostname, port, and + path, if applicable. + routing: + $ref: '#/components/schemas/ActorsPortRouting' + required: + - protocol + - routing + ActorsPortProtocol: + type: string + enum: + - http + - https + - tcp + - tcp_tls + - udp + ActorsPortRouting: + type: object + properties: + guard: + $ref: '#/components/schemas/ActorsGuardRouting' + host: + $ref: '#/components/schemas/ActorsHostRouting' + ActorsGuardRouting: + type: object + properties: {} + ActorsHostRouting: + type: object + properties: {} + ActorsEndpointType: + type: string + enum: + - hostname + - path + ActorsGetActorLogsResponse: + type: object + properties: + actor_ids: + type: array + items: + type: string + description: >- + List of actor IDs in these logs. The order of these correspond to + the index in the log entry. + lines: + type: array + items: + type: string + description: Sorted old to new. + timestamps: + type: array + items: + $ref: '#/components/schemas/Timestamp' + description: Sorted old to new. + streams: + type: array + items: + type: integer + description: |- + Streams the logs came from. 
+ + 0 = stdout + 1 = stderr + foreigns: + type: array + items: + type: boolean + description: List of flags denoting if this log is not directly from the actor. + actor_indices: + type: array + items: + type: integer + description: >- + Index of the actor that this log was for. Use this index to look the + full ID in `actor_ids`. + watch: + $ref: '#/components/schemas/WatchResponse' + required: + - actor_ids + - lines + - timestamps + - streams + - foreigns + - actor_indices + - watch + ActorsExportActorLogsResponse: + type: object + properties: + url: + type: string + description: Presigned URL to download the exported logs + required: + - url + ActorsGetActorMetricsResponse: + type: object + properties: + actor_ids: + type: array + items: + type: string + metric_names: + type: array + items: + type: string + metric_attributes: + type: array + items: + type: object + additionalProperties: + type: string + metric_types: + type: array + items: + type: string + metric_values: + type: array + items: + type: array + items: + type: number + format: double required: - - servers - ActorsActor: + - actor_ids + - metric_names + - metric_attributes + - metric_types + - metric_values + ActorsV1Actor: type: object properties: id: - $ref: '#/components/schemas/Id' + type: string + format: uuid region: type: string tags: {} runtime: - $ref: '#/components/schemas/ActorsRuntime' + $ref: '#/components/schemas/ActorsV1Runtime' network: - $ref: '#/components/schemas/ActorsNetwork' + $ref: '#/components/schemas/ActorsV1Network' resources: - $ref: '#/components/schemas/ActorsResources' + $ref: '#/components/schemas/ActorsV1Resources' lifecycle: - $ref: '#/components/schemas/ActorsLifecycle' + $ref: '#/components/schemas/ActorsV1Lifecycle' created_at: $ref: '#/components/schemas/Timestamp' started_at: @@ -11030,7 +12629,7 @@ components: - network - lifecycle - created_at - ActorsRuntime: + ActorsV1Runtime: type: object properties: build: @@ -11046,7 +12645,7 @@ components: type: string required: - build - ActorsLifecycle: + ActorsV1Lifecycle: type: object properties: kill_timeout: @@ -11062,7 +12661,7 @@ components: If true, the actor will try to reschedule itself automatically in the event of a crash or a datacenter failover. The actor will not reschedule if it exits successfully. - ActorsResources: + ActorsV1Resources: type: object properties: cpu: @@ -11080,28 +12679,28 @@ components: required: - cpu - memory - ActorsNetwork: + ActorsV1Network: type: object properties: mode: - $ref: '#/components/schemas/ActorsNetworkMode' + $ref: '#/components/schemas/ActorsV1NetworkMode' ports: type: object additionalProperties: - $ref: '#/components/schemas/ActorsPort' + $ref: '#/components/schemas/ActorsV1Port' required: - mode - ports - ActorsNetworkMode: + ActorsV1NetworkMode: type: string enum: - bridge - host - ActorsPort: + ActorsV1Port: type: object properties: protocol: - $ref: '#/components/schemas/ActorsPortProtocol' + $ref: '#/components/schemas/ActorsV1PortProtocol' internal_port: type: integer hostname: @@ -11116,11 +12715,11 @@ components: Fully formed connection URL including protocol, hostname, port, and path, if applicable. 
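The log responses above are columnar: `lines`, `timestamps`, `streams`, `foreigns`, and `actor_indices` are parallel arrays, and each entry of `actor_indices` points back into `actor_ids`. A small sketch of stitching those columns into per-line records (the wire type of `Timestamp` is not shown in this excerpt, so it is left as raw JSON):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Shape of ActorsGetActorLogsResponse: parallel arrays with one entry per log line.
type logsResponse struct {
	ActorIDs     []string          `json:"actor_ids"`
	Lines        []string          `json:"lines"`
	Timestamps   []json.RawMessage `json:"timestamps"` // Timestamp wire type not shown here
	Streams      []int             `json:"streams"`    // 0 = stdout, 1 = stderr
	Foreigns     []bool            `json:"foreigns"`
	ActorIndices []int             `json:"actor_indices"`
}

// printLogs stitches the columnar arrays back into per-line records.
func printLogs(res logsResponse) {
	for i, line := range res.Lines {
		stream := "stdout"
		if res.Streams[i] == 1 {
			stream = "stderr"
		}
		// actor_indices[i] is an index into actor_ids.
		actorID := res.ActorIDs[res.ActorIndices[i]]
		fmt.Printf("%s [%s] (%s) foreign=%v %s\n",
			res.Timestamps[i], actorID, stream, res.Foreigns[i], line)
	}
}

func main() {
	// In practice res is decoded from the GET /v2/actors/logs response body with encoding/json.
	var res logsResponse
	printLogs(res)
}
```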
routing: - $ref: '#/components/schemas/ActorsPortRouting' + $ref: '#/components/schemas/ActorsV1PortRouting' required: - protocol - routing - ActorsPortProtocol: + ActorsV1PortProtocol: type: string enum: - http @@ -11128,31 +12727,31 @@ components: - tcp - tcp_tls - udp - ActorsPortRouting: + ActorsV1PortRouting: type: object properties: guard: - $ref: '#/components/schemas/ActorsGuardRouting' + $ref: '#/components/schemas/ActorsV1GuardRouting' host: - $ref: '#/components/schemas/ActorsHostRouting' - ActorsGuardRouting: + $ref: '#/components/schemas/ActorsV1HostRouting' + ActorsV1GuardRouting: type: object properties: {} - ActorsHostRouting: + ActorsV1HostRouting: type: object properties: {} - ActorsEndpointType: + ActorsV1EndpointType: type: string enum: - hostname - path - ActorsGetActorLogsResponse: + ActorsV1GetActorLogsResponse: type: object properties: actor_ids: type: array items: - $ref: '#/components/schemas/Id' + type: string description: >- List of actor IDs in these logs. The order of these correspond to the index in the log entry. @@ -11175,11 +12774,6 @@ components: 0 = stdout 1 = stderr - foreigns: - type: array - items: - type: boolean - description: List of flags denoting if this log is not directly from the actor. actor_indices: type: array items: @@ -11194,18 +12788,15 @@ components: - lines - timestamps - streams - - foreigns - actor_indices - watch - ActorsExportActorLogsResponse: - type: object - properties: - url: - type: string - description: Presigned URL to download the exported logs - required: - - url - ActorsGetActorMetricsResponse: + ActorsV1QueryLogStream: + type: string + enum: + - std_out + - std_err + - all + ActorsV1GetActorMetricsResponse: type: object properties: actor_ids: @@ -13251,6 +14842,246 @@ components: properties: cursor: type: string + ContainersContainer: + type: object + properties: + id: + $ref: '#/components/schemas/Id' + region: + type: string + tags: {} + runtime: + $ref: '#/components/schemas/ContainersRuntime' + network: + $ref: '#/components/schemas/ContainersNetwork' + resources: + $ref: '#/components/schemas/ContainersResources' + lifecycle: + $ref: '#/components/schemas/ContainersLifecycle' + created_at: + $ref: '#/components/schemas/Timestamp' + started_at: + $ref: '#/components/schemas/Timestamp' + destroyed_at: + $ref: '#/components/schemas/Timestamp' + required: + - id + - region + - tags + - runtime + - network + - resources + - lifecycle + - created_at + ContainersRuntime: + type: object + properties: + build: + type: string + format: uuid + arguments: + type: array + items: + type: string + environment: + type: object + additionalProperties: + type: string + required: + - build + ContainersLifecycle: + type: object + properties: + kill_timeout: + type: integer + format: int64 + description: >- + The duration to wait for in milliseconds before killing the + container. This should be set to a safe default, and can be + overridden during a DELETE request if needed. + durable: + type: boolean + description: >- + If true, the container will try to reschedule itself automatically + in the event of a crash or a datacenter failover. The container will + not reschedule if it exits successfully. + ContainersResources: + type: object + properties: + cpu: + type: integer + description: >- + The number of CPU cores in millicores, or 1/1000 of a core. For + example, + + 1/8 of a core would be 125 millicores, and 1 core would be 1000 + + millicores. 
+ memory: + type: integer + description: The amount of memory in megabytes + required: + - cpu + - memory + ContainersNetwork: + type: object + properties: + mode: + $ref: '#/components/schemas/ContainersNetworkMode' + ports: + type: object + additionalProperties: + $ref: '#/components/schemas/ContainersPort' + required: + - mode + - ports + ContainersNetworkMode: + type: string + enum: + - bridge + - host + ContainersPort: + type: object + properties: + protocol: + $ref: '#/components/schemas/ContainersPortProtocol' + internal_port: + type: integer + hostname: + type: string + port: + type: integer + path: + type: string + url: + type: string + description: >- + Fully formed connection URL including protocol, hostname, port, and + path, if applicable. + routing: + $ref: '#/components/schemas/ContainersPortRouting' + required: + - protocol + - routing + ContainersPortProtocol: + type: string + enum: + - http + - https + - tcp + - tcp_tls + - udp + ContainersPortRouting: + type: object + properties: + guard: + $ref: '#/components/schemas/ContainersGuardRouting' + host: + $ref: '#/components/schemas/ContainersHostRouting' + ContainersGuardRouting: + type: object + properties: {} + ContainersHostRouting: + type: object + properties: {} + ContainersEndpointType: + type: string + enum: + - hostname + - path + ContainersGetContainerLogsResponse: + type: object + properties: + container_ids: + type: array + items: + $ref: '#/components/schemas/Id' + description: >- + List of container IDs in these logs. The order of these correspond + to the index in the log entry. + lines: + type: array + items: + type: string + description: Sorted old to new. + timestamps: + type: array + items: + $ref: '#/components/schemas/Timestamp' + description: Sorted old to new. + streams: + type: array + items: + type: integer + description: |- + Streams the logs came from. + + 0 = stdout + 1 = stderr + foreigns: + type: array + items: + type: boolean + description: >- + List of flags denoting if this log is not directly from the + container. + container_indices: + type: array + items: + type: integer + description: >- + Index of the container that this log was for. Use this index to look + the full ID in `container_ids`. + watch: + $ref: '#/components/schemas/WatchResponse' + required: + - container_ids + - lines + - timestamps + - streams + - foreigns + - container_indices + - watch + ContainersQueryLogStream: + type: string + enum: + - std_out + - std_err + - all + ContainersGetContainerMetricsResponse: + type: object + properties: + container_ids: + type: array + items: + type: string + metric_names: + type: array + items: + type: string + metric_attributes: + type: array + items: + type: object + additionalProperties: + type: string + metric_types: + type: array + items: + type: string + metric_values: + type: array + items: + type: array + items: + type: number + format: double + required: + - container_ids + - metric_names + - metric_attributes + - metric_types + - metric_values GameHandle: type: object properties: diff --git a/sdks/api/full/openapi_compat/openapi.yml b/sdks/api/full/openapi_compat/openapi.yml index 05b9d1464f..b482319870 100644 --- a/sdks/api/full/openapi_compat/openapi.yml +++ b/sdks/api/full/openapi_compat/openapi.yml @@ -3,7 +3,7 @@ info: title: Rivet API version: 0.0.1 paths: - '/actors/{actor}': + '/v2/actors/{actor}': get: description: Gets a actor. 
operationId: actors_get @@ -152,7 +152,7 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /actors: + /v2/actors: get: description: >- Lists all actors associated with the token used. Can be filtered by tags @@ -306,7 +306,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ActorsCreateActorRequest' - '/actors/{actor}/upgrade': + '/v2/actors/{actor}/upgrade': post: description: Upgrades a actor. operationId: actors_upgrade @@ -379,7 +379,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ActorsUpgradeActorRequest' - /actors/upgrade: + /v2/actors/upgrade: post: description: Upgrades all actors matching the given tags. operationId: actors_upgradeAll @@ -446,6 +446,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ActorsUpgradeAllActorsRequest' +<<<<<<< HEAD /actors/usage: get: description: >- @@ -455,6 +456,22 @@ paths: tags: - Actors parameters: +======= + '/actors/{actor}': + get: + description: Gets a actor. + operationId: actors_v1_get + tags: + - ActorsV1 + parameters: + - name: actor + in: path + description: The id of the actor to destroy + required: true + schema: + type: string + format: uuid +>>>>>>> 43e5048bc (fix: api changes) - name: project in: query required: false @@ -465,6 +482,7 @@ paths: required: false schema: type: string +<<<<<<< HEAD - name: start in: query description: Start timestamp in milliseconds @@ -498,13 +516,19 @@ paths: required: false schema: type: string +======= + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ActorsV1EndpointType' responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ActorsGetActorUsageResponse' + $ref: '#/components/schemas/ActorsV1GetActorResponse' '400': description: '' content: @@ -542,44 +566,51 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /actors/query: - get: - description: >- - Queries actors using a JSON-encoded query expression. Supports - pagination with cursor-based navigation. - operationId: actors_query + delete: + description: Destroy a actor. + operationId: actors_v1_destroy tags: - - Actors + - ActorsV1 parameters: - - name: project - in: query - required: false + - name: actor + in: path + description: The id of the actor to destroy + required: true schema: type: string - - name: environment + format: uuid + - name: project in: query required: false schema: type: string - - name: query_json + - name: environment in: query - description: JSON-encoded query expression for filtering actors required: false schema: type: string - - name: cursor + - name: override_kill_timeout in: query - description: Cursor for pagination + description: >- + The duration to wait for in milliseconds before killing the actor. + This should be used to override the default kill timeout if a faster + time is needed, say for ignoring a graceful shutdown. 
required: false schema: - type: string + type: integer + format: int64 +>>>>>>> 43e5048bc (fix: api changes) responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ActorsQueryActorsResponse' +<<<<<<< HEAD + $ref: '#/components/schemas/ActorsGetActorUsageResponse' +======= + $ref: '#/components/schemas/ActorsV1DestroyActorResponse' +>>>>>>> 43e5048bc (fix: api changes) '400': description: '' content: @@ -617,19 +648,26 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - '/builds/{build}': +<<<<<<< HEAD + /actors/query: get: - description: Get a build. - operationId: builds_get + description: >- + Queries actors using a JSON-encoded query expression. Supports + pagination with cursor-based navigation. + operationId: actors_query tags: - - Builds + - Actors +======= + /actors: + get: + description: >- + Lists all actors associated with the token used. Can be filtered by tags + in the query string. + operationId: actors_v1_list + tags: + - ActorsV1 +>>>>>>> 43e5048bc (fix: api changes) parameters: - - name: build - in: path - required: true - schema: - type: string - format: uuid - name: project in: query required: false @@ -640,13 +678,49 @@ paths: required: false schema: type: string +<<<<<<< HEAD + - name: query_json + in: query + description: JSON-encoded query expression for filtering actors + required: false + schema: + type: string + - name: cursor + in: query + description: Cursor for pagination +======= + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ActorsV1EndpointType' + - name: tags_json + in: query + required: false + schema: + type: string + - name: include_destroyed + in: query + required: false + schema: + type: boolean + - name: cursor + in: query +>>>>>>> 43e5048bc (fix: api changes) + required: false + schema: + type: string responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/BuildsGetBuildResponse' +<<<<<<< HEAD + $ref: '#/components/schemas/ActorsQueryActorsResponse' +======= + $ref: '#/components/schemas/ActorsV1ListActorsResponse' +>>>>>>> 43e5048bc (fix: api changes) '400': description: '' content: @@ -684,14 +758,13 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /builds: - get: - description: >- - Lists all builds of the project associated with the token used. Can be - filtered by tags in the query string. - operationId: builds_list +<<<<<<< HEAD +======= + post: + description: Create a new actor. + operationId: actors_v1_create tags: - - Builds + - ActorsV1 parameters: - name: project in: query @@ -703,18 +776,18 @@ paths: required: false schema: type: string - - name: tags_json + - name: endpoint_type in: query required: false schema: - type: string + $ref: '#/components/schemas/ActorsV1EndpointType' responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/BuildsListBuildsResponse' + $ref: '#/components/schemas/ActorsV1CreateActorResponse' '400': description: '' content: @@ -752,14 +825,22 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - '/builds/{build}/tags': - patch: - operationId: builds_patchTags + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ActorsV1CreateActorRequest' + '/actors/{actor}/upgrade': + post: + description: Upgrades a actor. 
+ operationId: actors_v1_upgrade tags: - - Builds + - ActorsV1 parameters: - - name: build + - name: actor in: path + description: The id of the actor to upgrade required: true schema: type: string @@ -780,7 +861,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/BuildsPatchBuildTagsResponse' + $ref: '#/components/schemas/ActorsV1UpgradeActorResponse' '400': description: '' content: @@ -823,13 +904,13 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/BuildsPatchBuildTagsRequest' - /builds/prepare: + $ref: '#/components/schemas/ActorsV1UpgradeActorRequest' + /actors/upgrade: post: - description: Creates a new project build for the given project. - operationId: builds_prepare + description: Upgrades all actors matching the given tags. + operationId: actors_v1_upgradeAll tags: - - Builds + - ActorsV1 parameters: - name: project in: query @@ -847,7 +928,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/BuildsPrepareBuildResponse' + $ref: '#/components/schemas/ActorsV1UpgradeAllActorsResponse' '400': description: '' content: @@ -890,11 +971,12 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/BuildsPrepareBuildRequest' - '/builds/{build}/complete': - post: - description: Marks an upload as complete. - operationId: builds_complete + $ref: '#/components/schemas/ActorsV1UpgradeAllActorsRequest' +>>>>>>> 43e5048bc (fix: api changes) + '/builds/{build}': + get: + description: Get a build. + operationId: builds_get tags: - Builds parameters: @@ -915,8 +997,12 @@ paths: schema: type: string responses: - '204': + '200': description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/BuildsGetBuildResponse' '400': description: '' content: @@ -954,20 +1040,37 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /cloud/bootstrap: + /builds: get: - description: Returns the basic information required to use the cloud APIs. - operationId: cloud_bootstrap + description: >- + Lists all builds of the project associated with the token used. Can be + filtered by tags in the query string. + operationId: builds_list tags: - - Cloud - parameters: [] + - Builds + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: tags_json + in: query + required: false + schema: + type: string responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/CloudBootstrapResponse' + $ref: '#/components/schemas/BuildsListBuildsResponse' '400': description: '' content: @@ -1005,18 +1108,25 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /cloud/games: - get: - description: >- - Returns a list of games in which the current identity is a group member - of its development team. - operationId: cloud_games_getGames + '/builds/{build}/tags': + patch: + operationId: builds_patchTags tags: - - CloudGames + - Builds parameters: - - name: watch_index + - name: build + in: path + required: true + schema: + type: string + format: uuid + - name: project + in: query + required: false + schema: + type: string + - name: environment in: query - description: A query parameter denoting the requests watch index. 
required: false schema: type: string @@ -1026,7 +1136,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CloudGamesGetGamesResponse' + $ref: '#/components/schemas/BuildsPatchBuildTagsResponse' '400': description: '' content: @@ -1064,19 +1174,36 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/BuildsPatchBuildTagsRequest' + /builds/prepare: post: - description: Creates a new game. - operationId: cloud_games_createGame + description: Creates a new project build for the given project. + operationId: builds_prepare tags: - - CloudGames - parameters: [] + - Builds + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/CloudGamesCreateGameResponse' + $ref: '#/components/schemas/BuildsPrepareBuildResponse' '400': description: '' content: @@ -1119,18 +1246,247 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CloudGamesCreateGameRequest' - /cloud/games/validate: + $ref: '#/components/schemas/BuildsPrepareBuildRequest' + '/builds/{build}/complete': post: - description: Validates information used to create a new game. - operationId: cloud_games_validateGame + description: Marks an upload as complete. + operationId: builds_complete tags: - - CloudGames - parameters: [] - responses: - '200': - description: '' - content: + - Builds + parameters: + - name: build + in: path + required: true + schema: + type: string + format: uuid + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + responses: + '204': + description: '' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + /cloud/bootstrap: + get: + description: Returns the basic information required to use the cloud APIs. 
+ operationId: cloud_bootstrap + tags: + - Cloud + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/CloudBootstrapResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + /cloud/games: + get: + description: >- + Returns a list of games in which the current identity is a group member + of its development team. + operationId: cloud_games_getGames + tags: + - CloudGames + parameters: + - name: watch_index + in: query + description: A query parameter denoting the requests watch index. + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/CloudGamesGetGamesResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + post: + description: Creates a new game. + operationId: cloud_games_createGame + tags: + - CloudGames + parameters: [] + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/CloudGamesCreateGameResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CloudGamesCreateGameRequest' + /cloud/games/validate: + post: + description: Validates information used to create a new game. 
+ operationId: cloud_games_validateGame + tags: + - CloudGames + parameters: [] + responses: + '200': + description: '' + content: application/json: schema: $ref: '#/components/schemas/CloudGamesValidateGameResponse' @@ -2567,18 +2923,460 @@ paths: schema: $ref: >- #/components/schemas/CloudGamesNamespacesUpdateGameNamespaceVersionRequest - '/pegboard/client/{client_id}/registered': - post: - operationId: coreIntercom_pegboard_markClientRegistered + '/v1/containers/{container}': + get: + description: Gets a container. + operationId: containers_get tags: - - CoreIntercomPegboard + - Containers parameters: - - name: client_id + - name: container in: path + description: The id of the container to destroy required: true + schema: + $ref: '#/components/schemas/Id' + - name: project + in: query + required: false schema: type: string - format: uuid + - name: environment + in: query + required: false + schema: + type: string + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ContainersEndpointType' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersGetContainerResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + delete: + description: Destroy a container. + operationId: containers_destroy + tags: + - Containers + parameters: + - name: container + in: path + description: The id of the container to destroy + required: true + schema: + $ref: '#/components/schemas/Id' + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: override_kill_timeout + in: query + description: >- + The duration to wait for in milliseconds before killing the + container. This should be used to override the default kill timeout + if a faster time is needed, say for ignoring a graceful shutdown. + required: false + schema: + type: integer + format: int64 + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersDestroyContainerResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + /v1/containers: + get: + description: >- + Lists all containers associated with the token used. 
Can be filtered by + tags in the query string. + operationId: containers_list + tags: + - Containers + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ContainersEndpointType' + - name: tags_json + in: query + required: false + schema: + type: string + - name: include_destroyed + in: query + required: false + schema: + type: boolean + - name: cursor + in: query + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersListContainersResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + post: + description: Create a new container. + operationId: containers_create + tags: + - Containers + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ContainersEndpointType' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersCreateContainerResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersCreateContainerRequest' + '/v1/containers/{container}/upgrade': + post: + description: Upgrades a container. 
+ operationId: containers_upgrade + tags: + - Containers + parameters: + - name: container + in: path + description: The id of the container to upgrade + required: true + schema: + $ref: '#/components/schemas/Id' + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersUpgradeContainerResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersUpgradeContainerRequest' + /v1/containers/upgrade: + post: + description: Upgrades all containers matching the given tags. + operationId: containers_upgradeAll + tags: + - Containers + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersUpgradeAllContainersResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersUpgradeAllContainersRequest' + '/pegboard/client/{client_id}/registered': + post: + operationId: coreIntercom_pegboard_markClientRegistered + tags: + - CoreIntercomPegboard + parameters: + - name: client_id + in: path + required: true + schema: + type: string + format: uuid responses: '204': description: '' @@ -5437,30 +6235,171 @@ paths: required: true schema: type: string - format: uuid - - name: tags_json + format: uuid + - name: tags_json + in: query + required: false + schema: + type: string + - name: include_destroyed + in: query + required: false + schema: + type: boolean + - name: cursor + in: query + required: false + schema: + type: string + format: uuid + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ServersListServersResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + 
application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + post: + description: Create a new dynamic server. + operationId: servers_create + tags: + - Servers + parameters: + - name: game_id + in: path + required: true + schema: + type: string + format: uuid + - name: environment_id + in: path + required: true + schema: + type: string + format: uuid + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ServersCreateServerResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ServersCreateServerRequest' + /v2/actors/logs: + get: + description: Returns the logs for a given actor. + operationId: actors_logs_get + tags: + - ActorsLogs + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment in: query required: false schema: type: string - - name: include_destroyed + - name: query_json in: query + description: JSON-encoded query expression for filtering logs required: false schema: - type: boolean - - name: cursor + type: string + - name: watch_index in: query + description: A query parameter denoting the requests watch index. required: false schema: type: string - format: uuid responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ServersListServersResponse' + $ref: '#/components/schemas/ActorsGetActorLogsResponse' '400': description: '' content: @@ -5498,31 +6437,23 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 +<<<<<<< HEAD + /actors/logs/export: post: - description: Create a new dynamic server. - operationId: servers_create + description: >- + Exports logs for the given actors to an S3 bucket and returns a + presigned URL to download. 
+ operationId: actors_logs_export tags: - - Servers - parameters: - - name: game_id - in: path - required: true - schema: - type: string - format: uuid - - name: environment_id - in: path - required: true - schema: - type: string - format: uuid + - ActorsLogs + parameters: [] responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ServersCreateServerResponse' + $ref: '#/components/schemas/ActorsExportActorLogsResponse' '400': description: '' content: @@ -5565,14 +6496,31 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/ServersCreateServerRequest' - /actors/logs: + type: object + properties: + project: + type: string + environment: + type: string + query_json: + type: string + description: JSON-encoded query expression for filtering logs + '/actors/{actor}/metrics/history': +======= + '/v2/actors/{actor}/metrics/history': +>>>>>>> 43e5048bc (fix: api changes) get: - description: Returns the logs for a given actor. - operationId: actors_logs_get + description: Returns the metrics for a given actor. + operationId: actors_metrics_get tags: - - ActorsLogs + - ActorsMetrics parameters: + - name: actor + in: path + description: The id of the actor to destroy + required: true + schema: + $ref: '#/components/schemas/Id' - name: project in: query required: false @@ -5583,25 +6531,28 @@ paths: required: false schema: type: string - - name: query_json + - name: start in: query - description: JSON-encoded query expression for filtering logs - required: false + required: true schema: - type: string - - name: watch_index + type: integer + - name: end in: query - description: A query parameter denoting the requests watch index. - required: false + required: true schema: - type: string + type: integer + - name: interval + in: query + required: true + schema: + type: integer responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ActorsGetActorLogsResponse' + $ref: '#/components/schemas/ActorsGetActorMetricsResponse' '400': description: '' content: @@ -5639,22 +6590,61 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /actors/logs/export: - post: - description: >- - Exports logs for the given actors to an S3 bucket and returns a - presigned URL to download. - operationId: actors_logs_export + /actors/logs: + get: + description: Returns the logs for a given actor. + operationId: actors_v1_logs_get tags: - - ActorsLogs - parameters: [] + - ActorsV1Logs + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: stream + in: query + required: true + schema: + $ref: '#/components/schemas/ActorsV1QueryLogStream' + - name: actor_ids_json + in: query + required: true + schema: + type: string + - name: search_text + in: query + required: false + schema: + type: string + - name: search_case_sensitive + in: query + required: false + schema: + type: boolean + - name: search_enable_regex + in: query + required: false + schema: + type: boolean + - name: watch_index + in: query + description: A query parameter denoting the requests watch index. 
+ required: false + schema: + type: string responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ActorsExportActorLogsResponse' + $ref: '#/components/schemas/ActorsV1GetActorLogsResponse' '400': description: '' content: @@ -5692,26 +6682,12 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - project: - type: string - environment: - type: string - query_json: - type: string - description: JSON-encoded query expression for filtering logs '/actors/{actor}/metrics/history': get: description: Returns the metrics for a given actor. - operationId: actors_metrics_get + operationId: actors_v1_metrics_get tags: - - ActorsMetrics + - ActorsV1Metrics parameters: - name: actor in: path @@ -5751,7 +6727,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/ActorsGetActorMetricsResponse' + $ref: '#/components/schemas/ActorsV1GetActorMetricsResponse' '400': description: '' content: @@ -7502,7 +8478,152 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/CloudGetRegionTiersResponse' + $ref: '#/components/schemas/CloudGetRegionTiersResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + '/cloud/uploads/{upload_id}/complete': + post: + description: Marks an upload as complete. + operationId: cloud_uploads_completeUpload + tags: + - CloudUploads + parameters: + - name: upload_id + in: path + required: true + schema: + type: string + format: uuid + responses: + '204': + description: '' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + /v1/containers/logs: + get: + description: Returns the logs for a given container. 
+ operationId: containers_logs_get + tags: + - ContainersLogs + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: stream + in: query + required: true + schema: + $ref: '#/components/schemas/ContainersQueryLogStream' + - name: container_ids_json + in: query + required: true + schema: + type: string + - name: search_text + in: query + required: false + schema: + type: string + - name: search_case_sensitive + in: query + required: false + schema: + type: boolean + - name: search_enable_regex + in: query + required: false + schema: + type: boolean + - name: watch_index + in: query + description: A query parameter denoting the requests watch index. + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersGetContainerLogsResponse' '400': description: '' content: @@ -7540,22 +8661,51 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - '/cloud/uploads/{upload_id}/complete': - post: - description: Marks an upload as complete. - operationId: cloud_uploads_completeUpload + '/v1/containers/{container}/metrics/history': + get: + description: Returns the metrics for a given container. + operationId: containers_metrics_get tags: - - CloudUploads + - ContainersMetrics parameters: - - name: upload_id + - name: container in: path + description: The id of the container to destroy required: true + schema: + $ref: '#/components/schemas/Id' + - name: project + in: query + required: false schema: type: string - format: uuid + - name: environment + in: query + required: false + schema: + type: string + - name: start + in: query + required: true + schema: + type: integer + - name: end + in: query + required: true + schema: + type: integer + - name: interval + in: query + required: true + schema: + type: integer responses: - '204': + '200': description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersGetContainerMetricsResponse' '400': description: '' content: @@ -9634,8 +10784,6 @@ components: $ref: '#/components/schemas/ActorsCreateActorRuntimeRequest' network: $ref: '#/components/schemas/ActorsCreateActorNetworkRequest' - resources: - $ref: '#/components/schemas/ActorsResources' lifecycle: $ref: '#/components/schemas/ActorsLifecycle' required: @@ -9730,6 +10878,7 @@ components: required: - actors - pagination +<<<<<<< HEAD ActorsGetActorUsageResponse: type: object properties: @@ -9760,13 +10909,125 @@ components: - metric_types - metric_values ActorsQueryActorsResponse: +======= + ActorsV1GetActorResponse: + type: object + properties: + actor: + $ref: '#/components/schemas/ActorsV1Actor' + required: + - actor + ActorsV1CreateActorRequest: + type: object + properties: + region: + type: string + tags: {} + build: + type: string + format: uuid + build_tags: {} + runtime: + $ref: '#/components/schemas/ActorsV1CreateActorRuntimeRequest' + network: + $ref: '#/components/schemas/ActorsV1CreateActorNetworkRequest' + resources: + $ref: '#/components/schemas/ActorsV1Resources' + lifecycle: + $ref: '#/components/schemas/ActorsV1Lifecycle' + required: + - tags + ActorsV1CreateActorRuntimeRequest: + type: object + properties: + environment: + type: object + additionalProperties: + type: string + network: + $ref: '#/components/schemas/ActorsV1CreateActorRuntimeNetworkRequest' + ActorsV1CreateActorRuntimeNetworkRequest: + type: object + 
properties: + endpoint_type: + $ref: '#/components/schemas/ActorsV1EndpointType' + required: + - endpoint_type + ActorsV1CreateActorNetworkRequest: + type: object + properties: + mode: + $ref: '#/components/schemas/ActorsV1NetworkMode' + ports: + type: object + additionalProperties: + $ref: '#/components/schemas/ActorsV1CreateActorPortRequest' + wait_ready: + type: boolean + ActorsV1CreateActorPortRequest: + type: object + properties: + protocol: + $ref: '#/components/schemas/ActorsV1PortProtocol' + internal_port: + type: integer + routing: + $ref: '#/components/schemas/ActorsV1PortRouting' + required: + - protocol + ActorsV1CreateActorResponse: + type: object + properties: + actor: + $ref: '#/components/schemas/ActorsV1Actor' + description: The actor that was created + required: + - actor + ActorsV1DestroyActorResponse: + type: object + properties: {} + ActorsV1UpgradeActorRequest: + type: object + properties: + build: + type: string + format: uuid + build_tags: {} + ActorsV1UpgradeActorResponse: + type: object + properties: {} + ActorsV1UpgradeAllActorsRequest: + type: object + properties: + tags: {} + build: + type: string + format: uuid + build_tags: {} + required: + - tags + ActorsV1UpgradeAllActorsResponse: + type: object + properties: + count: + type: integer + format: int64 + required: + - count + ActorsV1ListActorsResponse: +>>>>>>> 43e5048bc (fix: api changes) type: object properties: actors: type: array items: +<<<<<<< HEAD $ref: '#/components/schemas/ActorsActor' description: A list of actors matching the query +======= + $ref: '#/components/schemas/ActorsV1Actor' + description: A list of actors for the project associated with the token. +>>>>>>> 43e5048bc (fix: api changes) pagination: $ref: '#/components/schemas/Pagination' required: @@ -10449,41 +11710,159 @@ components: properties: {} CloudVersionMatchmakerMatchmakerConfig: type: object - description: Matchmaker configuration for a given version. + description: Matchmaker configuration for a given version. + properties: + game_modes: + type: object + additionalProperties: + $ref: '#/components/schemas/CloudVersionMatchmakerGameMode' + description: A list of game modes. + captcha: + $ref: '#/components/schemas/CloudVersionMatchmakerCaptcha' + dev_hostname: + type: string + description: _Configures Rivet CLI behavior. Has no effect on server behavior._ + regions: + type: object + additionalProperties: + $ref: '#/components/schemas/CloudVersionMatchmakerGameModeRegion' + max_players: + type: integer + max_players_direct: + type: integer + max_players_party: + type: integer + docker: + $ref: '#/components/schemas/CloudVersionMatchmakerGameModeRuntimeDocker' + tier: + type: string + idle_lobbies: + $ref: '#/components/schemas/CloudVersionMatchmakerGameModeIdleLobbiesConfig' + lobby_groups: + type: array + items: + $ref: '#/components/schemas/CloudVersionMatchmakerLobbyGroup' + description: |- + **Deprecated: use `game_modes` instead** + A list of game modes. 
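For readability, here is a minimal sketch of a body that conforms to the `ActorsV1CreateActorRequest` schema family defined above, built with `serde_json`. Only `tags` is required by the schema; the concrete region, tag names, and values below are illustrative assumptions, not part of the spec.

```rust
use serde_json::json;

fn main() {
    // Hypothetical POST /actors (v1) body; shapes follow the ActorsV1CreateActorRequest,
    // ActorsV1Resources, and ActorsV1Lifecycle schemas above.
    let body = json!({
        "tags": { "name": "example" },                   // required by the schema
        "region": "atl",                                 // optional; placeholder value
        "build_tags": { "name": "game", "current": "true" },
        "runtime": {
            "environment": { "LOG_LEVEL": "debug" },
            "network": { "endpoint_type": "hostname" }   // ActorsV1EndpointType
        },
        "network": {
            "mode": "bridge",                            // ActorsV1NetworkMode
            "ports": {
                "http": { "protocol": "https", "routing": { "guard": {} } }
            },
            "wait_ready": true
        },
        "resources": { "cpu": 1000, "memory": 1024 },    // millicores / megabytes
        "lifecycle": { "kill_timeout": 30000, "durable": true }
    });
    println!("{}", serde_json::to_string_pretty(&body).unwrap());
}
```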
+ ContainersGetContainerResponse: + type: object + properties: + container: + $ref: '#/components/schemas/ContainersContainer' + required: + - container + ContainersCreateContainerRequest: + type: object + properties: + region: + type: string + tags: {} + build: + type: string + format: uuid + build_tags: {} + runtime: + $ref: '#/components/schemas/ContainersCreateContainerRuntimeRequest' + network: + $ref: '#/components/schemas/ContainersCreateContainerNetworkRequest' + resources: + $ref: '#/components/schemas/ContainersResources' + lifecycle: + $ref: '#/components/schemas/ContainersLifecycle' + required: + - tags + - resources + ContainersCreateContainerRuntimeRequest: + type: object + properties: + environment: + type: object + additionalProperties: + type: string + network: + $ref: '#/components/schemas/ContainersCreateContainerRuntimeNetworkRequest' + ContainersCreateContainerRuntimeNetworkRequest: + type: object + properties: + endpoint_type: + $ref: '#/components/schemas/ContainersEndpointType' + required: + - endpoint_type + ContainersCreateContainerNetworkRequest: + type: object + properties: + mode: + $ref: '#/components/schemas/ContainersNetworkMode' + ports: + type: object + additionalProperties: + $ref: '#/components/schemas/ContainersCreateContainerPortRequest' + wait_ready: + type: boolean + ContainersCreateContainerPortRequest: + type: object + properties: + protocol: + $ref: '#/components/schemas/ContainersPortProtocol' + internal_port: + type: integer + routing: + $ref: '#/components/schemas/ContainersPortRouting' + required: + - protocol + ContainersCreateContainerResponse: + type: object + properties: + container: + $ref: '#/components/schemas/ContainersContainer' + description: The container that was created + required: + - container + ContainersDestroyContainerResponse: + type: object + properties: {} + ContainersUpgradeContainerRequest: + type: object + properties: + build: + type: string + format: uuid + build_tags: {} + ContainersUpgradeContainerResponse: + type: object + properties: {} + ContainersUpgradeAllContainersRequest: + type: object + properties: + tags: {} + build: + type: string + format: uuid + build_tags: {} + required: + - tags + ContainersUpgradeAllContainersResponse: + type: object properties: - game_modes: - type: object - additionalProperties: - $ref: '#/components/schemas/CloudVersionMatchmakerGameMode' - description: A list of game modes. - captcha: - $ref: '#/components/schemas/CloudVersionMatchmakerCaptcha' - dev_hostname: - type: string - description: _Configures Rivet CLI behavior. Has no effect on server behavior._ - regions: - type: object - additionalProperties: - $ref: '#/components/schemas/CloudVersionMatchmakerGameModeRegion' - max_players: - type: integer - max_players_direct: - type: integer - max_players_party: + count: type: integer - docker: - $ref: '#/components/schemas/CloudVersionMatchmakerGameModeRuntimeDocker' - tier: - type: string - idle_lobbies: - $ref: '#/components/schemas/CloudVersionMatchmakerGameModeIdleLobbiesConfig' - lobby_groups: + format: int64 + required: + - count + ContainersListContainersResponse: + type: object + properties: + containers: type: array items: - $ref: '#/components/schemas/CloudVersionMatchmakerLobbyGroup' - description: |- - **Deprecated: use `game_modes` instead** - A list of game modes. + $ref: '#/components/schemas/ContainersContainer' + description: A list of containers for the project associated with the token. 
+ pagination: + $ref: '#/components/schemas/Pagination' + required: + - containers + - pagination CoreIntercomPegboardMarkClientRegisteredRequest: type: object properties: @@ -10996,26 +12375,246 @@ components: servers: type: array items: - $ref: '#/components/schemas/ServersServer' - description: A list of servers for the game associated with the token. + $ref: '#/components/schemas/ServersServer' + description: A list of servers for the game associated with the token. + required: + - servers + ActorsActor: + type: object + properties: + id: + $ref: '#/components/schemas/Id' + region: + type: string + tags: {} + runtime: + $ref: '#/components/schemas/ActorsRuntime' + network: + $ref: '#/components/schemas/ActorsNetwork' + lifecycle: + $ref: '#/components/schemas/ActorsLifecycle' + created_at: + $ref: '#/components/schemas/Timestamp' + started_at: + $ref: '#/components/schemas/Timestamp' + destroyed_at: + $ref: '#/components/schemas/Timestamp' + required: + - id + - region + - tags + - runtime + - network + - lifecycle + - created_at + ActorsRuntime: + type: object + properties: + build: + type: string + format: uuid + arguments: + type: array + items: + type: string + environment: + type: object + additionalProperties: + type: string + required: + - build + ActorsLifecycle: + type: object + properties: + kill_timeout: + type: integer + format: int64 + description: >- + The duration to wait for in milliseconds before killing the actor. + This should be set to a safe default, and can be overridden during a + DELETE request if needed. + durable: + type: boolean + description: >- + If true, the actor will try to reschedule itself automatically in + the event of a crash or a datacenter failover. The actor will not + reschedule if it exits successfully. + ActorsNetwork: + type: object + properties: + mode: + $ref: '#/components/schemas/ActorsNetworkMode' + ports: + type: object + additionalProperties: + $ref: '#/components/schemas/ActorsPort' + required: + - mode + - ports + ActorsNetworkMode: + type: string + enum: + - bridge + - host + ActorsPort: + type: object + properties: + protocol: + $ref: '#/components/schemas/ActorsPortProtocol' + internal_port: + type: integer + hostname: + type: string + port: + type: integer + path: + type: string + url: + type: string + description: >- + Fully formed connection URL including protocol, hostname, port, and + path, if applicable. + routing: + $ref: '#/components/schemas/ActorsPortRouting' + required: + - protocol + - routing + ActorsPortProtocol: + type: string + enum: + - http + - https + - tcp + - tcp_tls + - udp + ActorsPortRouting: + type: object + properties: + guard: + $ref: '#/components/schemas/ActorsGuardRouting' + host: + $ref: '#/components/schemas/ActorsHostRouting' + ActorsGuardRouting: + type: object + properties: {} + ActorsHostRouting: + type: object + properties: {} + ActorsEndpointType: + type: string + enum: + - hostname + - path + ActorsGetActorLogsResponse: + type: object + properties: + actor_ids: + type: array + items: + type: string + description: >- + List of actor IDs in these logs. The order of these correspond to + the index in the log entry. + lines: + type: array + items: + type: string + description: Sorted old to new. + timestamps: + type: array + items: + $ref: '#/components/schemas/Timestamp' + description: Sorted old to new. + streams: + type: array + items: + type: integer + description: |- + Streams the logs came from. 
+ + 0 = stdout + 1 = stderr + foreigns: + type: array + items: + type: boolean + description: List of flags denoting if this log is not directly from the actor. + actor_indices: + type: array + items: + type: integer + description: >- + Index of the actor that this log was for. Use this index to look the + full ID in `actor_ids`. + watch: + $ref: '#/components/schemas/WatchResponse' + required: + - actor_ids + - lines + - timestamps + - streams + - foreigns + - actor_indices + - watch + ActorsExportActorLogsResponse: + type: object + properties: + url: + type: string + description: Presigned URL to download the exported logs + required: + - url + ActorsGetActorMetricsResponse: + type: object + properties: + actor_ids: + type: array + items: + type: string + metric_names: + type: array + items: + type: string + metric_attributes: + type: array + items: + type: object + additionalProperties: + type: string + metric_types: + type: array + items: + type: string + metric_values: + type: array + items: + type: array + items: + type: number + format: double required: - - servers - ActorsActor: + - actor_ids + - metric_names + - metric_attributes + - metric_types + - metric_values + ActorsV1Actor: type: object properties: id: - $ref: '#/components/schemas/Id' + type: string + format: uuid region: type: string tags: {} runtime: - $ref: '#/components/schemas/ActorsRuntime' + $ref: '#/components/schemas/ActorsV1Runtime' network: - $ref: '#/components/schemas/ActorsNetwork' + $ref: '#/components/schemas/ActorsV1Network' resources: - $ref: '#/components/schemas/ActorsResources' + $ref: '#/components/schemas/ActorsV1Resources' lifecycle: - $ref: '#/components/schemas/ActorsLifecycle' + $ref: '#/components/schemas/ActorsV1Lifecycle' created_at: $ref: '#/components/schemas/Timestamp' started_at: @@ -11030,7 +12629,7 @@ components: - network - lifecycle - created_at - ActorsRuntime: + ActorsV1Runtime: type: object properties: build: @@ -11046,7 +12645,7 @@ components: type: string required: - build - ActorsLifecycle: + ActorsV1Lifecycle: type: object properties: kill_timeout: @@ -11062,7 +12661,7 @@ components: If true, the actor will try to reschedule itself automatically in the event of a crash or a datacenter failover. The actor will not reschedule if it exits successfully. - ActorsResources: + ActorsV1Resources: type: object properties: cpu: @@ -11080,28 +12679,28 @@ components: required: - cpu - memory - ActorsNetwork: + ActorsV1Network: type: object properties: mode: - $ref: '#/components/schemas/ActorsNetworkMode' + $ref: '#/components/schemas/ActorsV1NetworkMode' ports: type: object additionalProperties: - $ref: '#/components/schemas/ActorsPort' + $ref: '#/components/schemas/ActorsV1Port' required: - mode - ports - ActorsNetworkMode: + ActorsV1NetworkMode: type: string enum: - bridge - host - ActorsPort: + ActorsV1Port: type: object properties: protocol: - $ref: '#/components/schemas/ActorsPortProtocol' + $ref: '#/components/schemas/ActorsV1PortProtocol' internal_port: type: integer hostname: @@ -11116,11 +12715,11 @@ components: Fully formed connection URL including protocol, hostname, port, and path, if applicable. 
routing: - $ref: '#/components/schemas/ActorsPortRouting' + $ref: '#/components/schemas/ActorsV1PortRouting' required: - protocol - routing - ActorsPortProtocol: + ActorsV1PortProtocol: type: string enum: - http @@ -11128,31 +12727,31 @@ components: - tcp - tcp_tls - udp - ActorsPortRouting: + ActorsV1PortRouting: type: object properties: guard: - $ref: '#/components/schemas/ActorsGuardRouting' + $ref: '#/components/schemas/ActorsV1GuardRouting' host: - $ref: '#/components/schemas/ActorsHostRouting' - ActorsGuardRouting: + $ref: '#/components/schemas/ActorsV1HostRouting' + ActorsV1GuardRouting: type: object properties: {} - ActorsHostRouting: + ActorsV1HostRouting: type: object properties: {} - ActorsEndpointType: + ActorsV1EndpointType: type: string enum: - hostname - path - ActorsGetActorLogsResponse: + ActorsV1GetActorLogsResponse: type: object properties: actor_ids: type: array items: - $ref: '#/components/schemas/Id' + type: string description: >- List of actor IDs in these logs. The order of these correspond to the index in the log entry. @@ -11175,11 +12774,6 @@ components: 0 = stdout 1 = stderr - foreigns: - type: array - items: - type: boolean - description: List of flags denoting if this log is not directly from the actor. actor_indices: type: array items: @@ -11194,18 +12788,15 @@ components: - lines - timestamps - streams - - foreigns - actor_indices - watch - ActorsExportActorLogsResponse: - type: object - properties: - url: - type: string - description: Presigned URL to download the exported logs - required: - - url - ActorsGetActorMetricsResponse: + ActorsV1QueryLogStream: + type: string + enum: + - std_out + - std_err + - all + ActorsV1GetActorMetricsResponse: type: object properties: actor_ids: @@ -13251,6 +14842,246 @@ components: properties: cursor: type: string + ContainersContainer: + type: object + properties: + id: + $ref: '#/components/schemas/Id' + region: + type: string + tags: {} + runtime: + $ref: '#/components/schemas/ContainersRuntime' + network: + $ref: '#/components/schemas/ContainersNetwork' + resources: + $ref: '#/components/schemas/ContainersResources' + lifecycle: + $ref: '#/components/schemas/ContainersLifecycle' + created_at: + $ref: '#/components/schemas/Timestamp' + started_at: + $ref: '#/components/schemas/Timestamp' + destroyed_at: + $ref: '#/components/schemas/Timestamp' + required: + - id + - region + - tags + - runtime + - network + - resources + - lifecycle + - created_at + ContainersRuntime: + type: object + properties: + build: + type: string + format: uuid + arguments: + type: array + items: + type: string + environment: + type: object + additionalProperties: + type: string + required: + - build + ContainersLifecycle: + type: object + properties: + kill_timeout: + type: integer + format: int64 + description: >- + The duration to wait for in milliseconds before killing the + container. This should be set to a safe default, and can be + overridden during a DELETE request if needed. + durable: + type: boolean + description: >- + If true, the container will try to reschedule itself automatically + in the event of a crash or a datacenter failover. The container will + not reschedule if it exits successfully. + ContainersResources: + type: object + properties: + cpu: + type: integer + description: >- + The number of CPU cores in millicores, or 1/1000 of a core. For + example, + + 1/8 of a core would be 125 millicores, and 1 core would be 1000 + + millicores. 
+ memory: + type: integer + description: The amount of memory in megabytes + required: + - cpu + - memory + ContainersNetwork: + type: object + properties: + mode: + $ref: '#/components/schemas/ContainersNetworkMode' + ports: + type: object + additionalProperties: + $ref: '#/components/schemas/ContainersPort' + required: + - mode + - ports + ContainersNetworkMode: + type: string + enum: + - bridge + - host + ContainersPort: + type: object + properties: + protocol: + $ref: '#/components/schemas/ContainersPortProtocol' + internal_port: + type: integer + hostname: + type: string + port: + type: integer + path: + type: string + url: + type: string + description: >- + Fully formed connection URL including protocol, hostname, port, and + path, if applicable. + routing: + $ref: '#/components/schemas/ContainersPortRouting' + required: + - protocol + - routing + ContainersPortProtocol: + type: string + enum: + - http + - https + - tcp + - tcp_tls + - udp + ContainersPortRouting: + type: object + properties: + guard: + $ref: '#/components/schemas/ContainersGuardRouting' + host: + $ref: '#/components/schemas/ContainersHostRouting' + ContainersGuardRouting: + type: object + properties: {} + ContainersHostRouting: + type: object + properties: {} + ContainersEndpointType: + type: string + enum: + - hostname + - path + ContainersGetContainerLogsResponse: + type: object + properties: + container_ids: + type: array + items: + $ref: '#/components/schemas/Id' + description: >- + List of container IDs in these logs. The order of these correspond + to the index in the log entry. + lines: + type: array + items: + type: string + description: Sorted old to new. + timestamps: + type: array + items: + $ref: '#/components/schemas/Timestamp' + description: Sorted old to new. + streams: + type: array + items: + type: integer + description: |- + Streams the logs came from. + + 0 = stdout + 1 = stderr + foreigns: + type: array + items: + type: boolean + description: >- + List of flags denoting if this log is not directly from the + container. + container_indices: + type: array + items: + type: integer + description: >- + Index of the container that this log was for. Use this index to look + the full ID in `container_ids`. 
+ watch: + $ref: '#/components/schemas/WatchResponse' + required: + - container_ids + - lines + - timestamps + - streams + - foreigns + - container_indices + - watch + ContainersQueryLogStream: + type: string + enum: + - std_out + - std_err + - all + ContainersGetContainerMetricsResponse: + type: object + properties: + container_ids: + type: array + items: + type: string + metric_names: + type: array + items: + type: string + metric_attributes: + type: array + items: + type: object + additionalProperties: + type: string + metric_types: + type: array + items: + type: string + metric_values: + type: array + items: + type: array + items: + type: number + format: double + required: + - container_ids + - metric_names + - metric_attributes + - metric_types + - metric_values GameHandle: type: object properties: diff --git a/sdks/api/full/rust/.openapi-generator/FILES b/sdks/api/full/rust/.openapi-generator/FILES index aacc773efa..97ad299d8b 100644 --- a/sdks/api/full/rust/.openapi-generator/FILES +++ b/sdks/api/full/rust/.openapi-generator/FILES @@ -27,12 +27,43 @@ docs/ActorsNetworkMode.md docs/ActorsPort.md docs/ActorsPortProtocol.md docs/ActorsPortRouting.md +<<<<<<< HEAD docs/ActorsQueryActorsResponse.md docs/ActorsResources.md +======= +docs/ActorsQueryLogStream.md +>>>>>>> 43e5048bc (fix: api changes) docs/ActorsRuntime.md docs/ActorsUpgradeActorRequest.md docs/ActorsUpgradeAllActorsRequest.md docs/ActorsUpgradeAllActorsResponse.md +docs/ActorsV1Actor.md +docs/ActorsV1Api.md +docs/ActorsV1CreateActorNetworkRequest.md +docs/ActorsV1CreateActorPortRequest.md +docs/ActorsV1CreateActorRequest.md +docs/ActorsV1CreateActorResponse.md +docs/ActorsV1CreateActorRuntimeNetworkRequest.md +docs/ActorsV1CreateActorRuntimeRequest.md +docs/ActorsV1EndpointType.md +docs/ActorsV1GetActorLogsResponse.md +docs/ActorsV1GetActorMetricsResponse.md +docs/ActorsV1GetActorResponse.md +docs/ActorsV1Lifecycle.md +docs/ActorsV1ListActorsResponse.md +docs/ActorsV1LogsApi.md +docs/ActorsV1MetricsApi.md +docs/ActorsV1Network.md +docs/ActorsV1NetworkMode.md +docs/ActorsV1Port.md +docs/ActorsV1PortProtocol.md +docs/ActorsV1PortRouting.md +docs/ActorsV1QueryLogStream.md +docs/ActorsV1Resources.md +docs/ActorsV1Runtime.md +docs/ActorsV1UpgradeActorRequest.md +docs/ActorsV1UpgradeAllActorsRequest.md +docs/ActorsV1UpgradeAllActorsResponse.md docs/AuthCompleteStatus.md docs/AuthIdentityCompleteEmailVerificationRequest.md docs/AuthIdentityCompleteEmailVerificationResponse.md @@ -223,6 +254,33 @@ docs/CloudVersionMatchmakerPortProtocol.md docs/CloudVersionMatchmakerPortRange.md docs/CloudVersionMatchmakerProxyKind.md docs/CloudVersionSummary.md +docs/ContainersApi.md +docs/ContainersContainer.md +docs/ContainersCreateContainerNetworkRequest.md +docs/ContainersCreateContainerPortRequest.md +docs/ContainersCreateContainerRequest.md +docs/ContainersCreateContainerResponse.md +docs/ContainersCreateContainerRuntimeNetworkRequest.md +docs/ContainersCreateContainerRuntimeRequest.md +docs/ContainersEndpointType.md +docs/ContainersGetContainerLogsResponse.md +docs/ContainersGetContainerMetricsResponse.md +docs/ContainersGetContainerResponse.md +docs/ContainersLifecycle.md +docs/ContainersListContainersResponse.md +docs/ContainersLogsApi.md +docs/ContainersMetricsApi.md +docs/ContainersNetwork.md +docs/ContainersNetworkMode.md +docs/ContainersPort.md +docs/ContainersPortProtocol.md +docs/ContainersPortRouting.md +docs/ContainersQueryLogStream.md +docs/ContainersResources.md +docs/ContainersRuntime.md 
+docs/ContainersUpgradeAllContainersRequest.md +docs/ContainersUpgradeAllContainersResponse.md +docs/ContainersUpgradeContainerRequest.md docs/CoreIntercomPegboardApi.md docs/CoreIntercomPegboardMarkClientRegisteredRequest.md docs/EdgeIntercomPegboardApi.md @@ -400,6 +458,9 @@ git_push.sh src/apis/actors_api.rs src/apis/actors_logs_api.rs src/apis/actors_metrics_api.rs +src/apis/actors_v1_api.rs +src/apis/actors_v1_logs_api.rs +src/apis/actors_v1_metrics_api.rs src/apis/auth_identity_email_api.rs src/apis/auth_tokens_api.rs src/apis/builds_api.rs @@ -421,6 +482,9 @@ src/apis/cloud_logs_api.rs src/apis/cloud_tiers_api.rs src/apis/cloud_uploads_api.rs src/apis/configuration.rs +src/apis/containers_api.rs +src/apis/containers_logs_api.rs +src/apis/containers_metrics_api.rs src/apis/core_intercom_pegboard_api.rs src/apis/edge_intercom_pegboard_api.rs src/apis/games_environments_tokens_api.rs @@ -467,12 +531,40 @@ src/models/actors_network_mode.rs src/models/actors_port.rs src/models/actors_port_protocol.rs src/models/actors_port_routing.rs +<<<<<<< HEAD src/models/actors_query_actors_response.rs src/models/actors_resources.rs +======= +src/models/actors_query_log_stream.rs +>>>>>>> 43e5048bc (fix: api changes) src/models/actors_runtime.rs src/models/actors_upgrade_actor_request.rs src/models/actors_upgrade_all_actors_request.rs src/models/actors_upgrade_all_actors_response.rs +src/models/actors_v1_actor.rs +src/models/actors_v1_create_actor_network_request.rs +src/models/actors_v1_create_actor_port_request.rs +src/models/actors_v1_create_actor_request.rs +src/models/actors_v1_create_actor_response.rs +src/models/actors_v1_create_actor_runtime_network_request.rs +src/models/actors_v1_create_actor_runtime_request.rs +src/models/actors_v1_endpoint_type.rs +src/models/actors_v1_get_actor_logs_response.rs +src/models/actors_v1_get_actor_metrics_response.rs +src/models/actors_v1_get_actor_response.rs +src/models/actors_v1_lifecycle.rs +src/models/actors_v1_list_actors_response.rs +src/models/actors_v1_network.rs +src/models/actors_v1_network_mode.rs +src/models/actors_v1_port.rs +src/models/actors_v1_port_protocol.rs +src/models/actors_v1_port_routing.rs +src/models/actors_v1_query_log_stream.rs +src/models/actors_v1_resources.rs +src/models/actors_v1_runtime.rs +src/models/actors_v1_upgrade_actor_request.rs +src/models/actors_v1_upgrade_all_actors_request.rs +src/models/actors_v1_upgrade_all_actors_response.rs src/models/auth_complete_status.rs src/models/auth_identity_complete_email_verification_request.rs src/models/auth_identity_complete_email_verification_response.rs @@ -643,6 +735,30 @@ src/models/cloud_version_matchmaker_port_protocol.rs src/models/cloud_version_matchmaker_port_range.rs src/models/cloud_version_matchmaker_proxy_kind.rs src/models/cloud_version_summary.rs +src/models/containers_container.rs +src/models/containers_create_container_network_request.rs +src/models/containers_create_container_port_request.rs +src/models/containers_create_container_request.rs +src/models/containers_create_container_response.rs +src/models/containers_create_container_runtime_network_request.rs +src/models/containers_create_container_runtime_request.rs +src/models/containers_endpoint_type.rs +src/models/containers_get_container_logs_response.rs +src/models/containers_get_container_metrics_response.rs +src/models/containers_get_container_response.rs +src/models/containers_lifecycle.rs +src/models/containers_list_containers_response.rs +src/models/containers_network.rs 
+src/models/containers_network_mode.rs +src/models/containers_port.rs +src/models/containers_port_protocol.rs +src/models/containers_port_routing.rs +src/models/containers_query_log_stream.rs +src/models/containers_resources.rs +src/models/containers_runtime.rs +src/models/containers_upgrade_all_containers_request.rs +src/models/containers_upgrade_all_containers_response.rs +src/models/containers_upgrade_container_request.rs src/models/core_intercom_pegboard_mark_client_registered_request.rs src/models/edge_intercom_pegboard_toggle_client_drain_request.rs src/models/error_body.rs diff --git a/sdks/api/full/rust/README.md b/sdks/api/full/rust/README.md index 6b4171db30..67098bbc24 100644 --- a/sdks/api/full/rust/README.md +++ b/sdks/api/full/rust/README.md @@ -25,6 +25,7 @@ All URIs are relative to *https://api.rivet.gg* Class | Method | HTTP request | Description ------------ | ------------- | ------------- | ------------- +<<<<<<< HEAD *ActorsApi* | [**actors_create**](docs/ActorsApi.md#actors_create) | **POST** /actors | *ActorsApi* | [**actors_destroy**](docs/ActorsApi.md#actors_destroy) | **DELETE** /actors/{actor} | *ActorsApi* | [**actors_get**](docs/ActorsApi.md#actors_get) | **GET** /actors/{actor} | @@ -36,6 +37,24 @@ Class | Method | HTTP request | Description *ActorsLogsApi* | [**actors_logs_export**](docs/ActorsLogsApi.md#actors_logs_export) | **POST** /actors/logs/export | *ActorsLogsApi* | [**actors_logs_get**](docs/ActorsLogsApi.md#actors_logs_get) | **GET** /actors/logs | *ActorsMetricsApi* | [**actors_metrics_get**](docs/ActorsMetricsApi.md#actors_metrics_get) | **GET** /actors/{actor}/metrics/history | +======= +*ActorsApi* | [**actors_create**](docs/ActorsApi.md#actors_create) | **POST** /v2/actors | +*ActorsApi* | [**actors_destroy**](docs/ActorsApi.md#actors_destroy) | **DELETE** /v2/actors/{actor} | +*ActorsApi* | [**actors_get**](docs/ActorsApi.md#actors_get) | **GET** /v2/actors/{actor} | +*ActorsApi* | [**actors_list**](docs/ActorsApi.md#actors_list) | **GET** /v2/actors | +*ActorsApi* | [**actors_upgrade**](docs/ActorsApi.md#actors_upgrade) | **POST** /v2/actors/{actor}/upgrade | +*ActorsApi* | [**actors_upgrade_all**](docs/ActorsApi.md#actors_upgrade_all) | **POST** /v2/actors/upgrade | +*ActorsLogsApi* | [**actors_logs_get**](docs/ActorsLogsApi.md#actors_logs_get) | **GET** /v2/actors/logs | +*ActorsMetricsApi* | [**actors_metrics_get**](docs/ActorsMetricsApi.md#actors_metrics_get) | **GET** /v2/actors/{actor}/metrics/history | +*ActorsV1Api* | [**actors_v1_create**](docs/ActorsV1Api.md#actors_v1_create) | **POST** /actors | +*ActorsV1Api* | [**actors_v1_destroy**](docs/ActorsV1Api.md#actors_v1_destroy) | **DELETE** /actors/{actor} | +*ActorsV1Api* | [**actors_v1_get**](docs/ActorsV1Api.md#actors_v1_get) | **GET** /actors/{actor} | +*ActorsV1Api* | [**actors_v1_list**](docs/ActorsV1Api.md#actors_v1_list) | **GET** /actors | +*ActorsV1Api* | [**actors_v1_upgrade**](docs/ActorsV1Api.md#actors_v1_upgrade) | **POST** /actors/{actor}/upgrade | +*ActorsV1Api* | [**actors_v1_upgrade_all**](docs/ActorsV1Api.md#actors_v1_upgrade_all) | **POST** /actors/upgrade | +*ActorsV1LogsApi* | [**actors_v1_logs_get**](docs/ActorsV1LogsApi.md#actors_v1_logs_get) | **GET** /actors/logs | +*ActorsV1MetricsApi* | [**actors_v1_metrics_get**](docs/ActorsV1MetricsApi.md#actors_v1_metrics_get) | **GET** /actors/{actor}/metrics/history | +>>>>>>> 43e5048bc (fix: api changes) *AuthIdentityEmailApi* | 
[**auth_identity_email_complete_email_verification**](docs/AuthIdentityEmailApi.md#auth_identity_email_complete_email_verification) | **POST** /auth/identity/email/complete-verification | *AuthIdentityEmailApi* | [**auth_identity_email_start_email_verification**](docs/AuthIdentityEmailApi.md#auth_identity_email_start_email_verification) | **POST** /auth/identity/email/start-verification | *AuthTokensApi* | [**auth_tokens_refresh_identity_token**](docs/AuthTokensApi.md#auth_tokens_refresh_identity_token) | **POST** /auth/tokens/identity | @@ -96,6 +115,14 @@ Class | Method | HTTP request | Description *CloudLogsApi* | [**cloud_logs_get_ray_perf_logs**](docs/CloudLogsApi.md#cloud_logs_get_ray_perf_logs) | **GET** /cloud/rays/{ray_id}/perf | *CloudTiersApi* | [**cloud_tiers_get_region_tiers**](docs/CloudTiersApi.md#cloud_tiers_get_region_tiers) | **GET** /cloud/region-tiers | *CloudUploadsApi* | [**cloud_uploads_complete_upload**](docs/CloudUploadsApi.md#cloud_uploads_complete_upload) | **POST** /cloud/uploads/{upload_id}/complete | +*ContainersApi* | [**containers_create**](docs/ContainersApi.md#containers_create) | **POST** /v1/containers | +*ContainersApi* | [**containers_destroy**](docs/ContainersApi.md#containers_destroy) | **DELETE** /v1/containers/{container} | +*ContainersApi* | [**containers_get**](docs/ContainersApi.md#containers_get) | **GET** /v1/containers/{container} | +*ContainersApi* | [**containers_list**](docs/ContainersApi.md#containers_list) | **GET** /v1/containers | +*ContainersApi* | [**containers_upgrade**](docs/ContainersApi.md#containers_upgrade) | **POST** /v1/containers/{container}/upgrade | +*ContainersApi* | [**containers_upgrade_all**](docs/ContainersApi.md#containers_upgrade_all) | **POST** /v1/containers/upgrade | +*ContainersLogsApi* | [**containers_logs_get**](docs/ContainersLogsApi.md#containers_logs_get) | **GET** /v1/containers/logs | +*ContainersMetricsApi* | [**containers_metrics_get**](docs/ContainersMetricsApi.md#containers_metrics_get) | **GET** /v1/containers/{container}/metrics/history | *CoreIntercomPegboardApi* | [**core_intercom_pegboard_mark_client_registered**](docs/CoreIntercomPegboardApi.md#core_intercom_pegboard_mark_client_registered) | **POST** /pegboard/client/{client_id}/registered | *EdgeIntercomPegboardApi* | [**edge_intercom_pegboard_prewarm_image**](docs/EdgeIntercomPegboardApi.md#edge_intercom_pegboard_prewarm_image) | **POST** /pegboard/image/{image_id}/prewarm | *EdgeIntercomPegboardApi* | [**edge_intercom_pegboard_toggle_client_drain**](docs/EdgeIntercomPegboardApi.md#edge_intercom_pegboard_toggle_client_drain) | **POST** /pegboard/client/{client_id}/toggle-drain | @@ -198,12 +225,40 @@ Class | Method | HTTP request | Description - [ActorsPort](docs/ActorsPort.md) - [ActorsPortProtocol](docs/ActorsPortProtocol.md) - [ActorsPortRouting](docs/ActorsPortRouting.md) +<<<<<<< HEAD - [ActorsQueryActorsResponse](docs/ActorsQueryActorsResponse.md) - [ActorsResources](docs/ActorsResources.md) +======= + - [ActorsQueryLogStream](docs/ActorsQueryLogStream.md) +>>>>>>> 43e5048bc (fix: api changes) - [ActorsRuntime](docs/ActorsRuntime.md) - [ActorsUpgradeActorRequest](docs/ActorsUpgradeActorRequest.md) - [ActorsUpgradeAllActorsRequest](docs/ActorsUpgradeAllActorsRequest.md) - [ActorsUpgradeAllActorsResponse](docs/ActorsUpgradeAllActorsResponse.md) + - [ActorsV1Actor](docs/ActorsV1Actor.md) + - [ActorsV1CreateActorNetworkRequest](docs/ActorsV1CreateActorNetworkRequest.md) + - 
[ActorsV1CreateActorPortRequest](docs/ActorsV1CreateActorPortRequest.md) + - [ActorsV1CreateActorRequest](docs/ActorsV1CreateActorRequest.md) + - [ActorsV1CreateActorResponse](docs/ActorsV1CreateActorResponse.md) + - [ActorsV1CreateActorRuntimeNetworkRequest](docs/ActorsV1CreateActorRuntimeNetworkRequest.md) + - [ActorsV1CreateActorRuntimeRequest](docs/ActorsV1CreateActorRuntimeRequest.md) + - [ActorsV1EndpointType](docs/ActorsV1EndpointType.md) + - [ActorsV1GetActorLogsResponse](docs/ActorsV1GetActorLogsResponse.md) + - [ActorsV1GetActorMetricsResponse](docs/ActorsV1GetActorMetricsResponse.md) + - [ActorsV1GetActorResponse](docs/ActorsV1GetActorResponse.md) + - [ActorsV1Lifecycle](docs/ActorsV1Lifecycle.md) + - [ActorsV1ListActorsResponse](docs/ActorsV1ListActorsResponse.md) + - [ActorsV1Network](docs/ActorsV1Network.md) + - [ActorsV1NetworkMode](docs/ActorsV1NetworkMode.md) + - [ActorsV1Port](docs/ActorsV1Port.md) + - [ActorsV1PortProtocol](docs/ActorsV1PortProtocol.md) + - [ActorsV1PortRouting](docs/ActorsV1PortRouting.md) + - [ActorsV1QueryLogStream](docs/ActorsV1QueryLogStream.md) + - [ActorsV1Resources](docs/ActorsV1Resources.md) + - [ActorsV1Runtime](docs/ActorsV1Runtime.md) + - [ActorsV1UpgradeActorRequest](docs/ActorsV1UpgradeActorRequest.md) + - [ActorsV1UpgradeAllActorsRequest](docs/ActorsV1UpgradeAllActorsRequest.md) + - [ActorsV1UpgradeAllActorsResponse](docs/ActorsV1UpgradeAllActorsResponse.md) - [AuthCompleteStatus](docs/AuthCompleteStatus.md) - [AuthIdentityCompleteEmailVerificationRequest](docs/AuthIdentityCompleteEmailVerificationRequest.md) - [AuthIdentityCompleteEmailVerificationResponse](docs/AuthIdentityCompleteEmailVerificationResponse.md) @@ -374,6 +429,30 @@ Class | Method | HTTP request | Description - [CloudVersionMatchmakerPortRange](docs/CloudVersionMatchmakerPortRange.md) - [CloudVersionMatchmakerProxyKind](docs/CloudVersionMatchmakerProxyKind.md) - [CloudVersionSummary](docs/CloudVersionSummary.md) + - [ContainersContainer](docs/ContainersContainer.md) + - [ContainersCreateContainerNetworkRequest](docs/ContainersCreateContainerNetworkRequest.md) + - [ContainersCreateContainerPortRequest](docs/ContainersCreateContainerPortRequest.md) + - [ContainersCreateContainerRequest](docs/ContainersCreateContainerRequest.md) + - [ContainersCreateContainerResponse](docs/ContainersCreateContainerResponse.md) + - [ContainersCreateContainerRuntimeNetworkRequest](docs/ContainersCreateContainerRuntimeNetworkRequest.md) + - [ContainersCreateContainerRuntimeRequest](docs/ContainersCreateContainerRuntimeRequest.md) + - [ContainersEndpointType](docs/ContainersEndpointType.md) + - [ContainersGetContainerLogsResponse](docs/ContainersGetContainerLogsResponse.md) + - [ContainersGetContainerMetricsResponse](docs/ContainersGetContainerMetricsResponse.md) + - [ContainersGetContainerResponse](docs/ContainersGetContainerResponse.md) + - [ContainersLifecycle](docs/ContainersLifecycle.md) + - [ContainersListContainersResponse](docs/ContainersListContainersResponse.md) + - [ContainersNetwork](docs/ContainersNetwork.md) + - [ContainersNetworkMode](docs/ContainersNetworkMode.md) + - [ContainersPort](docs/ContainersPort.md) + - [ContainersPortProtocol](docs/ContainersPortProtocol.md) + - [ContainersPortRouting](docs/ContainersPortRouting.md) + - [ContainersQueryLogStream](docs/ContainersQueryLogStream.md) + - [ContainersResources](docs/ContainersResources.md) + - [ContainersRuntime](docs/ContainersRuntime.md) + - 
[ContainersUpgradeAllContainersRequest](docs/ContainersUpgradeAllContainersRequest.md) + - [ContainersUpgradeAllContainersResponse](docs/ContainersUpgradeAllContainersResponse.md) + - [ContainersUpgradeContainerRequest](docs/ContainersUpgradeContainerRequest.md) - [CoreIntercomPegboardMarkClientRegisteredRequest](docs/CoreIntercomPegboardMarkClientRegisteredRequest.md) - [EdgeIntercomPegboardToggleClientDrainRequest](docs/EdgeIntercomPegboardToggleClientDrainRequest.md) - [ErrorBody](docs/ErrorBody.md) diff --git a/sdks/api/full/rust/docs/ActorsActor.md b/sdks/api/full/rust/docs/ActorsActor.md index 2e0a4dccfe..51f1a591c7 100644 --- a/sdks/api/full/rust/docs/ActorsActor.md +++ b/sdks/api/full/rust/docs/ActorsActor.md @@ -9,7 +9,6 @@ Name | Type | Description | Notes **tags** | Option<[**serde_json::Value**](.md)> | | **runtime** | [**crate::models::ActorsRuntime**](ActorsRuntime.md) | | **network** | [**crate::models::ActorsNetwork**](ActorsNetwork.md) | | -**resources** | Option<[**crate::models::ActorsResources**](ActorsResources.md)> | | [optional] **lifecycle** | [**crate::models::ActorsLifecycle**](ActorsLifecycle.md) | | **created_at** | **String** | RFC3339 timestamp | **started_at** | Option<**String**> | RFC3339 timestamp | [optional] diff --git a/sdks/api/full/rust/docs/ActorsApi.md b/sdks/api/full/rust/docs/ActorsApi.md index 2166a34a6f..894e6d3b75 100644 --- a/sdks/api/full/rust/docs/ActorsApi.md +++ b/sdks/api/full/rust/docs/ActorsApi.md @@ -4,6 +4,7 @@ All URIs are relative to *https://api.rivet.gg* Method | HTTP request | Description ------------- | ------------- | ------------- +<<<<<<< HEAD [**actors_create**](ActorsApi.md#actors_create) | **POST** /actors | [**actors_destroy**](ActorsApi.md#actors_destroy) | **DELETE** /actors/{actor} | [**actors_get**](ActorsApi.md#actors_get) | **GET** /actors/{actor} | @@ -12,6 +13,14 @@ Method | HTTP request | Description [**actors_upgrade**](ActorsApi.md#actors_upgrade) | **POST** /actors/{actor}/upgrade | [**actors_upgrade_all**](ActorsApi.md#actors_upgrade_all) | **POST** /actors/upgrade | [**actors_usage**](ActorsApi.md#actors_usage) | **GET** /actors/usage | +======= +[**actors_create**](ActorsApi.md#actors_create) | **POST** /v2/actors | +[**actors_destroy**](ActorsApi.md#actors_destroy) | **DELETE** /v2/actors/{actor} | +[**actors_get**](ActorsApi.md#actors_get) | **GET** /v2/actors/{actor} | +[**actors_list**](ActorsApi.md#actors_list) | **GET** /v2/actors | +[**actors_upgrade**](ActorsApi.md#actors_upgrade) | **POST** /v2/actors/{actor}/upgrade | +[**actors_upgrade_all**](ActorsApi.md#actors_upgrade_all) | **POST** /v2/actors/upgrade | +>>>>>>> 43e5048bc (fix: api changes) diff --git a/sdks/api/full/rust/docs/ActorsCreateActorRequest.md b/sdks/api/full/rust/docs/ActorsCreateActorRequest.md index 27d9c9d4f2..5f5686b46b 100644 --- a/sdks/api/full/rust/docs/ActorsCreateActorRequest.md +++ b/sdks/api/full/rust/docs/ActorsCreateActorRequest.md @@ -10,7 +10,6 @@ Name | Type | Description | Notes **build_tags** | Option<[**serde_json::Value**](.md)> | | [optional] **runtime** | Option<[**crate::models::ActorsCreateActorRuntimeRequest**](ActorsCreateActorRuntimeRequest.md)> | | [optional] **network** | Option<[**crate::models::ActorsCreateActorNetworkRequest**](ActorsCreateActorNetworkRequest.md)> | | [optional] -**resources** | Option<[**crate::models::ActorsResources**](ActorsResources.md)> | | [optional] **lifecycle** | Option<[**crate::models::ActorsLifecycle**](ActorsLifecycle.md)> | | [optional] [[Back to Model 
list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/api/full/rust/docs/ActorsLogsApi.md b/sdks/api/full/rust/docs/ActorsLogsApi.md index 72e930db8b..ba2d9273dc 100644 --- a/sdks/api/full/rust/docs/ActorsLogsApi.md +++ b/sdks/api/full/rust/docs/ActorsLogsApi.md @@ -4,8 +4,12 @@ All URIs are relative to *https://api.rivet.gg* Method | HTTP request | Description ------------- | ------------- | ------------- +<<<<<<< HEAD [**actors_logs_export**](ActorsLogsApi.md#actors_logs_export) | **POST** /actors/logs/export | [**actors_logs_get**](ActorsLogsApi.md#actors_logs_get) | **GET** /actors/logs | +======= +[**actors_logs_get**](ActorsLogsApi.md#actors_logs_get) | **GET** /v2/actors/logs | +>>>>>>> 43e5048bc (fix: api changes) diff --git a/sdks/api/full/rust/docs/ActorsMetricsApi.md b/sdks/api/full/rust/docs/ActorsMetricsApi.md index da0632ac74..b9865fb514 100644 --- a/sdks/api/full/rust/docs/ActorsMetricsApi.md +++ b/sdks/api/full/rust/docs/ActorsMetricsApi.md @@ -4,7 +4,7 @@ All URIs are relative to *https://api.rivet.gg* Method | HTTP request | Description ------------- | ------------- | ------------- -[**actors_metrics_get**](ActorsMetricsApi.md#actors_metrics_get) | **GET** /actors/{actor}/metrics/history | +[**actors_metrics_get**](ActorsMetricsApi.md#actors_metrics_get) | **GET** /v2/actors/{actor}/metrics/history | @@ -20,7 +20,7 @@ Returns the metrics for a given actor. Name | Type | Description | Required | Notes ------------- | ------------- | ------------- | ------------- | ------------- -**actor** | **uuid::Uuid** | The id of the actor to destroy | [required] | +**actor** | **String** | The id of the actor to destroy | [required] | **start** | **i32** | | [required] | **end** | **i32** | | [required] | **interval** | **i32** | | [required] | diff --git a/sdks/api/full/rust/docs/ActorsV1Actor.md b/sdks/api/full/rust/docs/ActorsV1Actor.md new file mode 100644 index 0000000000..bf26229831 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1Actor.md @@ -0,0 +1,20 @@ +# ActorsV1Actor + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | [**uuid::Uuid**](uuid::Uuid.md) | | +**region** | **String** | | +**tags** | Option<[**serde_json::Value**](.md)> | | +**runtime** | [**crate::models::ActorsV1Runtime**](ActorsV1Runtime.md) | | +**network** | [**crate::models::ActorsV1Network**](ActorsV1Network.md) | | +**resources** | Option<[**crate::models::ActorsV1Resources**](ActorsV1Resources.md)> | | [optional] +**lifecycle** | [**crate::models::ActorsV1Lifecycle**](ActorsV1Lifecycle.md) | | +**created_at** | **String** | RFC3339 timestamp | +**started_at** | Option<**String**> | RFC3339 timestamp | [optional] +**destroyed_at** | Option<**String**> | RFC3339 timestamp | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1Api.md b/sdks/api/full/rust/docs/ActorsV1Api.md new file mode 100644 index 0000000000..1f7ed34eab --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1Api.md @@ -0,0 +1,213 @@ +# \ActorsV1Api + +All URIs are relative to *https://api.rivet.gg* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**actors_v1_create**](ActorsV1Api.md#actors_v1_create) | **POST** /actors | 
+[**actors_v1_destroy**](ActorsV1Api.md#actors_v1_destroy) | **DELETE** /actors/{actor} |
+[**actors_v1_get**](ActorsV1Api.md#actors_v1_get) | **GET** /actors/{actor} |
+[**actors_v1_list**](ActorsV1Api.md#actors_v1_list) | **GET** /actors |
+[**actors_v1_upgrade**](ActorsV1Api.md#actors_v1_upgrade) | **POST** /actors/{actor}/upgrade |
+[**actors_v1_upgrade_all**](ActorsV1Api.md#actors_v1_upgrade_all) | **POST** /actors/upgrade |
+
+
+
+## actors_v1_create
+
+> crate::models::ActorsV1CreateActorResponse actors_v1_create(actors_v1_create_actor_request, project, environment, endpoint_type)
+
+
+Create a new actor.
+
+### Parameters
+
+
+Name | Type | Description | Required | Notes
+------------- | ------------- | ------------- | ------------- | -------------
+**actors_v1_create_actor_request** | [**ActorsV1CreateActorRequest**](ActorsV1CreateActorRequest.md) | | [required] |
+**project** | Option<**String**> | | |
+**environment** | Option<**String**> | | |
+**endpoint_type** | Option<[**ActorsV1EndpointType**](.md)> | | |
+
+### Return type
+
+[**crate::models::ActorsV1CreateActorResponse**](ActorsV1CreateActorResponse.md)
+
+### Authorization
+
+[BearerAuth](../README.md#BearerAuth)
+
+### HTTP request headers
+
+- **Content-Type**: application/json
+- **Accept**: application/json
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
+
+## actors_v1_destroy
+
+> serde_json::Value actors_v1_destroy(actor, project, environment, override_kill_timeout)
+
+
+Destroy an actor.
+
+### Parameters
+
+
+Name | Type | Description | Required | Notes
+------------- | ------------- | ------------- | ------------- | -------------
+**actor** | **uuid::Uuid** | The id of the actor to destroy | [required] |
+**project** | Option<**String**> | | |
+**environment** | Option<**String**> | | |
+**override_kill_timeout** | Option<**i64**> | The duration to wait for in milliseconds before killing the actor. This should be used to override the default kill timeout if a faster time is needed, say for ignoring a graceful shutdown. | |
+
+### Return type
+
+[**serde_json::Value**](serde_json::Value.md)
+
+### Authorization
+
+[BearerAuth](../README.md#BearerAuth)
+
+### HTTP request headers
+
+- **Content-Type**: Not defined
+- **Accept**: application/json
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
+
+## actors_v1_get
+
+> crate::models::ActorsV1GetActorResponse actors_v1_get(actor, project, environment, endpoint_type)
+
+
+Gets an actor.
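A brief call sketch for this endpoint using plain `reqwest` against the documented route, rather than the generated `actors_v1_get` wrapper (base URL per this README; the bearer token, project, environment, and actor id below are placeholders):

```rust
use reqwest::Client;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder actor id and token; GET /actors/{actor} with the optional
    // query parameters listed in the table below.
    let actor = "00000000-0000-0000-0000-000000000000";
    let res = Client::new()
        .get(format!("https://api.rivet.gg/actors/{actor}"))
        .bearer_auth("RIVET_TOKEN")
        .query(&[("project", "my-project"), ("environment", "prod")])
        .send()
        .await?
        .error_for_status()?;
    // Deserializes into the ActorsV1GetActorResponse shape: { "actor": { ... } }.
    let body: serde_json::Value = res.json().await?;
    println!("{}", body["actor"]["id"]);
    Ok(())
}
```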
+
+### Parameters
+
+
+Name | Type | Description | Required | Notes
+------------- | ------------- | ------------- | ------------- | -------------
+**actor** | **uuid::Uuid** | The id of the actor to destroy | [required] |
+**project** | Option<**String**> | | |
+**environment** | Option<**String**> | | |
+**endpoint_type** | Option<[**ActorsV1EndpointType**](.md)> | | |
+
+### Return type
+
+[**crate::models::ActorsV1GetActorResponse**](ActorsV1GetActorResponse.md)
+
+### Authorization
+
+[BearerAuth](../README.md#BearerAuth)
+
+### HTTP request headers
+
+- **Content-Type**: Not defined
+- **Accept**: application/json
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
+
+## actors_v1_list
+
+> crate::models::ActorsV1ListActorsResponse actors_v1_list(project, environment, endpoint_type, tags_json, include_destroyed, cursor)
+
+
+Lists all actors associated with the token used. Can be filtered by tags in the query string.
+
+### Parameters
+
+
+Name | Type | Description | Required | Notes
+------------- | ------------- | ------------- | ------------- | -------------
+**project** | Option<**String**> | | |
+**environment** | Option<**String**> | | |
+**endpoint_type** | Option<[**ActorsV1EndpointType**](.md)> | | |
+**tags_json** | Option<**String**> | | |
+**include_destroyed** | Option<**bool**> | | |
+**cursor** | Option<**String**> | | |
+
+### Return type
+
+[**crate::models::ActorsV1ListActorsResponse**](ActorsV1ListActorsResponse.md)
+
+### Authorization
+
+[BearerAuth](../README.md#BearerAuth)
+
+### HTTP request headers
+
+- **Content-Type**: Not defined
+- **Accept**: application/json
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
+
+## actors_v1_upgrade
+
+> serde_json::Value actors_v1_upgrade(actor, actors_v1_upgrade_actor_request, project, environment)
+
+
+Upgrades an actor.
+
+### Parameters
+
+
+Name | Type | Description | Required | Notes
+------------- | ------------- | ------------- | ------------- | -------------
+**actor** | **uuid::Uuid** | The id of the actor to upgrade | [required] |
+**actors_v1_upgrade_actor_request** | [**ActorsV1UpgradeActorRequest**](ActorsV1UpgradeActorRequest.md) | | [required] |
+**project** | Option<**String**> | | |
+**environment** | Option<**String**> | | |
+
+### Return type
+
+[**serde_json::Value**](serde_json::Value.md)
+
+### Authorization
+
+[BearerAuth](../README.md#BearerAuth)
+
+### HTTP request headers
+
+- **Content-Type**: application/json
+- **Accept**: application/json
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
+
+## actors_v1_upgrade_all
+
+> crate::models::ActorsV1UpgradeAllActorsResponse actors_v1_upgrade_all(actors_v1_upgrade_all_actors_request, project, environment)
+
+
+Upgrades all actors matching the given tags.
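A comparable sketch of the underlying HTTP call for this endpoint, again with `reqwest` (token and tag values are placeholders; the request body follows `ActorsV1UpgradeAllActorsRequest`, where only `tags` is required):

```rust
use reqwest::Client;
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // POST /actors/upgrade: upgrade every actor matching `tags` to the build
    // selected by `build_tags`.
    let res = Client::new()
        .post("https://api.rivet.gg/actors/upgrade")
        .bearer_auth("RIVET_TOKEN")
        .query(&[("project", "my-project"), ("environment", "prod")])
        .json(&json!({
            "tags": { "name": "game" },
            "build_tags": { "name": "game", "current": "true" }
        }))
        .send()
        .await?
        .error_for_status()?;
    // Response follows ActorsV1UpgradeAllActorsResponse, e.g. { "count": 3 }.
    let body: serde_json::Value = res.json().await?;
    println!("upgraded {} actors", body["count"]);
    Ok(())
}
```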
+ +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**actors_v1_upgrade_all_actors_request** | [**ActorsV1UpgradeAllActorsRequest**](ActorsV1UpgradeAllActorsRequest.md) | | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | + +### Return type + +[**crate::models::ActorsV1UpgradeAllActorsResponse**](ActorsV1UpgradeAllActorsResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/api/full/rust/docs/ActorsV1CreateActorNetworkRequest.md b/sdks/api/full/rust/docs/ActorsV1CreateActorNetworkRequest.md new file mode 100644 index 0000000000..ede5462061 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1CreateActorNetworkRequest.md @@ -0,0 +1,13 @@ +# ActorsV1CreateActorNetworkRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**mode** | Option<[**crate::models::ActorsV1NetworkMode**](ActorsV1NetworkMode.md)> | | [optional] +**ports** | Option<[**::std::collections::HashMap**](ActorsV1CreateActorPortRequest.md)> | | [optional] +**wait_ready** | Option<**bool**> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1CreateActorPortRequest.md b/sdks/api/full/rust/docs/ActorsV1CreateActorPortRequest.md new file mode 100644 index 0000000000..b7828a6234 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1CreateActorPortRequest.md @@ -0,0 +1,13 @@ +# ActorsV1CreateActorPortRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**protocol** | [**crate::models::ActorsV1PortProtocol**](ActorsV1PortProtocol.md) | | +**internal_port** | Option<**i32**> | | [optional] +**routing** | Option<[**crate::models::ActorsV1PortRouting**](ActorsV1PortRouting.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1CreateActorRequest.md b/sdks/api/full/rust/docs/ActorsV1CreateActorRequest.md new file mode 100644 index 0000000000..57aad08fe9 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1CreateActorRequest.md @@ -0,0 +1,18 @@ +# ActorsV1CreateActorRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**region** | Option<**String**> | | [optional] +**tags** | Option<[**serde_json::Value**](.md)> | | +**build** | Option<[**uuid::Uuid**](uuid::Uuid.md)> | | [optional] +**build_tags** | Option<[**serde_json::Value**](.md)> | | [optional] +**runtime** | Option<[**crate::models::ActorsV1CreateActorRuntimeRequest**](ActorsV1CreateActorRuntimeRequest.md)> | | [optional] +**network** | Option<[**crate::models::ActorsV1CreateActorNetworkRequest**](ActorsV1CreateActorNetworkRequest.md)> | | [optional] +**resources** | Option<[**crate::models::ActorsV1Resources**](ActorsV1Resources.md)> | | [optional] +**lifecycle** | 
Option<[**crate::models::ActorsV1Lifecycle**](ActorsV1Lifecycle.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1CreateActorResponse.md b/sdks/api/full/rust/docs/ActorsV1CreateActorResponse.md new file mode 100644 index 0000000000..ee94828008 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1CreateActorResponse.md @@ -0,0 +1,11 @@ +# ActorsV1CreateActorResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**actor** | [**crate::models::ActorsV1Actor**](ActorsV1Actor.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1CreateActorRuntimeNetworkRequest.md b/sdks/api/full/rust/docs/ActorsV1CreateActorRuntimeNetworkRequest.md new file mode 100644 index 0000000000..21178ec62e --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1CreateActorRuntimeNetworkRequest.md @@ -0,0 +1,11 @@ +# ActorsV1CreateActorRuntimeNetworkRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**endpoint_type** | [**crate::models::ActorsV1EndpointType**](ActorsV1EndpointType.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1CreateActorRuntimeRequest.md b/sdks/api/full/rust/docs/ActorsV1CreateActorRuntimeRequest.md new file mode 100644 index 0000000000..3bf86b3908 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1CreateActorRuntimeRequest.md @@ -0,0 +1,12 @@ +# ActorsV1CreateActorRuntimeRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**environment** | Option<**::std::collections::HashMap**> | | [optional] +**network** | Option<[**crate::models::ActorsV1CreateActorRuntimeNetworkRequest**](ActorsV1CreateActorRuntimeNetworkRequest.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1EndpointType.md b/sdks/api/full/rust/docs/ActorsV1EndpointType.md new file mode 100644 index 0000000000..6300d6cf5b --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1EndpointType.md @@ -0,0 +1,10 @@ +# ActorsV1EndpointType + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1GetActorLogsResponse.md b/sdks/api/full/rust/docs/ActorsV1GetActorLogsResponse.md new file mode 100644 index 0000000000..aa2ce3b105 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1GetActorLogsResponse.md @@ -0,0 +1,16 @@ +# ActorsV1GetActorLogsResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**actor_ids** | **Vec** | List of actor IDs in these logs. The order of these correspond to the index in the log entry. 
| +**lines** | **Vec** | Sorted old to new. | +**timestamps** | **Vec** | Sorted old to new. | +**streams** | **Vec** | Streams the logs came from. 0 = stdout 1 = stderr | +**actor_indices** | **Vec** | Index of the actor that this log was for. Use this index to look up the full ID in `actor_ids`. | +**watch** | [**crate::models::WatchResponse**](WatchResponse.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1GetActorMetricsResponse.md b/sdks/api/full/rust/docs/ActorsV1GetActorMetricsResponse.md new file mode 100644 index 0000000000..0079da0f0b --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1GetActorMetricsResponse.md @@ -0,0 +1,15 @@ +# ActorsV1GetActorMetricsResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**actor_ids** | **Vec** | | +**metric_names** | **Vec** | | +**metric_attributes** | [**Vec<::std::collections::HashMap>**](map.md) | | +**metric_types** | **Vec** | | +**metric_values** | [**Vec>**](array.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1GetActorResponse.md b/sdks/api/full/rust/docs/ActorsV1GetActorResponse.md new file mode 100644 index 0000000000..f019d31efa --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1GetActorResponse.md @@ -0,0 +1,11 @@ +# ActorsV1GetActorResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**actor** | [**crate::models::ActorsV1Actor**](ActorsV1Actor.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1Lifecycle.md b/sdks/api/full/rust/docs/ActorsV1Lifecycle.md new file mode 100644 index 0000000000..f8efbb7d52 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1Lifecycle.md @@ -0,0 +1,12 @@ +# ActorsV1Lifecycle + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**kill_timeout** | Option<**i64**> | The duration to wait for in milliseconds before killing the actor. This should be set to a safe default, and can be overridden during a DELETE request if needed. | [optional] +**durable** | Option<**bool**> | If true, the actor will try to reschedule itself automatically in the event of a crash or a datacenter failover. The actor will not reschedule if it exits successfully. | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1ListActorsResponse.md b/sdks/api/full/rust/docs/ActorsV1ListActorsResponse.md new file mode 100644 index 0000000000..24b2724bae --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1ListActorsResponse.md @@ -0,0 +1,12 @@ +# ActorsV1ListActorsResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**actors** | [**Vec**](ActorsV1Actor.md) | A list of actors for the project associated with the token. 
| +**pagination** | [**crate::models::Pagination**](Pagination.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1LogsApi.md b/sdks/api/full/rust/docs/ActorsV1LogsApi.md new file mode 100644 index 0000000000..f2708a3361 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1LogsApi.md @@ -0,0 +1,46 @@ +# \ActorsV1LogsApi + +All URIs are relative to *https://api.rivet.gg* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**actors_v1_logs_get**](ActorsV1LogsApi.md#actors_v1_logs_get) | **GET** /actors/logs | + + + +## actors_v1_logs_get + +> crate::models::ActorsV1GetActorLogsResponse actors_v1_logs_get(stream, actor_ids_json, project, environment, search_text, search_case_sensitive, search_enable_regex, watch_index) + + +Returns the logs for a given actor. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**stream** | [**ActorsV1QueryLogStream**](.md) | | [required] | +**actor_ids_json** | **String** | | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | +**search_text** | Option<**String**> | | | +**search_case_sensitive** | Option<**bool**> | | | +**search_enable_regex** | Option<**bool**> | | | +**watch_index** | Option<**String**> | A query parameter denoting the requests watch index. | | + +### Return type + +[**crate::models::ActorsV1GetActorLogsResponse**](ActorsV1GetActorLogsResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/api/full/rust/docs/ActorsV1MetricsApi.md b/sdks/api/full/rust/docs/ActorsV1MetricsApi.md new file mode 100644 index 0000000000..fcd5105797 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1MetricsApi.md @@ -0,0 +1,44 @@ +# \ActorsV1MetricsApi + +All URIs are relative to *https://api.rivet.gg* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**actors_v1_metrics_get**](ActorsV1MetricsApi.md#actors_v1_metrics_get) | **GET** /actors/{actor}/metrics/history | + + + +## actors_v1_metrics_get + +> crate::models::ActorsV1GetActorMetricsResponse actors_v1_metrics_get(actor, start, end, interval, project, environment) + + +Returns the metrics for a given actor. 
+ +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**actor** | **uuid::Uuid** | The id of the actor to destroy | [required] | +**start** | **i32** | | [required] | +**end** | **i32** | | [required] | +**interval** | **i32** | | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | + +### Return type + +[**crate::models::ActorsV1GetActorMetricsResponse**](ActorsV1GetActorMetricsResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/api/full/rust/docs/ActorsV1Network.md b/sdks/api/full/rust/docs/ActorsV1Network.md new file mode 100644 index 0000000000..3c361b902b --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1Network.md @@ -0,0 +1,12 @@ +# ActorsV1Network + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**mode** | [**crate::models::ActorsV1NetworkMode**](ActorsV1NetworkMode.md) | | +**ports** | [**::std::collections::HashMap**](ActorsV1Port.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1NetworkMode.md b/sdks/api/full/rust/docs/ActorsV1NetworkMode.md new file mode 100644 index 0000000000..d43278fce7 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1NetworkMode.md @@ -0,0 +1,10 @@ +# ActorsV1NetworkMode + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1Port.md b/sdks/api/full/rust/docs/ActorsV1Port.md new file mode 100644 index 0000000000..622a287cae --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1Port.md @@ -0,0 +1,17 @@ +# ActorsV1Port + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**protocol** | [**crate::models::ActorsV1PortProtocol**](ActorsV1PortProtocol.md) | | +**internal_port** | Option<**i32**> | | [optional] +**hostname** | Option<**String**> | | [optional] +**port** | Option<**i32**> | | [optional] +**path** | Option<**String**> | | [optional] +**url** | Option<**String**> | Fully formed connection URL including protocol, hostname, port, and path, if applicable. 
| [optional] +**routing** | [**crate::models::ActorsV1PortRouting**](ActorsV1PortRouting.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1PortProtocol.md b/sdks/api/full/rust/docs/ActorsV1PortProtocol.md new file mode 100644 index 0000000000..b30e7d8242 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1PortProtocol.md @@ -0,0 +1,10 @@ +# ActorsV1PortProtocol + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1PortRouting.md b/sdks/api/full/rust/docs/ActorsV1PortRouting.md new file mode 100644 index 0000000000..35c2e4c1ef --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1PortRouting.md @@ -0,0 +1,12 @@ +# ActorsV1PortRouting + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**guard** | Option<[**serde_json::Value**](.md)> | | [optional] +**host** | Option<[**serde_json::Value**](.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1QueryLogStream.md b/sdks/api/full/rust/docs/ActorsV1QueryLogStream.md new file mode 100644 index 0000000000..0f30370967 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1QueryLogStream.md @@ -0,0 +1,10 @@ +# ActorsV1QueryLogStream + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsResources.md b/sdks/api/full/rust/docs/ActorsV1Resources.md similarity index 96% rename from sdks/api/full/rust/docs/ActorsResources.md rename to sdks/api/full/rust/docs/ActorsV1Resources.md index b5fa9ad812..77af9f6f2a 100644 --- a/sdks/api/full/rust/docs/ActorsResources.md +++ b/sdks/api/full/rust/docs/ActorsV1Resources.md @@ -1,4 +1,4 @@ -# ActorsResources +# ActorsV1Resources ## Properties diff --git a/sdks/api/full/rust/docs/ActorsV1Runtime.md b/sdks/api/full/rust/docs/ActorsV1Runtime.md new file mode 100644 index 0000000000..2db828ab04 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1Runtime.md @@ -0,0 +1,13 @@ +# ActorsV1Runtime + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**build** | [**uuid::Uuid**](uuid::Uuid.md) | | +**arguments** | Option<**Vec**> | | [optional] +**environment** | Option<**::std::collections::HashMap**> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1UpgradeActorRequest.md b/sdks/api/full/rust/docs/ActorsV1UpgradeActorRequest.md new file mode 100644 index 0000000000..aef3ac62c6 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1UpgradeActorRequest.md @@ -0,0 +1,12 @@ +# ActorsV1UpgradeActorRequest + +## Properties + +Name | Type | Description 
| Notes +------------ | ------------- | ------------- | ------------- +**build** | Option<[**uuid::Uuid**](uuid::Uuid.md)> | | [optional] +**build_tags** | Option<[**serde_json::Value**](.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1UpgradeAllActorsRequest.md b/sdks/api/full/rust/docs/ActorsV1UpgradeAllActorsRequest.md new file mode 100644 index 0000000000..ef24aca7fc --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1UpgradeAllActorsRequest.md @@ -0,0 +1,13 @@ +# ActorsV1UpgradeAllActorsRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**tags** | Option<[**serde_json::Value**](.md)> | | +**build** | Option<[**uuid::Uuid**](uuid::Uuid.md)> | | [optional] +**build_tags** | Option<[**serde_json::Value**](.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ActorsV1UpgradeAllActorsResponse.md b/sdks/api/full/rust/docs/ActorsV1UpgradeAllActorsResponse.md new file mode 100644 index 0000000000..1eabe9fe70 --- /dev/null +++ b/sdks/api/full/rust/docs/ActorsV1UpgradeAllActorsResponse.md @@ -0,0 +1,11 @@ +# ActorsV1UpgradeAllActorsResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**count** | **i64** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersApi.md b/sdks/api/full/rust/docs/ContainersApi.md new file mode 100644 index 0000000000..78ad6e627f --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersApi.md @@ -0,0 +1,213 @@ +# \ContainersApi + +All URIs are relative to *https://api.rivet.gg* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**containers_create**](ContainersApi.md#containers_create) | **POST** /v1/containers | +[**containers_destroy**](ContainersApi.md#containers_destroy) | **DELETE** /v1/containers/{container} | +[**containers_get**](ContainersApi.md#containers_get) | **GET** /v1/containers/{container} | +[**containers_list**](ContainersApi.md#containers_list) | **GET** /v1/containers | +[**containers_upgrade**](ContainersApi.md#containers_upgrade) | **POST** /v1/containers/{container}/upgrade | +[**containers_upgrade_all**](ContainersApi.md#containers_upgrade_all) | **POST** /v1/containers/upgrade | + + + +## containers_create + +> crate::models::ContainersCreateContainerResponse containers_create(containers_create_container_request, project, environment, endpoint_type) + + +Create a new container. 
+ +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**containers_create_container_request** | [**ContainersCreateContainerRequest**](ContainersCreateContainerRequest.md) | | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | +**endpoint_type** | Option<[**ContainersEndpointType**](.md)> | | | + +### Return type + +[**crate::models::ContainersCreateContainerResponse**](ContainersCreateContainerResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## containers_destroy + +> serde_json::Value containers_destroy(container, project, environment, override_kill_timeout) + + +Destroy a container. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**container** | **String** | The id of the container to destroy | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | +**override_kill_timeout** | Option<**i64**> | The duration to wait for in milliseconds before killing the container. This should be used to override the default kill timeout if a faster time is needed, say for ignoring a graceful shutdown. | | + +### Return type + +[**serde_json::Value**](serde_json::Value.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## containers_get + +> crate::models::ContainersGetContainerResponse containers_get(container, project, environment, endpoint_type) + + +Gets a container. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**container** | **String** | The id of the container to destroy | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | +**endpoint_type** | Option<[**ContainersEndpointType**](.md)> | | | + +### Return type + +[**crate::models::ContainersGetContainerResponse**](ContainersGetContainerResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## containers_list + +> crate::models::ContainersListContainersResponse containers_list(project, environment, endpoint_type, tags_json, include_destroyed, cursor) + + +Lists all containers associated with the token used. Can be filtered by tags in the query string. 
+ +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | +**endpoint_type** | Option<[**ContainersEndpointType**](.md)> | | | +**tags_json** | Option<**String**> | | | +**include_destroyed** | Option<**bool**> | | | +**cursor** | Option<**String**> | | | + +### Return type + +[**crate::models::ContainersListContainersResponse**](ContainersListContainersResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## containers_upgrade + +> serde_json::Value containers_upgrade(container, containers_upgrade_container_request, project, environment) + + +Upgrades a container. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**container** | **String** | The id of the container to upgrade | [required] | +**containers_upgrade_container_request** | [**ContainersUpgradeContainerRequest**](ContainersUpgradeContainerRequest.md) | | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | + +### Return type + +[**serde_json::Value**](serde_json::Value.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## containers_upgrade_all + +> crate::models::ContainersUpgradeAllContainersResponse containers_upgrade_all(containers_upgrade_all_containers_request, project, environment) + + +Upgrades all containers matching the given tags. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**containers_upgrade_all_containers_request** | [**ContainersUpgradeAllContainersRequest**](ContainersUpgradeAllContainersRequest.md) | | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | + +### Return type + +[**crate::models::ContainersUpgradeAllContainersResponse**](ContainersUpgradeAllContainersResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/api/full/rust/docs/ContainersContainer.md b/sdks/api/full/rust/docs/ContainersContainer.md new file mode 100644 index 0000000000..d6d59a5979 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersContainer.md @@ -0,0 +1,20 @@ +# ContainersContainer + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **String** | Can be a UUID or base36 encoded binary data. 
| +**region** | **String** | | +**tags** | Option<[**serde_json::Value**](.md)> | | +**runtime** | [**crate::models::ContainersRuntime**](ContainersRuntime.md) | | +**network** | [**crate::models::ContainersNetwork**](ContainersNetwork.md) | | +**resources** | [**crate::models::ContainersResources**](ContainersResources.md) | | +**lifecycle** | [**crate::models::ContainersLifecycle**](ContainersLifecycle.md) | | +**created_at** | **String** | RFC3339 timestamp | +**started_at** | Option<**String**> | RFC3339 timestamp | [optional] +**destroyed_at** | Option<**String**> | RFC3339 timestamp | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersCreateContainerNetworkRequest.md b/sdks/api/full/rust/docs/ContainersCreateContainerNetworkRequest.md new file mode 100644 index 0000000000..75819c3d43 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersCreateContainerNetworkRequest.md @@ -0,0 +1,13 @@ +# ContainersCreateContainerNetworkRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**mode** | Option<[**crate::models::ContainersNetworkMode**](ContainersNetworkMode.md)> | | [optional] +**ports** | Option<[**::std::collections::HashMap**](ContainersCreateContainerPortRequest.md)> | | [optional] +**wait_ready** | Option<**bool**> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersCreateContainerPortRequest.md b/sdks/api/full/rust/docs/ContainersCreateContainerPortRequest.md new file mode 100644 index 0000000000..1b0e5932e5 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersCreateContainerPortRequest.md @@ -0,0 +1,13 @@ +# ContainersCreateContainerPortRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**protocol** | [**crate::models::ContainersPortProtocol**](ContainersPortProtocol.md) | | +**internal_port** | Option<**i32**> | | [optional] +**routing** | Option<[**crate::models::ContainersPortRouting**](ContainersPortRouting.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersCreateContainerRequest.md b/sdks/api/full/rust/docs/ContainersCreateContainerRequest.md new file mode 100644 index 0000000000..0eb6be73e2 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersCreateContainerRequest.md @@ -0,0 +1,18 @@ +# ContainersCreateContainerRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**region** | Option<**String**> | | [optional] +**tags** | Option<[**serde_json::Value**](.md)> | | +**build** | Option<[**uuid::Uuid**](uuid::Uuid.md)> | | [optional] +**build_tags** | Option<[**serde_json::Value**](.md)> | | [optional] +**runtime** | Option<[**crate::models::ContainersCreateContainerRuntimeRequest**](ContainersCreateContainerRuntimeRequest.md)> | | [optional] +**network** | Option<[**crate::models::ContainersCreateContainerNetworkRequest**](ContainersCreateContainerNetworkRequest.md)> | | [optional] +**resources** | 
[**crate::models::ContainersResources**](ContainersResources.md) | | +**lifecycle** | Option<[**crate::models::ContainersLifecycle**](ContainersLifecycle.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersCreateContainerResponse.md b/sdks/api/full/rust/docs/ContainersCreateContainerResponse.md new file mode 100644 index 0000000000..cb0111c5b1 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersCreateContainerResponse.md @@ -0,0 +1,11 @@ +# ContainersCreateContainerResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**container** | [**crate::models::ContainersContainer**](ContainersContainer.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersCreateContainerRuntimeNetworkRequest.md b/sdks/api/full/rust/docs/ContainersCreateContainerRuntimeNetworkRequest.md new file mode 100644 index 0000000000..d724ce6351 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersCreateContainerRuntimeNetworkRequest.md @@ -0,0 +1,11 @@ +# ContainersCreateContainerRuntimeNetworkRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**endpoint_type** | [**crate::models::ContainersEndpointType**](ContainersEndpointType.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersCreateContainerRuntimeRequest.md b/sdks/api/full/rust/docs/ContainersCreateContainerRuntimeRequest.md new file mode 100644 index 0000000000..0873cd62be --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersCreateContainerRuntimeRequest.md @@ -0,0 +1,12 @@ +# ContainersCreateContainerRuntimeRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**environment** | Option<**::std::collections::HashMap**> | | [optional] +**network** | Option<[**crate::models::ContainersCreateContainerRuntimeNetworkRequest**](ContainersCreateContainerRuntimeNetworkRequest.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersEndpointType.md b/sdks/api/full/rust/docs/ContainersEndpointType.md new file mode 100644 index 0000000000..62e04640ae --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersEndpointType.md @@ -0,0 +1,10 @@ +# ContainersEndpointType + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersGetContainerLogsResponse.md b/sdks/api/full/rust/docs/ContainersGetContainerLogsResponse.md new file mode 100644 index 0000000000..ff92071c09 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersGetContainerLogsResponse.md @@ -0,0 +1,17 @@ +# ContainersGetContainerLogsResponse + +## 
Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**container_ids** | **Vec** | List of container IDs in these logs. The order of these correspond to the index in the log entry. | +**lines** | **Vec** | Sorted old to new. | +**timestamps** | **Vec** | Sorted old to new. | +**streams** | **Vec** | Streams the logs came from. 0 = stdout 1 = stderr | +**foreigns** | **Vec** | List of flags denoting if this log is not directly from the container. | +**container_indices** | **Vec** | Index of the container that this log was for. Use this index to look up the full ID in `container_ids`. | +**watch** | [**crate::models::WatchResponse**](WatchResponse.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersGetContainerMetricsResponse.md b/sdks/api/full/rust/docs/ContainersGetContainerMetricsResponse.md new file mode 100644 index 0000000000..c6c0c87b35 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersGetContainerMetricsResponse.md @@ -0,0 +1,15 @@ +# ContainersGetContainerMetricsResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**container_ids** | **Vec** | | +**metric_names** | **Vec** | | +**metric_attributes** | [**Vec<::std::collections::HashMap>**](map.md) | | +**metric_types** | **Vec** | | +**metric_values** | [**Vec>**](array.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersGetContainerResponse.md b/sdks/api/full/rust/docs/ContainersGetContainerResponse.md new file mode 100644 index 0000000000..5cc164ff76 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersGetContainerResponse.md @@ -0,0 +1,11 @@ +# ContainersGetContainerResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**container** | [**crate::models::ContainersContainer**](ContainersContainer.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersLifecycle.md b/sdks/api/full/rust/docs/ContainersLifecycle.md new file mode 100644 index 0000000000..6e5dc7cef0 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersLifecycle.md @@ -0,0 +1,12 @@ +# ContainersLifecycle + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**kill_timeout** | Option<**i64**> | The duration to wait for in milliseconds before killing the container. This should be set to a safe default, and can be overridden during a DELETE request if needed. | [optional] +**durable** | Option<**bool**> | If true, the container will try to reschedule itself automatically in the event of a crash or a datacenter failover. The container will not reschedule if it exits successfully. 
| [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersListContainersResponse.md b/sdks/api/full/rust/docs/ContainersListContainersResponse.md new file mode 100644 index 0000000000..c1a4119e0d --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersListContainersResponse.md @@ -0,0 +1,12 @@ +# ContainersListContainersResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**containers** | [**Vec**](ContainersContainer.md) | A list of containers for the project associated with the token. | +**pagination** | [**crate::models::Pagination**](Pagination.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersLogsApi.md b/sdks/api/full/rust/docs/ContainersLogsApi.md new file mode 100644 index 0000000000..c4c1c3f5cb --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersLogsApi.md @@ -0,0 +1,46 @@ +# \ContainersLogsApi + +All URIs are relative to *https://api.rivet.gg* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**containers_logs_get**](ContainersLogsApi.md#containers_logs_get) | **GET** /v1/containers/logs | + + + +## containers_logs_get + +> crate::models::ContainersGetContainerLogsResponse containers_logs_get(stream, container_ids_json, project, environment, search_text, search_case_sensitive, search_enable_regex, watch_index) + + +Returns the logs for a given container. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**stream** | [**ContainersQueryLogStream**](.md) | | [required] | +**container_ids_json** | **String** | | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | +**search_text** | Option<**String**> | | | +**search_case_sensitive** | Option<**bool**> | | | +**search_enable_regex** | Option<**bool**> | | | +**watch_index** | Option<**String**> | A query parameter denoting the requests watch index. | | + +### Return type + +[**crate::models::ContainersGetContainerLogsResponse**](ContainersGetContainerLogsResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/api/full/rust/docs/ContainersMetricsApi.md b/sdks/api/full/rust/docs/ContainersMetricsApi.md new file mode 100644 index 0000000000..0d08a99321 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersMetricsApi.md @@ -0,0 +1,44 @@ +# \ContainersMetricsApi + +All URIs are relative to *https://api.rivet.gg* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**containers_metrics_get**](ContainersMetricsApi.md#containers_metrics_get) | **GET** /v1/containers/{container}/metrics/history | + + + +## containers_metrics_get + +> crate::models::ContainersGetContainerMetricsResponse containers_metrics_get(container, start, end, interval, project, environment) + + +Returns the metrics for a given container. 
+ +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**container** | **String** | The id of the container to destroy | [required] | +**start** | **i32** | | [required] | +**end** | **i32** | | [required] | +**interval** | **i32** | | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | + +### Return type + +[**crate::models::ContainersGetContainerMetricsResponse**](ContainersGetContainerMetricsResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/api/full/rust/docs/ContainersNetwork.md b/sdks/api/full/rust/docs/ContainersNetwork.md new file mode 100644 index 0000000000..41b81ebb4f --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersNetwork.md @@ -0,0 +1,12 @@ +# ContainersNetwork + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**mode** | [**crate::models::ContainersNetworkMode**](ContainersNetworkMode.md) | | +**ports** | [**::std::collections::HashMap**](ContainersPort.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersNetworkMode.md b/sdks/api/full/rust/docs/ContainersNetworkMode.md new file mode 100644 index 0000000000..bc4a4e115b --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersNetworkMode.md @@ -0,0 +1,10 @@ +# ContainersNetworkMode + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersPort.md b/sdks/api/full/rust/docs/ContainersPort.md new file mode 100644 index 0000000000..46ace3b43e --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersPort.md @@ -0,0 +1,17 @@ +# ContainersPort + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**protocol** | [**crate::models::ContainersPortProtocol**](ContainersPortProtocol.md) | | +**internal_port** | Option<**i32**> | | [optional] +**hostname** | Option<**String**> | | [optional] +**port** | Option<**i32**> | | [optional] +**path** | Option<**String**> | | [optional] +**url** | Option<**String**> | Fully formed connection URL including protocol, hostname, port, and path, if applicable. 
| [optional] +**routing** | [**crate::models::ContainersPortRouting**](ContainersPortRouting.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersPortProtocol.md b/sdks/api/full/rust/docs/ContainersPortProtocol.md new file mode 100644 index 0000000000..a69a314e54 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersPortProtocol.md @@ -0,0 +1,10 @@ +# ContainersPortProtocol + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersPortRouting.md b/sdks/api/full/rust/docs/ContainersPortRouting.md new file mode 100644 index 0000000000..dd0c620964 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersPortRouting.md @@ -0,0 +1,12 @@ +# ContainersPortRouting + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**guard** | Option<[**serde_json::Value**](.md)> | | [optional] +**host** | Option<[**serde_json::Value**](.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersQueryLogStream.md b/sdks/api/full/rust/docs/ContainersQueryLogStream.md new file mode 100644 index 0000000000..84b7004e27 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersQueryLogStream.md @@ -0,0 +1,10 @@ +# ContainersQueryLogStream + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ActorsResources.md b/sdks/api/full/rust/docs/ContainersResources.md similarity index 95% rename from sdks/api/runtime/rust/docs/ActorsResources.md rename to sdks/api/full/rust/docs/ContainersResources.md index b5fa9ad812..a32f979d17 100644 --- a/sdks/api/runtime/rust/docs/ActorsResources.md +++ b/sdks/api/full/rust/docs/ContainersResources.md @@ -1,4 +1,4 @@ -# ActorsResources +# ContainersResources ## Properties diff --git a/sdks/api/full/rust/docs/ContainersRuntime.md b/sdks/api/full/rust/docs/ContainersRuntime.md new file mode 100644 index 0000000000..ea30224b4b --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersRuntime.md @@ -0,0 +1,13 @@ +# ContainersRuntime + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**build** | [**uuid::Uuid**](uuid::Uuid.md) | | +**arguments** | Option<**Vec**> | | [optional] +**environment** | Option<**::std::collections::HashMap**> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersUpgradeAllContainersRequest.md b/sdks/api/full/rust/docs/ContainersUpgradeAllContainersRequest.md new file mode 100644 index 0000000000..bd912472ca --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersUpgradeAllContainersRequest.md @@ -0,0 
+1,13 @@ +# ContainersUpgradeAllContainersRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**tags** | Option<[**serde_json::Value**](.md)> | | +**build** | Option<[**uuid::Uuid**](uuid::Uuid.md)> | | [optional] +**build_tags** | Option<[**serde_json::Value**](.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersUpgradeAllContainersResponse.md b/sdks/api/full/rust/docs/ContainersUpgradeAllContainersResponse.md new file mode 100644 index 0000000000..04eb18d657 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersUpgradeAllContainersResponse.md @@ -0,0 +1,11 @@ +# ContainersUpgradeAllContainersResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**count** | **i64** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/docs/ContainersUpgradeContainerRequest.md b/sdks/api/full/rust/docs/ContainersUpgradeContainerRequest.md new file mode 100644 index 0000000000..558addcae0 --- /dev/null +++ b/sdks/api/full/rust/docs/ContainersUpgradeContainerRequest.md @@ -0,0 +1,12 @@ +# ContainersUpgradeContainerRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**build** | Option<[**uuid::Uuid**](uuid::Uuid.md)> | | [optional] +**build_tags** | Option<[**serde_json::Value**](.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/full/rust/src/apis/actors_api.rs b/sdks/api/full/rust/src/apis/actors_api.rs index bf00b1b401..f64351bd09 100644 --- a/sdks/api/full/rust/src/apis/actors_api.rs +++ b/sdks/api/full/rust/src/apis/actors_api.rs @@ -129,7 +129,7 @@ pub async fn actors_create( let local_var_client = &local_var_configuration.client; - let local_var_uri_str = format!("{}/actors", local_var_configuration.base_path); + let local_var_uri_str = format!("{}/v2/actors", local_var_configuration.base_path); let mut local_var_req_builder = local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); @@ -187,7 +187,7 @@ pub async fn actors_destroy( let local_var_client = &local_var_configuration.client; let local_var_uri_str = format!( - "{}/actors/{actor}", + "{}/v2/actors/{actor}", local_var_configuration.base_path, actor = crate::apis::urlencode(actor) ); @@ -247,7 +247,7 @@ pub async fn actors_get( let local_var_client = &local_var_configuration.client; let local_var_uri_str = format!( - "{}/actors/{actor}", + "{}/v2/actors/{actor}", local_var_configuration.base_path, actor = crate::apis::urlencode(actor) ); @@ -308,7 +308,7 @@ pub async fn actors_list( let local_var_client = &local_var_configuration.client; - let local_var_uri_str = format!("{}/actors", local_var_configuration.base_path); + let local_var_uri_str = format!("{}/v2/actors", local_var_configuration.base_path); let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); @@ -437,7 +437,7 @@ pub async fn actors_upgrade( let local_var_client = &local_var_configuration.client; let 
local_var_uri_str = format!( - "{}/actors/{actor}/upgrade", + "{}/v2/actors/{actor}/upgrade", local_var_configuration.base_path, actor = crate::apis::urlencode(actor) ); @@ -492,7 +492,7 @@ pub async fn actors_upgrade_all( let local_var_client = &local_var_configuration.client; - let local_var_uri_str = format!("{}/actors/upgrade", local_var_configuration.base_path); + let local_var_uri_str = format!("{}/v2/actors/upgrade", local_var_configuration.base_path); let mut local_var_req_builder = local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); diff --git a/sdks/api/full/rust/src/apis/actors_logs_api.rs b/sdks/api/full/rust/src/apis/actors_logs_api.rs index bf0466f1a4..83b94e6845 100644 --- a/sdks/api/full/rust/src/apis/actors_logs_api.rs +++ b/sdks/api/full/rust/src/apis/actors_logs_api.rs @@ -93,7 +93,7 @@ pub async fn actors_logs_get( let local_var_client = &local_var_configuration.client; - let local_var_uri_str = format!("{}/actors/logs", local_var_configuration.base_path); + let local_var_uri_str = format!("{}/v2/actors/logs", local_var_configuration.base_path); let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); diff --git a/sdks/api/full/rust/src/apis/actors_metrics_api.rs b/sdks/api/full/rust/src/apis/actors_metrics_api.rs index 6244579e8d..dc9d8a7302 100644 --- a/sdks/api/full/rust/src/apis/actors_metrics_api.rs +++ b/sdks/api/full/rust/src/apis/actors_metrics_api.rs @@ -41,7 +41,7 @@ pub async fn actors_metrics_get( let local_var_client = &local_var_configuration.client; let local_var_uri_str = format!( - "{}/actors/{actor}/metrics/history", + "{}/v2/actors/{actor}/metrics/history", local_var_configuration.base_path, actor = crate::apis::urlencode(actor) ); diff --git a/sdks/api/full/rust/src/apis/actors_v1_api.rs b/sdks/api/full/rust/src/apis/actors_v1_api.rs new file mode 100644 index 0000000000..8f5799662f --- /dev/null +++ b/sdks/api/full/rust/src/apis/actors_v1_api.rs @@ -0,0 +1,448 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +use reqwest; + +use super::{configuration, Error}; +use crate::apis::ResponseContent; + +/// struct for typed errors of method [`actors_v1_create`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ActorsV1CreateError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`actors_v1_destroy`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ActorsV1DestroyError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`actors_v1_get`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ActorsV1GetError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + 
Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`actors_v1_list`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ActorsV1ListError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`actors_v1_upgrade`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ActorsV1UpgradeError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`actors_v1_upgrade_all`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ActorsV1UpgradeAllError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// Create a new actor. +pub async fn actors_v1_create( + configuration: &configuration::Configuration, + actors_v1_create_actor_request: crate::models::ActorsV1CreateActorRequest, + project: Option<&str>, + environment: Option<&str>, + endpoint_type: Option, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/actors", local_var_configuration.base_path); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = endpoint_type { + local_var_req_builder = + local_var_req_builder.query(&[("endpoint_type", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + local_var_req_builder = local_var_req_builder.json(&actors_v1_create_actor_request); + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; 
+ Err(Error::ResponseError(local_var_error)) + } +} + +/// Destroy an actor. +pub async fn actors_v1_destroy( + configuration: &configuration::Configuration, + actor: &str, + project: Option<&str>, + environment: Option<&str>, + override_kill_timeout: Option, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!( + "{}/actors/{actor}", + local_var_configuration.base_path, + actor = crate::apis::urlencode(actor) + ); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::DELETE, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = override_kill_timeout { + local_var_req_builder = + local_var_req_builder.query(&[("override_kill_timeout", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} + +/// Gets an actor. 
+pub async fn actors_v1_get( + configuration: &configuration::Configuration, + actor: &str, + project: Option<&str>, + environment: Option<&str>, + endpoint_type: Option, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!( + "{}/actors/{actor}", + local_var_configuration.base_path, + actor = crate::apis::urlencode(actor) + ); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = endpoint_type { + local_var_req_builder = + local_var_req_builder.query(&[("endpoint_type", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} + +/// Lists all actors associated with the token used. Can be filtered by tags in the query string. 
+pub async fn actors_v1_list( + configuration: &configuration::Configuration, + project: Option<&str>, + environment: Option<&str>, + endpoint_type: Option, + tags_json: Option<&str>, + include_destroyed: Option, + cursor: Option<&str>, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/actors", local_var_configuration.base_path); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = endpoint_type { + local_var_req_builder = + local_var_req_builder.query(&[("endpoint_type", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = tags_json { + local_var_req_builder = + local_var_req_builder.query(&[("tags_json", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = include_destroyed { + local_var_req_builder = + local_var_req_builder.query(&[("include_destroyed", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = cursor { + local_var_req_builder = + local_var_req_builder.query(&[("cursor", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} + +/// Upgrades an actor.
+pub async fn actors_v1_upgrade( + configuration: &configuration::Configuration, + actor: &str, + actors_v1_upgrade_actor_request: crate::models::ActorsV1UpgradeActorRequest, + project: Option<&str>, + environment: Option<&str>, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!( + "{}/actors/{actor}/upgrade", + local_var_configuration.base_path, + actor = crate::apis::urlencode(actor) + ); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + local_var_req_builder = local_var_req_builder.json(&actors_v1_upgrade_actor_request); + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} + +/// Upgrades all actors matching the given tags. 
+pub async fn actors_v1_upgrade_all( + configuration: &configuration::Configuration, + actors_v1_upgrade_all_actors_request: crate::models::ActorsV1UpgradeAllActorsRequest, + project: Option<&str>, + environment: Option<&str>, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/actors/upgrade", local_var_configuration.base_path); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + local_var_req_builder = local_var_req_builder.json(&actors_v1_upgrade_all_actors_request); + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} diff --git a/sdks/api/full/rust/src/apis/actors_v1_logs_api.rs b/sdks/api/full/rust/src/apis/actors_v1_logs_api.rs new file mode 100644 index 0000000000..bfcb799ae3 --- /dev/null +++ b/sdks/api/full/rust/src/apis/actors_v1_logs_api.rs @@ -0,0 +1,102 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +use reqwest; + +use super::{configuration, Error}; +use crate::apis::ResponseContent; + +/// struct for typed errors of method [`actors_v1_logs_get`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ActorsV1LogsGetError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// Returns the logs for a given actor. 
+pub async fn actors_v1_logs_get( + configuration: &configuration::Configuration, + stream: crate::models::ActorsV1QueryLogStream, + actor_ids_json: &str, + project: Option<&str>, + environment: Option<&str>, + search_text: Option<&str>, + search_case_sensitive: Option, + search_enable_regex: Option, + watch_index: Option<&str>, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/actors/logs", local_var_configuration.base_path); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + local_var_req_builder = local_var_req_builder.query(&[("stream", &stream.to_string())]); + local_var_req_builder = + local_var_req_builder.query(&[("actor_ids_json", &actor_ids_json.to_string())]); + if let Some(ref local_var_str) = search_text { + local_var_req_builder = + local_var_req_builder.query(&[("search_text", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = search_case_sensitive { + local_var_req_builder = + local_var_req_builder.query(&[("search_case_sensitive", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = search_enable_regex { + local_var_req_builder = + local_var_req_builder.query(&[("search_enable_regex", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = watch_index { + local_var_req_builder = + local_var_req_builder.query(&[("watch_index", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} diff --git a/sdks/api/full/rust/src/apis/actors_v1_metrics_api.rs b/sdks/api/full/rust/src/apis/actors_v1_metrics_api.rs new file mode 100644 index 0000000000..265fbb301a --- /dev/null +++ b/sdks/api/full/rust/src/apis/actors_v1_metrics_api.rs @@ -0,0 +1,88 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +use reqwest; + +use super::{configuration, Error}; +use crate::apis::ResponseContent; + +/// struct for typed errors of method [`actors_v1_metrics_get`] +#[derive(Debug, Clone, 
Serialize, Deserialize)] +#[serde(untagged)] +pub enum ActorsV1MetricsGetError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// Returns the metrics for a given actor. +pub async fn actors_v1_metrics_get( + configuration: &configuration::Configuration, + actor: &str, + start: i32, + end: i32, + interval: i32, + project: Option<&str>, + environment: Option<&str>, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!( + "{}/actors/{actor}/metrics/history", + local_var_configuration.base_path, + actor = crate::apis::urlencode(actor) + ); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + local_var_req_builder = local_var_req_builder.query(&[("start", &start.to_string())]); + local_var_req_builder = local_var_req_builder.query(&[("end", &end.to_string())]); + local_var_req_builder = local_var_req_builder.query(&[("interval", &interval.to_string())]); + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} diff --git a/sdks/api/full/rust/src/apis/containers_api.rs b/sdks/api/full/rust/src/apis/containers_api.rs new file mode 100644 index 0000000000..430936d4dd --- /dev/null +++ b/sdks/api/full/rust/src/apis/containers_api.rs @@ -0,0 +1,452 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +use reqwest; + +use super::{configuration, Error}; +use crate::apis::ResponseContent; + +/// struct for typed errors of method [`containers_create`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersCreateError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + 
Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`containers_destroy`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersDestroyError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`containers_get`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersGetError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`containers_list`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersListError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`containers_upgrade`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersUpgradeError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`containers_upgrade_all`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersUpgradeAllError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// Create a new container. 
+pub async fn containers_create( + configuration: &configuration::Configuration, + containers_create_container_request: crate::models::ContainersCreateContainerRequest, + project: Option<&str>, + environment: Option<&str>, + endpoint_type: Option, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/v1/containers", local_var_configuration.base_path); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = endpoint_type { + local_var_req_builder = + local_var_req_builder.query(&[("endpoint_type", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + local_var_req_builder = local_var_req_builder.json(&containers_create_container_request); + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} + +/// Destroy a container. 
+pub async fn containers_destroy( + configuration: &configuration::Configuration, + container: &str, + project: Option<&str>, + environment: Option<&str>, + override_kill_timeout: Option, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!( + "{}/v1/containers/{container}", + local_var_configuration.base_path, + container = crate::apis::urlencode(container) + ); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::DELETE, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = override_kill_timeout { + local_var_req_builder = + local_var_req_builder.query(&[("override_kill_timeout", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} + +/// Gets a container. 
+pub async fn containers_get( + configuration: &configuration::Configuration, + container: &str, + project: Option<&str>, + environment: Option<&str>, + endpoint_type: Option, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!( + "{}/v1/containers/{container}", + local_var_configuration.base_path, + container = crate::apis::urlencode(container) + ); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = endpoint_type { + local_var_req_builder = + local_var_req_builder.query(&[("endpoint_type", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} + +/// Lists all containers associated with the token used. Can be filtered by tags in the query string. 
+pub async fn containers_list( + configuration: &configuration::Configuration, + project: Option<&str>, + environment: Option<&str>, + endpoint_type: Option, + tags_json: Option<&str>, + include_destroyed: Option, + cursor: Option<&str>, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/v1/containers", local_var_configuration.base_path); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = endpoint_type { + local_var_req_builder = + local_var_req_builder.query(&[("endpoint_type", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = tags_json { + local_var_req_builder = + local_var_req_builder.query(&[("tags_json", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = include_destroyed { + local_var_req_builder = + local_var_req_builder.query(&[("include_destroyed", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = cursor { + local_var_req_builder = + local_var_req_builder.query(&[("cursor", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} + +/// Upgrades a container. 
+pub async fn containers_upgrade( + configuration: &configuration::Configuration, + container: &str, + containers_upgrade_container_request: crate::models::ContainersUpgradeContainerRequest, + project: Option<&str>, + environment: Option<&str>, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!( + "{}/v1/containers/{container}/upgrade", + local_var_configuration.base_path, + container = crate::apis::urlencode(container) + ); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + local_var_req_builder = local_var_req_builder.json(&containers_upgrade_container_request); + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} + +/// Upgrades all containers matching the given tags. 
+pub async fn containers_upgrade_all( + configuration: &configuration::Configuration, + containers_upgrade_all_containers_request: crate::models::ContainersUpgradeAllContainersRequest, + project: Option<&str>, + environment: Option<&str>, +) -> Result> +{ + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!( + "{}/v1/containers/upgrade", + local_var_configuration.base_path + ); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + local_var_req_builder = local_var_req_builder.json(&containers_upgrade_all_containers_request); + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} diff --git a/sdks/api/full/rust/src/apis/containers_logs_api.rs b/sdks/api/full/rust/src/apis/containers_logs_api.rs new file mode 100644 index 0000000000..6c8799b9ae --- /dev/null +++ b/sdks/api/full/rust/src/apis/containers_logs_api.rs @@ -0,0 +1,102 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +use reqwest; + +use super::{configuration, Error}; +use crate::apis::ResponseContent; + +/// struct for typed errors of method [`containers_logs_get`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersLogsGetError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// Returns the logs for a given container. 
+pub async fn containers_logs_get( + configuration: &configuration::Configuration, + stream: crate::models::ContainersQueryLogStream, + container_ids_json: &str, + project: Option<&str>, + environment: Option<&str>, + search_text: Option<&str>, + search_case_sensitive: Option, + search_enable_regex: Option, + watch_index: Option<&str>, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/v1/containers/logs", local_var_configuration.base_path); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + local_var_req_builder = local_var_req_builder.query(&[("stream", &stream.to_string())]); + local_var_req_builder = + local_var_req_builder.query(&[("container_ids_json", &container_ids_json.to_string())]); + if let Some(ref local_var_str) = search_text { + local_var_req_builder = + local_var_req_builder.query(&[("search_text", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = search_case_sensitive { + local_var_req_builder = + local_var_req_builder.query(&[("search_case_sensitive", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = search_enable_regex { + local_var_req_builder = + local_var_req_builder.query(&[("search_enable_regex", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = watch_index { + local_var_req_builder = + local_var_req_builder.query(&[("watch_index", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} diff --git a/sdks/api/full/rust/src/apis/containers_metrics_api.rs b/sdks/api/full/rust/src/apis/containers_metrics_api.rs new file mode 100644 index 0000000000..7e7b2c0a56 --- /dev/null +++ b/sdks/api/full/rust/src/apis/containers_metrics_api.rs @@ -0,0 +1,89 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +use reqwest; + +use super::{configuration, Error}; +use crate::apis::ResponseContent; + +/// struct for typed errors of method 
[`containers_metrics_get`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersMetricsGetError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// Returns the metrics for a given container. +pub async fn containers_metrics_get( + configuration: &configuration::Configuration, + container: &str, + start: i32, + end: i32, + interval: i32, + project: Option<&str>, + environment: Option<&str>, +) -> Result> +{ + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!( + "{}/v1/containers/{container}/metrics/history", + local_var_configuration.base_path, + container = crate::apis::urlencode(container) + ); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = + local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = + local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + local_var_req_builder = local_var_req_builder.query(&[("start", &start.to_string())]); + local_var_req_builder = local_var_req_builder.query(&[("end", &end.to_string())]); + local_var_req_builder = local_var_req_builder.query(&[("interval", &interval.to_string())]); + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} diff --git a/sdks/api/full/rust/src/apis/mod.rs b/sdks/api/full/rust/src/apis/mod.rs index c326b5a161..d26e9d10e4 100644 --- a/sdks/api/full/rust/src/apis/mod.rs +++ b/sdks/api/full/rust/src/apis/mod.rs @@ -115,6 +115,9 @@ pub fn parse_deep_object(prefix: &str, value: &serde_json::Value) -> Vec<(String pub mod actors_api; pub mod actors_logs_api; pub mod actors_metrics_api; +pub mod actors_v1_api; +pub mod actors_v1_logs_api; +pub mod actors_v1_metrics_api; pub mod auth_identity_email_api; pub mod auth_tokens_api; pub mod builds_api; @@ -135,6 +138,9 @@ pub mod cloud_groups_api; pub mod cloud_logs_api; pub mod cloud_tiers_api; pub mod cloud_uploads_api; +pub mod containers_api; +pub mod containers_logs_api; +pub mod containers_metrics_api; pub mod core_intercom_pegboard_api; pub mod edge_intercom_pegboard_api; pub mod games_environments_tokens_api; diff 
--git a/sdks/api/full/rust/src/models/actors_actor.rs b/sdks/api/full/rust/src/models/actors_actor.rs index 21168917c1..969702f2d9 100644 --- a/sdks/api/full/rust/src/models/actors_actor.rs +++ b/sdks/api/full/rust/src/models/actors_actor.rs @@ -21,8 +21,6 @@ pub struct ActorsActor { pub runtime: Box, #[serde(rename = "network")] pub network: Box, - #[serde(rename = "resources", skip_serializing_if = "Option::is_none")] - pub resources: Option>, #[serde(rename = "lifecycle")] pub lifecycle: Box, /// RFC3339 timestamp @@ -52,7 +50,6 @@ impl ActorsActor { tags, runtime: Box::new(runtime), network: Box::new(network), - resources: None, lifecycle: Box::new(lifecycle), created_at, started_at: None, diff --git a/sdks/api/full/rust/src/models/actors_create_actor_request.rs b/sdks/api/full/rust/src/models/actors_create_actor_request.rs index affb605c0c..f585c3dfe9 100644 --- a/sdks/api/full/rust/src/models/actors_create_actor_request.rs +++ b/sdks/api/full/rust/src/models/actors_create_actor_request.rs @@ -27,8 +27,6 @@ pub struct ActorsCreateActorRequest { pub runtime: Option>, #[serde(rename = "network", skip_serializing_if = "Option::is_none")] pub network: Option>, - #[serde(rename = "resources", skip_serializing_if = "Option::is_none")] - pub resources: Option>, #[serde(rename = "lifecycle", skip_serializing_if = "Option::is_none")] pub lifecycle: Option>, } @@ -42,7 +40,6 @@ impl ActorsCreateActorRequest { build_tags: None, runtime: None, network: None, - resources: None, lifecycle: None, } } diff --git a/sdks/api/full/rust/src/models/actors_v1_actor.rs b/sdks/api/full/rust/src/models/actors_v1_actor.rs new file mode 100644 index 0000000000..3a37e62b43 --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_actor.rs @@ -0,0 +1,61 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1Actor { + #[serde(rename = "id")] + pub id: uuid::Uuid, + #[serde(rename = "region")] + pub region: String, + #[serde(rename = "tags", deserialize_with = "Option::deserialize")] + pub tags: Option, + #[serde(rename = "runtime")] + pub runtime: Box, + #[serde(rename = "network")] + pub network: Box, + #[serde(rename = "resources", skip_serializing_if = "Option::is_none")] + pub resources: Option>, + #[serde(rename = "lifecycle")] + pub lifecycle: Box, + /// RFC3339 timestamp + #[serde(rename = "created_at")] + pub created_at: String, + /// RFC3339 timestamp + #[serde(rename = "started_at", skip_serializing_if = "Option::is_none")] + pub started_at: Option, + /// RFC3339 timestamp + #[serde(rename = "destroyed_at", skip_serializing_if = "Option::is_none")] + pub destroyed_at: Option, +} + +impl ActorsV1Actor { + pub fn new( + id: uuid::Uuid, + region: String, + tags: Option, + runtime: crate::models::ActorsV1Runtime, + network: crate::models::ActorsV1Network, + lifecycle: crate::models::ActorsV1Lifecycle, + created_at: String, + ) -> ActorsV1Actor { + ActorsV1Actor { + id, + region, + tags, + runtime: Box::new(runtime), + network: Box::new(network), + resources: None, + lifecycle: Box::new(lifecycle), + created_at, + started_at: None, + destroyed_at: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_create_actor_network_request.rs b/sdks/api/full/rust/src/models/actors_v1_create_actor_network_request.rs new file 
mode 100644 index 0000000000..3fddab22a0 --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_create_actor_network_request.rs @@ -0,0 +1,30 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1CreateActorNetworkRequest { + #[serde(rename = "mode", skip_serializing_if = "Option::is_none")] + pub mode: Option, + #[serde(rename = "ports", skip_serializing_if = "Option::is_none")] + pub ports: + Option<::std::collections::HashMap>, + #[serde(rename = "wait_ready", skip_serializing_if = "Option::is_none")] + pub wait_ready: Option, +} + +impl ActorsV1CreateActorNetworkRequest { + pub fn new() -> ActorsV1CreateActorNetworkRequest { + ActorsV1CreateActorNetworkRequest { + mode: None, + ports: None, + wait_ready: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_create_actor_port_request.rs b/sdks/api/full/rust/src/models/actors_v1_create_actor_port_request.rs new file mode 100644 index 0000000000..b99b47c753 --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_create_actor_port_request.rs @@ -0,0 +1,29 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1CreateActorPortRequest { + #[serde(rename = "protocol")] + pub protocol: crate::models::ActorsV1PortProtocol, + #[serde(rename = "internal_port", skip_serializing_if = "Option::is_none")] + pub internal_port: Option, + #[serde(rename = "routing", skip_serializing_if = "Option::is_none")] + pub routing: Option>, +} + +impl ActorsV1CreateActorPortRequest { + pub fn new(protocol: crate::models::ActorsV1PortProtocol) -> ActorsV1CreateActorPortRequest { + ActorsV1CreateActorPortRequest { + protocol, + internal_port: None, + routing: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_create_actor_request.rs b/sdks/api/full/rust/src/models/actors_v1_create_actor_request.rs new file mode 100644 index 0000000000..6badd26a0d --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_create_actor_request.rs @@ -0,0 +1,49 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1CreateActorRequest { + #[serde(rename = "region", skip_serializing_if = "Option::is_none")] + pub region: Option, + #[serde(rename = "tags", deserialize_with = "Option::deserialize")] + pub tags: Option, + #[serde(rename = "build", skip_serializing_if = "Option::is_none")] + pub build: Option, + #[serde( + rename = "build_tags", + default, + with = "::serde_with::rust::double_option", + skip_serializing_if = "Option::is_none" + )] + pub build_tags: Option>, + #[serde(rename = "runtime", skip_serializing_if = "Option::is_none")] + pub runtime: Option>, + #[serde(rename = "network", skip_serializing_if = "Option::is_none")] + pub network: Option>, + #[serde(rename = "resources", skip_serializing_if = 
"Option::is_none")] + pub resources: Option>, + #[serde(rename = "lifecycle", skip_serializing_if = "Option::is_none")] + pub lifecycle: Option>, +} + +impl ActorsV1CreateActorRequest { + pub fn new(tags: Option) -> ActorsV1CreateActorRequest { + ActorsV1CreateActorRequest { + region: None, + tags, + build: None, + build_tags: None, + runtime: None, + network: None, + resources: None, + lifecycle: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_create_actor_response.rs b/sdks/api/full/rust/src/models/actors_v1_create_actor_response.rs new file mode 100644 index 0000000000..7a7b63d421 --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_create_actor_response.rs @@ -0,0 +1,23 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1CreateActorResponse { + #[serde(rename = "actor")] + pub actor: Box, +} + +impl ActorsV1CreateActorResponse { + pub fn new(actor: crate::models::ActorsV1Actor) -> ActorsV1CreateActorResponse { + ActorsV1CreateActorResponse { + actor: Box::new(actor), + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_create_actor_runtime_network_request.rs b/sdks/api/full/rust/src/models/actors_v1_create_actor_runtime_network_request.rs new file mode 100644 index 0000000000..f0b99740d2 --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_create_actor_runtime_network_request.rs @@ -0,0 +1,23 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1CreateActorRuntimeNetworkRequest { + #[serde(rename = "endpoint_type")] + pub endpoint_type: crate::models::ActorsV1EndpointType, +} + +impl ActorsV1CreateActorRuntimeNetworkRequest { + pub fn new( + endpoint_type: crate::models::ActorsV1EndpointType, + ) -> ActorsV1CreateActorRuntimeNetworkRequest { + ActorsV1CreateActorRuntimeNetworkRequest { endpoint_type } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_create_actor_runtime_request.rs b/sdks/api/full/rust/src/models/actors_v1_create_actor_runtime_request.rs new file mode 100644 index 0000000000..c27601f95a --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_create_actor_runtime_request.rs @@ -0,0 +1,26 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1CreateActorRuntimeRequest { + #[serde(rename = "environment", skip_serializing_if = "Option::is_none")] + pub environment: Option<::std::collections::HashMap>, + #[serde(rename = "network", skip_serializing_if = "Option::is_none")] + pub network: Option>, +} + +impl ActorsV1CreateActorRuntimeRequest { + pub fn new() -> ActorsV1CreateActorRuntimeRequest { + ActorsV1CreateActorRuntimeRequest { + environment: None, + network: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_endpoint_type.rs 
b/sdks/api/full/rust/src/models/actors_v1_endpoint_type.rs new file mode 100644 index 0000000000..eb0e86ca45 --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_endpoint_type.rs @@ -0,0 +1,33 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum ActorsV1EndpointType { + #[serde(rename = "hostname")] + Hostname, + #[serde(rename = "path")] + Path, +} + +impl ToString for ActorsV1EndpointType { + fn to_string(&self) -> String { + match self { + Self::Hostname => String::from("hostname"), + Self::Path => String::from("path"), + } + } +} + +impl Default for ActorsV1EndpointType { + fn default() -> ActorsV1EndpointType { + Self::Hostname + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_get_actor_logs_response.rs b/sdks/api/full/rust/src/models/actors_v1_get_actor_logs_response.rs new file mode 100644 index 0000000000..8082e53e0e --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_get_actor_logs_response.rs @@ -0,0 +1,50 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1GetActorLogsResponse { + /// List of actor IDs in these logs. The order of these corresponds to the index in the log entry. + #[serde(rename = "actor_ids")] + pub actor_ids: Vec, + /// Sorted old to new. + #[serde(rename = "lines")] + pub lines: Vec, + /// Sorted old to new. + #[serde(rename = "timestamps")] + pub timestamps: Vec, + /// Streams the logs came from. 0 = stdout 1 = stderr + #[serde(rename = "streams")] + pub streams: Vec, + /// Index of the actor that this log was for. Use this index to look up the full ID in `actor_ids`.
+ #[serde(rename = "actor_indices")] + pub actor_indices: Vec, + #[serde(rename = "watch")] + pub watch: Box, +} + +impl ActorsV1GetActorLogsResponse { + pub fn new( + actor_ids: Vec, + lines: Vec, + timestamps: Vec, + streams: Vec, + actor_indices: Vec, + watch: crate::models::WatchResponse, + ) -> ActorsV1GetActorLogsResponse { + ActorsV1GetActorLogsResponse { + actor_ids, + lines, + timestamps, + streams, + actor_indices, + watch: Box::new(watch), + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_get_actor_metrics_response.rs b/sdks/api/full/rust/src/models/actors_v1_get_actor_metrics_response.rs new file mode 100644 index 0000000000..64a95da004 --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_get_actor_metrics_response.rs @@ -0,0 +1,41 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1GetActorMetricsResponse { + #[serde(rename = "actor_ids")] + pub actor_ids: Vec, + #[serde(rename = "metric_names")] + pub metric_names: Vec, + #[serde(rename = "metric_attributes")] + pub metric_attributes: Vec<::std::collections::HashMap>, + #[serde(rename = "metric_types")] + pub metric_types: Vec, + #[serde(rename = "metric_values")] + pub metric_values: Vec>, +} + +impl ActorsV1GetActorMetricsResponse { + pub fn new( + actor_ids: Vec, + metric_names: Vec, + metric_attributes: Vec<::std::collections::HashMap>, + metric_types: Vec, + metric_values: Vec>, + ) -> ActorsV1GetActorMetricsResponse { + ActorsV1GetActorMetricsResponse { + actor_ids, + metric_names, + metric_attributes, + metric_types, + metric_values, + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_get_actor_response.rs b/sdks/api/full/rust/src/models/actors_v1_get_actor_response.rs new file mode 100644 index 0000000000..a7669373c6 --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_get_actor_response.rs @@ -0,0 +1,23 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1GetActorResponse { + #[serde(rename = "actor")] + pub actor: Box, +} + +impl ActorsV1GetActorResponse { + pub fn new(actor: crate::models::ActorsV1Actor) -> ActorsV1GetActorResponse { + ActorsV1GetActorResponse { + actor: Box::new(actor), + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_lifecycle.rs b/sdks/api/full/rust/src/models/actors_v1_lifecycle.rs new file mode 100644 index 0000000000..7e84ddbb78 --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_lifecycle.rs @@ -0,0 +1,28 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1Lifecycle { + /// The duration to wait for in milliseconds before killing the actor. This should be set to a safe default, and can be overridden during a DELETE request if needed. 
+ #[serde(rename = "kill_timeout", skip_serializing_if = "Option::is_none")] + pub kill_timeout: Option, + /// If true, the actor will try to reschedule itself automatically in the event of a crash or a datacenter failover. The actor will not reschedule if it exits successfully. + #[serde(rename = "durable", skip_serializing_if = "Option::is_none")] + pub durable: Option, +} + +impl ActorsV1Lifecycle { + pub fn new() -> ActorsV1Lifecycle { + ActorsV1Lifecycle { + kill_timeout: None, + durable: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_list_actors_response.rs b/sdks/api/full/rust/src/models/actors_v1_list_actors_response.rs new file mode 100644 index 0000000000..6d8a1a1f0d --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_list_actors_response.rs @@ -0,0 +1,30 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1ListActorsResponse { + /// A list of actors for the project associated with the token. + #[serde(rename = "actors")] + pub actors: Vec, + #[serde(rename = "pagination")] + pub pagination: Box, +} + +impl ActorsV1ListActorsResponse { + pub fn new( + actors: Vec, + pagination: crate::models::Pagination, + ) -> ActorsV1ListActorsResponse { + ActorsV1ListActorsResponse { + actors, + pagination: Box::new(pagination), + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_network.rs b/sdks/api/full/rust/src/models/actors_v1_network.rs new file mode 100644 index 0000000000..5095a493ac --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_network.rs @@ -0,0 +1,26 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1Network { + #[serde(rename = "mode")] + pub mode: crate::models::ActorsV1NetworkMode, + #[serde(rename = "ports")] + pub ports: ::std::collections::HashMap, +} + +impl ActorsV1Network { + pub fn new( + mode: crate::models::ActorsV1NetworkMode, + ports: ::std::collections::HashMap, + ) -> ActorsV1Network { + ActorsV1Network { mode, ports } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_network_mode.rs b/sdks/api/full/rust/src/models/actors_v1_network_mode.rs new file mode 100644 index 0000000000..bcdd87fa5a --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_network_mode.rs @@ -0,0 +1,33 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum ActorsV1NetworkMode { + #[serde(rename = "bridge")] + Bridge, + #[serde(rename = "host")] + Host, +} + +impl ToString for ActorsV1NetworkMode { + fn to_string(&self) -> String { + match self { + Self::Bridge => String::from("bridge"), + Self::Host => String::from("host"), + } + } +} + +impl Default for ActorsV1NetworkMode { + fn default() -> ActorsV1NetworkMode { + Self::Bridge + } +} diff --git 
a/sdks/api/full/rust/src/models/actors_v1_port.rs b/sdks/api/full/rust/src/models/actors_v1_port.rs new file mode 100644 index 0000000000..2b5cd29932 --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_port.rs @@ -0,0 +1,45 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1Port { + #[serde(rename = "protocol")] + pub protocol: crate::models::ActorsV1PortProtocol, + #[serde(rename = "internal_port", skip_serializing_if = "Option::is_none")] + pub internal_port: Option, + #[serde(rename = "hostname", skip_serializing_if = "Option::is_none")] + pub hostname: Option, + #[serde(rename = "port", skip_serializing_if = "Option::is_none")] + pub port: Option, + #[serde(rename = "path", skip_serializing_if = "Option::is_none")] + pub path: Option, + /// Fully formed connection URL including protocol, hostname, port, and path, if applicable. + #[serde(rename = "url", skip_serializing_if = "Option::is_none")] + pub url: Option, + #[serde(rename = "routing")] + pub routing: Box, +} + +impl ActorsV1Port { + pub fn new( + protocol: crate::models::ActorsV1PortProtocol, + routing: crate::models::ActorsV1PortRouting, + ) -> ActorsV1Port { + ActorsV1Port { + protocol, + internal_port: None, + hostname: None, + port: None, + path: None, + url: None, + routing: Box::new(routing), + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_port_protocol.rs b/sdks/api/full/rust/src/models/actors_v1_port_protocol.rs new file mode 100644 index 0000000000..4c5403fbe0 --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_port_protocol.rs @@ -0,0 +1,42 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum ActorsV1PortProtocol { + #[serde(rename = "http")] + Http, + #[serde(rename = "https")] + Https, + #[serde(rename = "tcp")] + Tcp, + #[serde(rename = "tcp_tls")] + TcpTls, + #[serde(rename = "udp")] + Udp, +} + +impl ToString for ActorsV1PortProtocol { + fn to_string(&self) -> String { + match self { + Self::Http => String::from("http"), + Self::Https => String::from("https"), + Self::Tcp => String::from("tcp"), + Self::TcpTls => String::from("tcp_tls"), + Self::Udp => String::from("udp"), + } + } +} + +impl Default for ActorsV1PortProtocol { + fn default() -> ActorsV1PortProtocol { + Self::Http + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_port_routing.rs b/sdks/api/full/rust/src/models/actors_v1_port_routing.rs new file mode 100644 index 0000000000..50af393940 --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_port_routing.rs @@ -0,0 +1,26 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1PortRouting { + #[serde(rename = "guard", skip_serializing_if = "Option::is_none")] + pub guard: Option, + #[serde(rename = "host", 
skip_serializing_if = "Option::is_none")] + pub host: Option, +} + +impl ActorsV1PortRouting { + pub fn new() -> ActorsV1PortRouting { + ActorsV1PortRouting { + guard: None, + host: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_query_log_stream.rs b/sdks/api/full/rust/src/models/actors_v1_query_log_stream.rs new file mode 100644 index 0000000000..560ea89cdd --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_query_log_stream.rs @@ -0,0 +1,36 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum ActorsV1QueryLogStream { + #[serde(rename = "std_out")] + StdOut, + #[serde(rename = "std_err")] + StdErr, + #[serde(rename = "all")] + All, +} + +impl ToString for ActorsV1QueryLogStream { + fn to_string(&self) -> String { + match self { + Self::StdOut => String::from("std_out"), + Self::StdErr => String::from("std_err"), + Self::All => String::from("all"), + } + } +} + +impl Default for ActorsV1QueryLogStream { + fn default() -> ActorsV1QueryLogStream { + Self::StdOut + } +} diff --git a/sdks/api/full/rust/src/models/actors_resources.rs b/sdks/api/full/rust/src/models/actors_v1_resources.rs similarity index 79% rename from sdks/api/full/rust/src/models/actors_resources.rs rename to sdks/api/full/rust/src/models/actors_v1_resources.rs index 5d0f916adc..401b03a7e9 100644 --- a/sdks/api/full/rust/src/models/actors_resources.rs +++ b/sdks/api/full/rust/src/models/actors_v1_resources.rs @@ -9,7 +9,7 @@ */ #[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] -pub struct ActorsResources { +pub struct ActorsV1Resources { /// The number of CPU cores in millicores, or 1/1000 of a core. For example, 1/8 of a core would be 125 millicores, and 1 core would be 1000 millicores. 
#[serde(rename = "cpu")] pub cpu: i32, @@ -18,8 +18,8 @@ pub struct ActorsResources { pub memory: i32, } -impl ActorsResources { - pub fn new(cpu: i32, memory: i32) -> ActorsResources { - ActorsResources { cpu, memory } +impl ActorsV1Resources { + pub fn new(cpu: i32, memory: i32) -> ActorsV1Resources { + ActorsV1Resources { cpu, memory } } } diff --git a/sdks/api/full/rust/src/models/actors_v1_runtime.rs b/sdks/api/full/rust/src/models/actors_v1_runtime.rs new file mode 100644 index 0000000000..40f8cdfef5 --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_runtime.rs @@ -0,0 +1,29 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1Runtime { + #[serde(rename = "build")] + pub build: uuid::Uuid, + #[serde(rename = "arguments", skip_serializing_if = "Option::is_none")] + pub arguments: Option>, + #[serde(rename = "environment", skip_serializing_if = "Option::is_none")] + pub environment: Option<::std::collections::HashMap>, +} + +impl ActorsV1Runtime { + pub fn new(build: uuid::Uuid) -> ActorsV1Runtime { + ActorsV1Runtime { + build, + arguments: None, + environment: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_upgrade_actor_request.rs b/sdks/api/full/rust/src/models/actors_v1_upgrade_actor_request.rs new file mode 100644 index 0000000000..e978aa2663 --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_upgrade_actor_request.rs @@ -0,0 +1,31 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1UpgradeActorRequest { + #[serde(rename = "build", skip_serializing_if = "Option::is_none")] + pub build: Option, + #[serde( + rename = "build_tags", + default, + with = "::serde_with::rust::double_option", + skip_serializing_if = "Option::is_none" + )] + pub build_tags: Option>, +} + +impl ActorsV1UpgradeActorRequest { + pub fn new() -> ActorsV1UpgradeActorRequest { + ActorsV1UpgradeActorRequest { + build: None, + build_tags: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_upgrade_all_actors_request.rs b/sdks/api/full/rust/src/models/actors_v1_upgrade_all_actors_request.rs new file mode 100644 index 0000000000..033261ec3a --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_upgrade_all_actors_request.rs @@ -0,0 +1,34 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1UpgradeAllActorsRequest { + #[serde(rename = "tags", deserialize_with = "Option::deserialize")] + pub tags: Option, + #[serde(rename = "build", skip_serializing_if = "Option::is_none")] + pub build: Option, + #[serde( + rename = "build_tags", + default, + with = "::serde_with::rust::double_option", + skip_serializing_if = "Option::is_none" + )] + pub build_tags: Option>, +} + +impl ActorsV1UpgradeAllActorsRequest { + pub fn new(tags: 
Option) -> ActorsV1UpgradeAllActorsRequest { + ActorsV1UpgradeAllActorsRequest { + tags, + build: None, + build_tags: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/actors_v1_upgrade_all_actors_response.rs b/sdks/api/full/rust/src/models/actors_v1_upgrade_all_actors_response.rs new file mode 100644 index 0000000000..73360da07a --- /dev/null +++ b/sdks/api/full/rust/src/models/actors_v1_upgrade_all_actors_response.rs @@ -0,0 +1,21 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsV1UpgradeAllActorsResponse { + #[serde(rename = "count")] + pub count: i64, +} + +impl ActorsV1UpgradeAllActorsResponse { + pub fn new(count: i64) -> ActorsV1UpgradeAllActorsResponse { + ActorsV1UpgradeAllActorsResponse { count } + } +} diff --git a/sdks/api/full/rust/src/models/containers_container.rs b/sdks/api/full/rust/src/models/containers_container.rs new file mode 100644 index 0000000000..25a7d237cb --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_container.rs @@ -0,0 +1,63 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersContainer { + /// Can be a UUID or base36 encoded binary data. + #[serde(rename = "id")] + pub id: String, + #[serde(rename = "region")] + pub region: String, + #[serde(rename = "tags", deserialize_with = "Option::deserialize")] + pub tags: Option, + #[serde(rename = "runtime")] + pub runtime: Box, + #[serde(rename = "network")] + pub network: Box, + #[serde(rename = "resources")] + pub resources: Box, + #[serde(rename = "lifecycle")] + pub lifecycle: Box, + /// RFC3339 timestamp + #[serde(rename = "created_at")] + pub created_at: String, + /// RFC3339 timestamp + #[serde(rename = "started_at", skip_serializing_if = "Option::is_none")] + pub started_at: Option, + /// RFC3339 timestamp + #[serde(rename = "destroyed_at", skip_serializing_if = "Option::is_none")] + pub destroyed_at: Option, +} + +impl ContainersContainer { + pub fn new( + id: String, + region: String, + tags: Option, + runtime: crate::models::ContainersRuntime, + network: crate::models::ContainersNetwork, + resources: crate::models::ContainersResources, + lifecycle: crate::models::ContainersLifecycle, + created_at: String, + ) -> ContainersContainer { + ContainersContainer { + id, + region, + tags, + runtime: Box::new(runtime), + network: Box::new(network), + resources: Box::new(resources), + lifecycle: Box::new(lifecycle), + created_at, + started_at: None, + destroyed_at: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_create_container_network_request.rs b/sdks/api/full/rust/src/models/containers_create_container_network_request.rs new file mode 100644 index 0000000000..29db603d2c --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_create_container_network_request.rs @@ -0,0 +1,31 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: 
https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersCreateContainerNetworkRequest { + #[serde(rename = "mode", skip_serializing_if = "Option::is_none")] + pub mode: Option, + #[serde(rename = "ports", skip_serializing_if = "Option::is_none")] + pub ports: Option< + ::std::collections::HashMap, + >, + #[serde(rename = "wait_ready", skip_serializing_if = "Option::is_none")] + pub wait_ready: Option, +} + +impl ContainersCreateContainerNetworkRequest { + pub fn new() -> ContainersCreateContainerNetworkRequest { + ContainersCreateContainerNetworkRequest { + mode: None, + ports: None, + wait_ready: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_create_container_port_request.rs b/sdks/api/full/rust/src/models/containers_create_container_port_request.rs new file mode 100644 index 0000000000..3de16d3ffa --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_create_container_port_request.rs @@ -0,0 +1,31 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersCreateContainerPortRequest { + #[serde(rename = "protocol")] + pub protocol: crate::models::ContainersPortProtocol, + #[serde(rename = "internal_port", skip_serializing_if = "Option::is_none")] + pub internal_port: Option, + #[serde(rename = "routing", skip_serializing_if = "Option::is_none")] + pub routing: Option>, +} + +impl ContainersCreateContainerPortRequest { + pub fn new( + protocol: crate::models::ContainersPortProtocol, + ) -> ContainersCreateContainerPortRequest { + ContainersCreateContainerPortRequest { + protocol, + internal_port: None, + routing: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_create_container_request.rs b/sdks/api/full/rust/src/models/containers_create_container_request.rs new file mode 100644 index 0000000000..86f4b4c2e3 --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_create_container_request.rs @@ -0,0 +1,52 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersCreateContainerRequest { + #[serde(rename = "region", skip_serializing_if = "Option::is_none")] + pub region: Option, + #[serde(rename = "tags", deserialize_with = "Option::deserialize")] + pub tags: Option, + #[serde(rename = "build", skip_serializing_if = "Option::is_none")] + pub build: Option, + #[serde( + rename = "build_tags", + default, + with = "::serde_with::rust::double_option", + skip_serializing_if = "Option::is_none" + )] + pub build_tags: Option>, + #[serde(rename = "runtime", skip_serializing_if = "Option::is_none")] + pub runtime: Option>, + #[serde(rename = "network", skip_serializing_if = "Option::is_none")] + pub network: Option>, + #[serde(rename = "resources")] + pub resources: Box, + #[serde(rename = "lifecycle", skip_serializing_if = "Option::is_none")] + pub lifecycle: Option>, +} + +impl ContainersCreateContainerRequest { + pub fn new( + tags: Option, + resources: crate::models::ContainersResources, + ) -> 
ContainersCreateContainerRequest { + ContainersCreateContainerRequest { + region: None, + tags, + build: None, + build_tags: None, + runtime: None, + network: None, + resources: Box::new(resources), + lifecycle: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_create_container_response.rs b/sdks/api/full/rust/src/models/containers_create_container_response.rs new file mode 100644 index 0000000000..9c78b01e61 --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_create_container_response.rs @@ -0,0 +1,23 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersCreateContainerResponse { + #[serde(rename = "container")] + pub container: Box, +} + +impl ContainersCreateContainerResponse { + pub fn new(container: crate::models::ContainersContainer) -> ContainersCreateContainerResponse { + ContainersCreateContainerResponse { + container: Box::new(container), + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_create_container_runtime_network_request.rs b/sdks/api/full/rust/src/models/containers_create_container_runtime_network_request.rs new file mode 100644 index 0000000000..a2e59cc84a --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_create_container_runtime_network_request.rs @@ -0,0 +1,23 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersCreateContainerRuntimeNetworkRequest { + #[serde(rename = "endpoint_type")] + pub endpoint_type: crate::models::ContainersEndpointType, +} + +impl ContainersCreateContainerRuntimeNetworkRequest { + pub fn new( + endpoint_type: crate::models::ContainersEndpointType, + ) -> ContainersCreateContainerRuntimeNetworkRequest { + ContainersCreateContainerRuntimeNetworkRequest { endpoint_type } + } +} diff --git a/sdks/api/full/rust/src/models/containers_create_container_runtime_request.rs b/sdks/api/full/rust/src/models/containers_create_container_runtime_request.rs new file mode 100644 index 0000000000..782856511a --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_create_container_runtime_request.rs @@ -0,0 +1,26 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersCreateContainerRuntimeRequest { + #[serde(rename = "environment", skip_serializing_if = "Option::is_none")] + pub environment: Option<::std::collections::HashMap>, + #[serde(rename = "network", skip_serializing_if = "Option::is_none")] + pub network: Option>, +} + +impl ContainersCreateContainerRuntimeRequest { + pub fn new() -> ContainersCreateContainerRuntimeRequest { + ContainersCreateContainerRuntimeRequest { + environment: None, + network: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_endpoint_type.rs b/sdks/api/full/rust/src/models/containers_endpoint_type.rs new file mode 
100644 index 0000000000..2ccbd02c52 --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_endpoint_type.rs @@ -0,0 +1,33 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum ContainersEndpointType { + #[serde(rename = "hostname")] + Hostname, + #[serde(rename = "path")] + Path, +} + +impl ToString for ContainersEndpointType { + fn to_string(&self) -> String { + match self { + Self::Hostname => String::from("hostname"), + Self::Path => String::from("path"), + } + } +} + +impl Default for ContainersEndpointType { + fn default() -> ContainersEndpointType { + Self::Hostname + } +} diff --git a/sdks/api/full/rust/src/models/containers_get_container_logs_response.rs b/sdks/api/full/rust/src/models/containers_get_container_logs_response.rs new file mode 100644 index 0000000000..d35a84b0b3 --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_get_container_logs_response.rs @@ -0,0 +1,55 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersGetContainerLogsResponse { + /// List of container IDs in these logs. The order of these corresponds to the index in the log entry. + #[serde(rename = "container_ids")] + pub container_ids: Vec<String>, + /// Sorted old to new. + #[serde(rename = "lines")] + pub lines: Vec<String>, + /// Sorted old to new. + #[serde(rename = "timestamps")] + pub timestamps: Vec<String>, + /// Streams the logs came from. 0 = stdout 1 = stderr + #[serde(rename = "streams")] + pub streams: Vec<i32>, + /// List of flags denoting if this log is not directly from the container. + #[serde(rename = "foreigns")] + pub foreigns: Vec<bool>, + /// Index of the container that this log was for. Use this index to look up the full ID in `container_ids`.
+ #[serde(rename = "container_indices")] + pub container_indices: Vec<i32>, + #[serde(rename = "watch")] + pub watch: Box<crate::models::WatchResponse>, +} + +impl ContainersGetContainerLogsResponse { + pub fn new( + container_ids: Vec<String>, + lines: Vec<String>, + timestamps: Vec<String>, + streams: Vec<i32>, + foreigns: Vec<bool>, + container_indices: Vec<i32>, + watch: crate::models::WatchResponse, + ) -> ContainersGetContainerLogsResponse { + ContainersGetContainerLogsResponse { + container_ids, + lines, + timestamps, + streams, + foreigns, + container_indices, + watch: Box::new(watch), + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_get_container_metrics_response.rs b/sdks/api/full/rust/src/models/containers_get_container_metrics_response.rs new file mode 100644 index 0000000000..e81a61bf97 --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_get_container_metrics_response.rs @@ -0,0 +1,41 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersGetContainerMetricsResponse { + #[serde(rename = "container_ids")] + pub container_ids: Vec<String>, + #[serde(rename = "metric_names")] + pub metric_names: Vec<String>, + #[serde(rename = "metric_attributes")] + pub metric_attributes: Vec<::std::collections::HashMap<String, String>>, + #[serde(rename = "metric_types")] + pub metric_types: Vec<String>, + #[serde(rename = "metric_values")] + pub metric_values: Vec<Vec<f64>>, +} + +impl ContainersGetContainerMetricsResponse { + pub fn new( + container_ids: Vec<String>, + metric_names: Vec<String>, + metric_attributes: Vec<::std::collections::HashMap<String, String>>, + metric_types: Vec<String>, + metric_values: Vec<Vec<f64>>, + ) -> ContainersGetContainerMetricsResponse { + ContainersGetContainerMetricsResponse { + container_ids, + metric_names, + metric_attributes, + metric_types, + metric_values, + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_get_container_response.rs b/sdks/api/full/rust/src/models/containers_get_container_response.rs new file mode 100644 index 0000000000..701a6505cc --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_get_container_response.rs @@ -0,0 +1,23 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersGetContainerResponse { + #[serde(rename = "container")] + pub container: Box<crate::models::ContainersContainer>, +} + +impl ContainersGetContainerResponse { + pub fn new(container: crate::models::ContainersContainer) -> ContainersGetContainerResponse { + ContainersGetContainerResponse { + container: Box::new(container), + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_lifecycle.rs b/sdks/api/full/rust/src/models/containers_lifecycle.rs new file mode 100644 index 0000000000..9b731cf9ef --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_lifecycle.rs @@ -0,0 +1,28 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersLifecycle { + /// The duration to wait for in
milliseconds before killing the container. This should be set to a safe default, and can be overridden during a DELETE request if needed. + #[serde(rename = "kill_timeout", skip_serializing_if = "Option::is_none")] + pub kill_timeout: Option, + /// If true, the container will try to reschedule itself automatically in the event of a crash or a datacenter failover. The container will not reschedule if it exits successfully. + #[serde(rename = "durable", skip_serializing_if = "Option::is_none")] + pub durable: Option, +} + +impl ContainersLifecycle { + pub fn new() -> ContainersLifecycle { + ContainersLifecycle { + kill_timeout: None, + durable: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_list_containers_response.rs b/sdks/api/full/rust/src/models/containers_list_containers_response.rs new file mode 100644 index 0000000000..2827f31370 --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_list_containers_response.rs @@ -0,0 +1,30 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersListContainersResponse { + /// A list of containers for the project associated with the token. + #[serde(rename = "containers")] + pub containers: Vec, + #[serde(rename = "pagination")] + pub pagination: Box, +} + +impl ContainersListContainersResponse { + pub fn new( + containers: Vec, + pagination: crate::models::Pagination, + ) -> ContainersListContainersResponse { + ContainersListContainersResponse { + containers, + pagination: Box::new(pagination), + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_network.rs b/sdks/api/full/rust/src/models/containers_network.rs new file mode 100644 index 0000000000..d0343c4895 --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_network.rs @@ -0,0 +1,26 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersNetwork { + #[serde(rename = "mode")] + pub mode: crate::models::ContainersNetworkMode, + #[serde(rename = "ports")] + pub ports: ::std::collections::HashMap, +} + +impl ContainersNetwork { + pub fn new( + mode: crate::models::ContainersNetworkMode, + ports: ::std::collections::HashMap, + ) -> ContainersNetwork { + ContainersNetwork { mode, ports } + } +} diff --git a/sdks/api/full/rust/src/models/containers_network_mode.rs b/sdks/api/full/rust/src/models/containers_network_mode.rs new file mode 100644 index 0000000000..4e3eda1bbc --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_network_mode.rs @@ -0,0 +1,33 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum ContainersNetworkMode { + #[serde(rename = "bridge")] + Bridge, + #[serde(rename = "host")] + Host, +} + +impl ToString for ContainersNetworkMode { + fn to_string(&self) -> String { + match self { 
+ Self::Bridge => String::from("bridge"), + Self::Host => String::from("host"), + } + } +} + +impl Default for ContainersNetworkMode { + fn default() -> ContainersNetworkMode { + Self::Bridge + } +} diff --git a/sdks/api/full/rust/src/models/containers_port.rs b/sdks/api/full/rust/src/models/containers_port.rs new file mode 100644 index 0000000000..aeab73a366 --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_port.rs @@ -0,0 +1,45 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersPort { + #[serde(rename = "protocol")] + pub protocol: crate::models::ContainersPortProtocol, + #[serde(rename = "internal_port", skip_serializing_if = "Option::is_none")] + pub internal_port: Option, + #[serde(rename = "hostname", skip_serializing_if = "Option::is_none")] + pub hostname: Option, + #[serde(rename = "port", skip_serializing_if = "Option::is_none")] + pub port: Option, + #[serde(rename = "path", skip_serializing_if = "Option::is_none")] + pub path: Option, + /// Fully formed connection URL including protocol, hostname, port, and path, if applicable. + #[serde(rename = "url", skip_serializing_if = "Option::is_none")] + pub url: Option, + #[serde(rename = "routing")] + pub routing: Box, +} + +impl ContainersPort { + pub fn new( + protocol: crate::models::ContainersPortProtocol, + routing: crate::models::ContainersPortRouting, + ) -> ContainersPort { + ContainersPort { + protocol, + internal_port: None, + hostname: None, + port: None, + path: None, + url: None, + routing: Box::new(routing), + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_port_protocol.rs b/sdks/api/full/rust/src/models/containers_port_protocol.rs new file mode 100644 index 0000000000..389025831c --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_port_protocol.rs @@ -0,0 +1,42 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum ContainersPortProtocol { + #[serde(rename = "http")] + Http, + #[serde(rename = "https")] + Https, + #[serde(rename = "tcp")] + Tcp, + #[serde(rename = "tcp_tls")] + TcpTls, + #[serde(rename = "udp")] + Udp, +} + +impl ToString for ContainersPortProtocol { + fn to_string(&self) -> String { + match self { + Self::Http => String::from("http"), + Self::Https => String::from("https"), + Self::Tcp => String::from("tcp"), + Self::TcpTls => String::from("tcp_tls"), + Self::Udp => String::from("udp"), + } + } +} + +impl Default for ContainersPortProtocol { + fn default() -> ContainersPortProtocol { + Self::Http + } +} diff --git a/sdks/api/full/rust/src/models/containers_port_routing.rs b/sdks/api/full/rust/src/models/containers_port_routing.rs new file mode 100644 index 0000000000..3d65bd9901 --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_port_routing.rs @@ -0,0 +1,26 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: 
https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersPortRouting { + #[serde(rename = "guard", skip_serializing_if = "Option::is_none")] + pub guard: Option, + #[serde(rename = "host", skip_serializing_if = "Option::is_none")] + pub host: Option, +} + +impl ContainersPortRouting { + pub fn new() -> ContainersPortRouting { + ContainersPortRouting { + guard: None, + host: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_query_log_stream.rs b/sdks/api/full/rust/src/models/containers_query_log_stream.rs new file mode 100644 index 0000000000..872298f4cf --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_query_log_stream.rs @@ -0,0 +1,36 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum ContainersQueryLogStream { + #[serde(rename = "std_out")] + StdOut, + #[serde(rename = "std_err")] + StdErr, + #[serde(rename = "all")] + All, +} + +impl ToString for ContainersQueryLogStream { + fn to_string(&self) -> String { + match self { + Self::StdOut => String::from("std_out"), + Self::StdErr => String::from("std_err"), + Self::All => String::from("all"), + } + } +} + +impl Default for ContainersQueryLogStream { + fn default() -> ContainersQueryLogStream { + Self::StdOut + } +} diff --git a/sdks/api/full/rust/src/models/containers_resources.rs b/sdks/api/full/rust/src/models/containers_resources.rs new file mode 100644 index 0000000000..0e7e3a7bb9 --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_resources.rs @@ -0,0 +1,25 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersResources { + /// The number of CPU cores in millicores, or 1/1000 of a core. For example, 1/8 of a core would be 125 millicores, and 1 core would be 1000 millicores. 
+ #[serde(rename = "cpu")] + pub cpu: i32, + /// The amount of memory in megabytes + #[serde(rename = "memory")] + pub memory: i32, +} + +impl ContainersResources { + pub fn new(cpu: i32, memory: i32) -> ContainersResources { + ContainersResources { cpu, memory } + } +} diff --git a/sdks/api/full/rust/src/models/containers_runtime.rs b/sdks/api/full/rust/src/models/containers_runtime.rs new file mode 100644 index 0000000000..2de7b3b33d --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_runtime.rs @@ -0,0 +1,29 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersRuntime { + #[serde(rename = "build")] + pub build: uuid::Uuid, + #[serde(rename = "arguments", skip_serializing_if = "Option::is_none")] + pub arguments: Option>, + #[serde(rename = "environment", skip_serializing_if = "Option::is_none")] + pub environment: Option<::std::collections::HashMap>, +} + +impl ContainersRuntime { + pub fn new(build: uuid::Uuid) -> ContainersRuntime { + ContainersRuntime { + build, + arguments: None, + environment: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_upgrade_all_containers_request.rs b/sdks/api/full/rust/src/models/containers_upgrade_all_containers_request.rs new file mode 100644 index 0000000000..b2f36db0ae --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_upgrade_all_containers_request.rs @@ -0,0 +1,34 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersUpgradeAllContainersRequest { + #[serde(rename = "tags", deserialize_with = "Option::deserialize")] + pub tags: Option, + #[serde(rename = "build", skip_serializing_if = "Option::is_none")] + pub build: Option, + #[serde( + rename = "build_tags", + default, + with = "::serde_with::rust::double_option", + skip_serializing_if = "Option::is_none" + )] + pub build_tags: Option>, +} + +impl ContainersUpgradeAllContainersRequest { + pub fn new(tags: Option) -> ContainersUpgradeAllContainersRequest { + ContainersUpgradeAllContainersRequest { + tags, + build: None, + build_tags: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/containers_upgrade_all_containers_response.rs b/sdks/api/full/rust/src/models/containers_upgrade_all_containers_response.rs new file mode 100644 index 0000000000..2ba81a295b --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_upgrade_all_containers_response.rs @@ -0,0 +1,21 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersUpgradeAllContainersResponse { + #[serde(rename = "count")] + pub count: i64, +} + +impl ContainersUpgradeAllContainersResponse { + pub fn new(count: i64) -> ContainersUpgradeAllContainersResponse { + ContainersUpgradeAllContainersResponse { count } + } +} diff --git 
a/sdks/api/full/rust/src/models/containers_upgrade_container_request.rs b/sdks/api/full/rust/src/models/containers_upgrade_container_request.rs new file mode 100644 index 0000000000..c416495988 --- /dev/null +++ b/sdks/api/full/rust/src/models/containers_upgrade_container_request.rs @@ -0,0 +1,31 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersUpgradeContainerRequest { + #[serde(rename = "build", skip_serializing_if = "Option::is_none")] + pub build: Option<uuid::Uuid>, + #[serde( + rename = "build_tags", + default, + with = "::serde_with::rust::double_option", + skip_serializing_if = "Option::is_none" + )] + pub build_tags: Option<Option<serde_json::Value>>, +} + +impl ContainersUpgradeContainerRequest { + pub fn new() -> ContainersUpgradeContainerRequest { + ContainersUpgradeContainerRequest { + build: None, + build_tags: None, + } + } +} diff --git a/sdks/api/full/rust/src/models/mod.rs b/sdks/api/full/rust/src/models/mod.rs index 705d66a9f2..b524f9c3ba 100644 --- a/sdks/api/full/rust/src/models/mod.rs +++ b/sdks/api/full/rust/src/models/mod.rs @@ -40,10 +40,10 @@ pub mod actors_port_protocol; pub use self::actors_port_protocol::ActorsPortProtocol; pub mod actors_port_routing; pub use self::actors_port_routing::ActorsPortRouting; pub mod actors_query_actors_response; pub use self::actors_query_actors_response::ActorsQueryActorsResponse; -pub mod actors_resources; -pub use self::actors_resources::ActorsResources; +pub mod actors_query_log_stream; +pub use self::actors_query_log_stream::ActorsQueryLogStream; pub mod actors_runtime; pub use self::actors_runtime::ActorsRuntime; pub mod actors_upgrade_actor_request; @@ -52,6 +52,54 @@ pub mod actors_upgrade_all_actors_request; pub use self::actors_upgrade_all_actors_request::ActorsUpgradeAllActorsRequest; pub mod actors_upgrade_all_actors_response; pub use self::actors_upgrade_all_actors_response::ActorsUpgradeAllActorsResponse; +pub mod actors_v1_actor; +pub use self::actors_v1_actor::ActorsV1Actor; +pub mod actors_v1_create_actor_network_request; +pub use self::actors_v1_create_actor_network_request::ActorsV1CreateActorNetworkRequest; +pub mod actors_v1_create_actor_port_request; +pub use self::actors_v1_create_actor_port_request::ActorsV1CreateActorPortRequest; +pub mod actors_v1_create_actor_request; +pub use self::actors_v1_create_actor_request::ActorsV1CreateActorRequest; +pub mod actors_v1_create_actor_response; +pub use self::actors_v1_create_actor_response::ActorsV1CreateActorResponse; +pub mod actors_v1_create_actor_runtime_network_request; +pub use self::actors_v1_create_actor_runtime_network_request::ActorsV1CreateActorRuntimeNetworkRequest; +pub mod actors_v1_create_actor_runtime_request; +pub use self::actors_v1_create_actor_runtime_request::ActorsV1CreateActorRuntimeRequest; +pub mod actors_v1_endpoint_type; +pub use self::actors_v1_endpoint_type::ActorsV1EndpointType; +pub mod actors_v1_get_actor_logs_response; +pub use self::actors_v1_get_actor_logs_response::ActorsV1GetActorLogsResponse; +pub mod actors_v1_get_actor_metrics_response; +pub use self::actors_v1_get_actor_metrics_response::ActorsV1GetActorMetricsResponse; +pub mod actors_v1_get_actor_response; +pub use
self::actors_v1_get_actor_response::ActorsV1GetActorResponse; +pub mod actors_v1_lifecycle; +pub use self::actors_v1_lifecycle::ActorsV1Lifecycle; +pub mod actors_v1_list_actors_response; +pub use self::actors_v1_list_actors_response::ActorsV1ListActorsResponse; +pub mod actors_v1_network; +pub use self::actors_v1_network::ActorsV1Network; +pub mod actors_v1_network_mode; +pub use self::actors_v1_network_mode::ActorsV1NetworkMode; +pub mod actors_v1_port; +pub use self::actors_v1_port::ActorsV1Port; +pub mod actors_v1_port_protocol; +pub use self::actors_v1_port_protocol::ActorsV1PortProtocol; +pub mod actors_v1_port_routing; +pub use self::actors_v1_port_routing::ActorsV1PortRouting; +pub mod actors_v1_query_log_stream; +pub use self::actors_v1_query_log_stream::ActorsV1QueryLogStream; +pub mod actors_v1_resources; +pub use self::actors_v1_resources::ActorsV1Resources; +pub mod actors_v1_runtime; +pub use self::actors_v1_runtime::ActorsV1Runtime; +pub mod actors_v1_upgrade_actor_request; +pub use self::actors_v1_upgrade_actor_request::ActorsV1UpgradeActorRequest; +pub mod actors_v1_upgrade_all_actors_request; +pub use self::actors_v1_upgrade_all_actors_request::ActorsV1UpgradeAllActorsRequest; +pub mod actors_v1_upgrade_all_actors_response; +pub use self::actors_v1_upgrade_all_actors_response::ActorsV1UpgradeAllActorsResponse; pub mod auth_complete_status; pub use self::auth_complete_status::AuthCompleteStatus; pub mod auth_identity_complete_email_verification_request; @@ -392,6 +445,54 @@ pub mod cloud_version_matchmaker_proxy_kind; pub use self::cloud_version_matchmaker_proxy_kind::CloudVersionMatchmakerProxyKind; pub mod cloud_version_summary; pub use self::cloud_version_summary::CloudVersionSummary; +pub mod containers_container; +pub use self::containers_container::ContainersContainer; +pub mod containers_create_container_network_request; +pub use self::containers_create_container_network_request::ContainersCreateContainerNetworkRequest; +pub mod containers_create_container_port_request; +pub use self::containers_create_container_port_request::ContainersCreateContainerPortRequest; +pub mod containers_create_container_request; +pub use self::containers_create_container_request::ContainersCreateContainerRequest; +pub mod containers_create_container_response; +pub use self::containers_create_container_response::ContainersCreateContainerResponse; +pub mod containers_create_container_runtime_network_request; +pub use self::containers_create_container_runtime_network_request::ContainersCreateContainerRuntimeNetworkRequest; +pub mod containers_create_container_runtime_request; +pub use self::containers_create_container_runtime_request::ContainersCreateContainerRuntimeRequest; +pub mod containers_endpoint_type; +pub use self::containers_endpoint_type::ContainersEndpointType; +pub mod containers_get_container_logs_response; +pub use self::containers_get_container_logs_response::ContainersGetContainerLogsResponse; +pub mod containers_get_container_metrics_response; +pub use self::containers_get_container_metrics_response::ContainersGetContainerMetricsResponse; +pub mod containers_get_container_response; +pub use self::containers_get_container_response::ContainersGetContainerResponse; +pub mod containers_lifecycle; +pub use self::containers_lifecycle::ContainersLifecycle; +pub mod containers_list_containers_response; +pub use self::containers_list_containers_response::ContainersListContainersResponse; +pub mod containers_network; +pub use self::containers_network::ContainersNetwork; +pub mod 
containers_network_mode; +pub use self::containers_network_mode::ContainersNetworkMode; +pub mod containers_port; +pub use self::containers_port::ContainersPort; +pub mod containers_port_protocol; +pub use self::containers_port_protocol::ContainersPortProtocol; +pub mod containers_port_routing; +pub use self::containers_port_routing::ContainersPortRouting; +pub mod containers_query_log_stream; +pub use self::containers_query_log_stream::ContainersQueryLogStream; +pub mod containers_resources; +pub use self::containers_resources::ContainersResources; +pub mod containers_runtime; +pub use self::containers_runtime::ContainersRuntime; +pub mod containers_upgrade_all_containers_request; +pub use self::containers_upgrade_all_containers_request::ContainersUpgradeAllContainersRequest; +pub mod containers_upgrade_all_containers_response; +pub use self::containers_upgrade_all_containers_response::ContainersUpgradeAllContainersResponse; +pub mod containers_upgrade_container_request; +pub use self::containers_upgrade_container_request::ContainersUpgradeContainerRequest; pub mod core_intercom_pegboard_mark_client_registered_request; pub use self::core_intercom_pegboard_mark_client_registered_request::CoreIntercomPegboardMarkClientRegisteredRequest; pub mod edge_intercom_pegboard_toggle_client_drain_request; diff --git a/sdks/api/full/typescript/src/Client.ts b/sdks/api/full/typescript/src/Client.ts index 0457a8b8ec..c6840148b1 100644 --- a/sdks/api/full/typescript/src/Client.ts +++ b/sdks/api/full/typescript/src/Client.ts @@ -7,6 +7,7 @@ import * as core from "./core"; import { Actors } from "./api/resources/actors/client/Client"; import { Builds } from "./api/resources/builds/client/Client"; import { Cloud } from "./api/resources/cloud/client/Client"; +import { Containers } from "./api/resources/containers/client/Client"; import { CoreIntercom } from "./api/resources/coreIntercom/client/Client"; import { EdgeIntercom } from "./api/resources/edgeIntercom/client/Client"; import { Group } from "./api/resources/group/client/Client"; @@ -50,6 +51,7 @@ export class RivetClient { protected _actors: Actors | undefined; protected _builds: Builds | undefined; protected _cloud: Cloud | undefined; + protected _containers: Containers | undefined; protected _coreIntercom: CoreIntercom | undefined; protected _edgeIntercom: EdgeIntercom | undefined; protected _group: Group | undefined; @@ -78,6 +80,10 @@ export class RivetClient { return (this._cloud ??= new Cloud(this._options)); } + public get containers(): Containers { + return (this._containers ??= new Containers(this._options)); + } + public get coreIntercom(): CoreIntercom { return (this._coreIntercom ??= new CoreIntercom(this._options)); } diff --git a/sdks/api/full/typescript/src/api/resources/actors/client/Client.ts b/sdks/api/full/typescript/src/api/resources/actors/client/Client.ts index 4f939eef9e..a7b01a5eb4 100644 --- a/sdks/api/full/typescript/src/api/resources/actors/client/Client.ts +++ b/sdks/api/full/typescript/src/api/resources/actors/client/Client.ts @@ -8,6 +8,7 @@ import * as Rivet from "../../../index"; import * as serializers from "../../../../serialization/index"; import urlJoin from "url-join"; import * as errors from "../../../../errors/index"; +import { V1 } from "../resources/v1/client/Client"; import { Logs } from "../resources/logs/client/Client"; import { Metrics } from "../resources/metrics/client/Client"; @@ -37,11 +38,16 @@ export declare namespace Actors { } export class Actors { + protected _v1: V1 | undefined; protected _logs: 
Logs | undefined; protected _metrics: Metrics | undefined; constructor(protected readonly _options: Actors.Options = {}) {} + public get v1(): V1 { + return (this._v1 ??= new V1(this._options)); + } + public get logs(): Logs { return (this._logs ??= new Logs(this._options)); } @@ -97,7 +103,7 @@ export class Actors { (await core.Supplier.get(this._options.baseUrl)) ?? (await core.Supplier.get(this._options.environment)) ?? environments.RivetEnvironment.Production, - `/actors/${encodeURIComponent(serializers.Id.jsonOrThrow(actor))}`, + `/v2/actors/${encodeURIComponent(serializers.Id.jsonOrThrow(actor))}`, ), method: "GET", headers: { @@ -202,7 +208,7 @@ export class Actors { body: _response.error.rawBody, }); case "timeout": - throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /actors/{actor}."); + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /v2/actors/{actor}."); case "unknown": throw new errors.RivetError({ message: _response.error.errorMessage, @@ -270,7 +276,7 @@ export class Actors { (await core.Supplier.get(this._options.baseUrl)) ?? (await core.Supplier.get(this._options.environment)) ?? environments.RivetEnvironment.Production, - "/actors", + "/v2/actors", ), method: "GET", headers: { @@ -375,7 +381,7 @@ export class Actors { body: _response.error.rawBody, }); case "timeout": - throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /actors."); + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /v2/actors."); case "unknown": throw new errors.RivetError({ message: _response.error.errorMessage, @@ -421,10 +427,6 @@ export class Actors { * ports: {}, * waitReady: true * }, - * resources: { - * cpu: 1, - * memory: 1 - * }, * lifecycle: { * killTimeout: 1000000, * durable: true @@ -457,7 +459,7 @@ export class Actors { (await core.Supplier.get(this._options.baseUrl)) ?? (await core.Supplier.get(this._options.environment)) ?? environments.RivetEnvironment.Production, - "/actors", + "/v2/actors", ), method: "POST", headers: { @@ -563,7 +565,7 @@ export class Actors { body: _response.error.rawBody, }); case "timeout": - throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /actors."); + throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /v2/actors."); case "unknown": throw new errors.RivetError({ message: _response.error.errorMessage, @@ -616,7 +618,7 @@ export class Actors { (await core.Supplier.get(this._options.baseUrl)) ?? (await core.Supplier.get(this._options.environment)) ?? environments.RivetEnvironment.Production, - `/actors/${encodeURIComponent(serializers.Id.jsonOrThrow(actor))}`, + `/v2/actors/${encodeURIComponent(serializers.Id.jsonOrThrow(actor))}`, ), method: "DELETE", headers: { @@ -721,7 +723,7 @@ export class Actors { body: _response.error.rawBody, }); case "timeout": - throw new errors.RivetTimeoutError("Timeout exceeded when calling DELETE /actors/{actor}."); + throw new errors.RivetTimeoutError("Timeout exceeded when calling DELETE /v2/actors/{actor}."); case "unknown": throw new errors.RivetError({ message: _response.error.errorMessage, @@ -775,7 +777,7 @@ export class Actors { (await core.Supplier.get(this._options.baseUrl)) ?? (await core.Supplier.get(this._options.environment)) ?? 
environments.RivetEnvironment.Production, - `/actors/${encodeURIComponent(serializers.Id.jsonOrThrow(actor))}/upgrade`, + `/v2/actors/${encodeURIComponent(serializers.Id.jsonOrThrow(actor))}/upgrade`, ), method: "POST", headers: { @@ -881,7 +883,7 @@ export class Actors { body: _response.error.rawBody, }); case "timeout": - throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /actors/{actor}/upgrade."); + throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /v2/actors/{actor}/upgrade."); case "unknown": throw new errors.RivetError({ message: _response.error.errorMessage, @@ -936,7 +938,7 @@ export class Actors { (await core.Supplier.get(this._options.baseUrl)) ?? (await core.Supplier.get(this._options.environment)) ?? environments.RivetEnvironment.Production, - "/actors/upgrade", + "/v2/actors/upgrade", ), method: "POST", headers: { @@ -1042,7 +1044,7 @@ export class Actors { body: _response.error.rawBody, }); case "timeout": - throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /actors/upgrade."); + throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /v2/actors/upgrade."); case "unknown": throw new errors.RivetError({ message: _response.error.errorMessage, diff --git a/sdks/api/full/typescript/src/api/resources/actors/client/requests/CreateActorRequestQuery.ts b/sdks/api/full/typescript/src/api/resources/actors/client/requests/CreateActorRequestQuery.ts index 9ffc6e6076..90685a87a4 100644 --- a/sdks/api/full/typescript/src/api/resources/actors/client/requests/CreateActorRequestQuery.ts +++ b/sdks/api/full/typescript/src/api/resources/actors/client/requests/CreateActorRequestQuery.ts @@ -30,10 +30,6 @@ import * as Rivet from "../../../../index"; * ports: {}, * waitReady: true * }, - * resources: { - * cpu: 1, - * memory: 1 - * }, * lifecycle: { * killTimeout: 1000000, * durable: true diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/common/types/Actor.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/common/types/Actor.ts index 1aeb34bd9f..268707a801 100644 --- a/sdks/api/full/typescript/src/api/resources/actors/resources/common/types/Actor.ts +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/common/types/Actor.ts @@ -10,7 +10,6 @@ export interface Actor { tags?: unknown; runtime: Rivet.actors.Runtime; network: Rivet.actors.Network; - resources?: Rivet.actors.Resources; lifecycle: Rivet.actors.Lifecycle; createdAt: Rivet.Timestamp; startedAt?: Rivet.Timestamp; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/common/types/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/common/types/index.ts index e8ecca191e..ced439f191 100644 --- a/sdks/api/full/typescript/src/api/resources/actors/resources/common/types/index.ts +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/common/types/index.ts @@ -1,7 +1,6 @@ export * from "./Actor"; export * from "./Runtime"; export * from "./Lifecycle"; -export * from "./Resources"; export * from "./Network"; export * from "./NetworkMode"; export * from "./Port"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/index.ts index 8b57b1caf5..f81ddf19c8 100644 --- a/sdks/api/full/typescript/src/api/resources/actors/resources/index.ts +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/index.ts @@ -1,3 +1,4 @@ +export * as v1 from "./v1"; export * as common from "./common"; 
export * from "./common/types"; export * as logs from "./logs"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/logs/client/Client.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/logs/client/Client.ts index 7f40190724..b99aeb5adc 100644 --- a/sdks/api/full/typescript/src/api/resources/actors/resources/logs/client/Client.ts +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/logs/client/Client.ts @@ -85,7 +85,7 @@ export class Logs { (await core.Supplier.get(this._options.baseUrl)) ?? (await core.Supplier.get(this._options.environment)) ?? environments.RivetEnvironment.Production, - "/actors/logs", + "/v2/actors/logs", ), method: "GET", headers: { @@ -190,7 +190,7 @@ export class Logs { body: _response.error.rawBody, }); case "timeout": - throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /actors/logs."); + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /v2/actors/logs."); case "unknown": throw new errors.RivetError({ message: _response.error.errorMessage, diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/logs/types/GetActorLogsResponse.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/logs/types/GetActorLogsResponse.ts index 8d6370cdfe..00781f554c 100644 --- a/sdks/api/full/typescript/src/api/resources/actors/resources/logs/types/GetActorLogsResponse.ts +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/logs/types/GetActorLogsResponse.ts @@ -6,7 +6,7 @@ import * as Rivet from "../../../../../index"; export interface GetActorLogsResponse { /** List of actor IDs in these logs. The order of these correspond to the index in the log entry. */ - actorIds: Rivet.Id[]; + actorIds: string[]; /** Sorted old to new. */ lines: string[]; /** Sorted old to new. */ diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/metrics/client/Client.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/metrics/client/Client.ts index 3269edea4d..490800a195 100644 --- a/sdks/api/full/typescript/src/api/resources/actors/resources/metrics/client/Client.ts +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/metrics/client/Client.ts @@ -5,8 +5,8 @@ import * as environments from "../../../../../../environments"; import * as core from "../../../../../../core"; import * as Rivet from "../../../../../index"; -import urlJoin from "url-join"; import * as serializers from "../../../../../../serialization/index"; +import urlJoin from "url-join"; import * as errors from "../../../../../../errors/index"; export declare namespace Metrics { @@ -40,7 +40,7 @@ export class Metrics { /** * Returns the metrics for a given actor. * - * @param {string} actor - The id of the actor to destroy + * @param {Rivet.Id} actor - The id of the actor to destroy * @param {Rivet.actors.GetActorMetricsRequestQuery} request * @param {Metrics.RequestOptions} requestOptions - Request-specific configuration. 
* @@ -52,7 +52,7 @@ export class Metrics { * @throws {@link Rivet.BadRequestError} * * @example - * await client.actors.metrics.get("d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", { + * await client.actors.metrics.get("string", { * project: "string", * environment: "string", * start: 1, @@ -61,7 +61,7 @@ export class Metrics { * }) */ public async get( - actor: string, + actor: Rivet.Id, request: Rivet.actors.GetActorMetricsRequestQuery, requestOptions?: Metrics.RequestOptions, ): Promise { @@ -83,7 +83,7 @@ export class Metrics { (await core.Supplier.get(this._options.baseUrl)) ?? (await core.Supplier.get(this._options.environment)) ?? environments.RivetEnvironment.Production, - `/actors/${encodeURIComponent(actor)}/metrics/history`, + `/v2/actors/${encodeURIComponent(serializers.Id.jsonOrThrow(actor))}/metrics/history`, ), method: "GET", headers: { @@ -189,7 +189,7 @@ export class Metrics { }); case "timeout": throw new errors.RivetTimeoutError( - "Timeout exceeded when calling GET /actors/{actor}/metrics/history.", + "Timeout exceeded when calling GET /v2/actors/{actor}/metrics/history.", ); case "unknown": throw new errors.RivetError({ diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/Client.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/Client.ts new file mode 100644 index 0000000000..1d489d1e5a --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/Client.ts @@ -0,0 +1,1061 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as environments from "../../../../../../environments"; +import * as core from "../../../../../../core"; +import * as Rivet from "../../../../../index"; +import * as serializers from "../../../../../../serialization/index"; +import urlJoin from "url-join"; +import * as errors from "../../../../../../errors/index"; +import { Logs } from "../resources/logs/client/Client"; +import { Metrics } from "../resources/metrics/client/Client"; + +export declare namespace V1 { + export interface Options { + environment?: core.Supplier; + /** Specify a custom URL to connect the client to. */ + baseUrl?: core.Supplier; + token?: core.Supplier; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + fetcher?: core.FetchFunction; + } + + export interface RequestOptions { + /** The maximum time to wait for a response in seconds. */ + timeoutInSeconds?: number; + /** The number of times to retry the request. Defaults to 2. */ + maxRetries?: number; + /** A hook to abort the request. */ + abortSignal?: AbortSignal; + /** Additional headers to include in the request. */ + headers?: Record; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + } +} + +export class V1 { + protected _logs: Logs | undefined; + protected _metrics: Metrics | undefined; + + constructor(protected readonly _options: V1.Options = {}) {} + + public get logs(): Logs { + return (this._logs ??= new Logs(this._options)); + } + + public get metrics(): Metrics { + return (this._metrics ??= new Metrics(this._options)); + } + + /** + * Gets a actor. + * + * @param {string} actor - The id of the actor to destroy + * @param {Rivet.actors.v1.ListActorsRequestQuery} request + * @param {V1.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.actors.v1.get("d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", { + * project: "string", + * environment: "string", + * endpointType: "hostname" + * }) + */ + public async get( + actor: string, + request: Rivet.actors.v1.ListActorsRequestQuery = {}, + requestOptions?: V1.RequestOptions, + ): Promise { + const { project, environment, endpointType } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + if (endpointType != null) { + _queryParams["endpoint_type"] = serializers.actors.v1.EndpointType.jsonOrThrow(endpointType, { + unrecognizedObjectKeys: "strip", + }); + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + `/actors/${encodeURIComponent(actor)}`, + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.actors.v1.GetActorResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + 
serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /actors/{actor}."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Lists all actors associated with the token used. Can be filtered by tags in the query string. + * + * @param {Rivet.actors.v1.GetActorsRequestQuery} request + * @param {V1.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.actors.v1.list({ + * project: "string", + * environment: "string", + * endpointType: "hostname", + * tagsJson: "string", + * includeDestroyed: true, + * cursor: "string" + * }) + */ + public async list( + request: Rivet.actors.v1.GetActorsRequestQuery = {}, + requestOptions?: V1.RequestOptions, + ): Promise { + const { project, environment, endpointType, tagsJson, includeDestroyed, cursor } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + if (endpointType != null) { + _queryParams["endpoint_type"] = serializers.actors.v1.EndpointType.jsonOrThrow(endpointType, { + unrecognizedObjectKeys: "strip", + }); + } + + if (tagsJson != null) { + _queryParams["tags_json"] = tagsJson; + } + + if (includeDestroyed != null) { + _queryParams["include_destroyed"] = includeDestroyed.toString(); + } + + if (cursor != null) { + _queryParams["cursor"] = cursor; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + "/actors", + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.actors.v1.ListActorsResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /actors."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Create a new actor. + * + * @param {Rivet.actors.v1.CreateActorRequestQuery} request + * @param {V1.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.actors.v1.create({ + * project: "string", + * environment: "string", + * endpointType: "hostname", + * body: { + * region: "string", + * tags: { + * "key": "value" + * }, + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * }, + * runtime: { + * environment: {}, + * network: { + * endpointType: "hostname" + * } + * }, + * network: { + * mode: "bridge", + * ports: {}, + * waitReady: true + * }, + * resources: { + * cpu: 1, + * memory: 1 + * }, + * lifecycle: { + * killTimeout: 1000000, + * durable: true + * } + * } + * }) + */ + public async create( + request: Rivet.actors.v1.CreateActorRequestQuery, + requestOptions?: V1.RequestOptions, + ): Promise { + const { project, environment, endpointType, body: _body } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + if (endpointType != null) { + _queryParams["endpoint_type"] = serializers.actors.v1.EndpointType.jsonOrThrow(endpointType, { + unrecognizedObjectKeys: "strip", + }); + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + "/actors", + ), + method: "POST", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + body: serializers.actors.v1.CreateActorRequest.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip" }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.actors.v1.CreateActorResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /actors."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Destroy a actor. + * + * @param {string} actor - The id of the actor to destroy + * @param {Rivet.actors.v1.DestroyActorRequestQuery} request + * @param {V1.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.actors.v1.destroy("d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", { + * project: "string", + * environment: "string", + * overrideKillTimeout: 1000000 + * }) + */ + public async destroy( + actor: string, + request: Rivet.actors.v1.DestroyActorRequestQuery = {}, + requestOptions?: V1.RequestOptions, + ): Promise { + const { project, environment, overrideKillTimeout } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + if (overrideKillTimeout != null) { + _queryParams["override_kill_timeout"] = overrideKillTimeout.toString(); + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + `/actors/${encodeURIComponent(actor)}`, + ), + method: "DELETE", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.actors.v1.DestroyActorResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + 
unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling DELETE /actors/{actor}."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Upgrades a actor. + * + * @param {string} actor - The id of the actor to upgrade + * @param {Rivet.actors.v1.UpgradeActorRequestQuery} request + * @param {V1.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.actors.v1.upgrade("d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", { + * project: "string", + * environment: "string", + * body: { + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * } + * } + * }) + */ + public async upgrade( + actor: string, + request: Rivet.actors.v1.UpgradeActorRequestQuery, + requestOptions?: V1.RequestOptions, + ): Promise { + const { project, environment, body: _body } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + `/actors/${encodeURIComponent(actor)}/upgrade`, + ), + method: "POST", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + body: serializers.actors.v1.UpgradeActorRequest.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip" }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.actors.v1.UpgradeActorResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /actors/{actor}/upgrade."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Upgrades all actors matching the given tags. + * + * @param {Rivet.actors.v1.UpgradeAllActorsRequestQuery} request + * @param {V1.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.actors.v1.upgradeAll({ + * project: "string", + * environment: "string", + * body: { + * tags: { + * "key": "value" + * }, + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * } + * } + * }) + */ + public async upgradeAll( + request: Rivet.actors.v1.UpgradeAllActorsRequestQuery, + requestOptions?: V1.RequestOptions, + ): Promise { + const { project, environment, body: _body } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + "/actors/upgrade", + ), + method: "POST", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + body: serializers.actors.v1.UpgradeAllActorsRequest.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip" }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.actors.v1.UpgradeAllActorsResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + 
serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /actors/upgrade."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + protected async _getAuthorizationHeader(): Promise { + const bearer = await core.Supplier.get(this._options.token); + if (bearer != null) { + return `Bearer ${bearer}`; + } + + return undefined; + } +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/index.ts new file mode 100644 index 0000000000..415726b7fe --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/index.ts @@ -0,0 +1 @@ +export * from "./requests"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/CreateActorRequestQuery.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/CreateActorRequestQuery.ts new file mode 100644 index 0000000000..a3c2e860aa --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/CreateActorRequestQuery.ts @@ -0,0 +1,49 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * endpointType: "hostname", + * body: { + * region: "string", + * tags: { + * "key": "value" + * }, + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * }, + * runtime: { + * environment: {}, + * network: { + * endpointType: "hostname" + * } + * }, + * network: { + * mode: "bridge", + * ports: {}, + * waitReady: true + * }, + * resources: { + * cpu: 1, + * memory: 1 + * }, + * lifecycle: { + * killTimeout: 1000000, + * durable: true + * } + * } + * } + */ +export interface CreateActorRequestQuery { + project?: string; + environment?: string; + endpointType?: Rivet.actors.v1.EndpointType; + body: Rivet.actors.v1.CreateActorRequest; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/DestroyActorRequestQuery.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/DestroyActorRequestQuery.ts new file mode 100644 index 0000000000..9c20113f85 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/DestroyActorRequestQuery.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +/** + * @example + * { + * project: "string", + * environment: "string", + * overrideKillTimeout: 1000000 + * } + */ +export interface DestroyActorRequestQuery { + project?: string; + environment?: string; + /** + * The duration to wait for in milliseconds before killing the actor. This should be used to override the default kill timeout if a faster time is needed, say for ignoring a graceful shutdown. + */ + overrideKillTimeout?: number; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/GetActorsRequestQuery.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/GetActorsRequestQuery.ts new file mode 100644 index 0000000000..6f8e0835bd --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/GetActorsRequestQuery.ts @@ -0,0 +1,25 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * endpointType: "hostname", + * tagsJson: "string", + * includeDestroyed: true, + * cursor: "string" + * } + */ +export interface GetActorsRequestQuery { + project?: string; + environment?: string; + endpointType?: Rivet.actors.v1.EndpointType; + tagsJson?: string; + includeDestroyed?: boolean; + cursor?: string; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/ListActorsRequestQuery.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/ListActorsRequestQuery.ts new file mode 100644 index 0000000000..6c8afad514 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/ListActorsRequestQuery.ts @@ -0,0 +1,19 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * endpointType: "hostname" + * } + */ +export interface ListActorsRequestQuery { + project?: string; + environment?: string; + endpointType?: Rivet.actors.v1.EndpointType; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/UpgradeActorRequestQuery.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/UpgradeActorRequestQuery.ts new file mode 100644 index 0000000000..611a01637a --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/UpgradeActorRequestQuery.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * body: { + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * } + * } + * } + */ +export interface UpgradeActorRequestQuery { + project?: string; + environment?: string; + body: Rivet.actors.v1.UpgradeActorRequest; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/UpgradeAllActorsRequestQuery.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/UpgradeAllActorsRequestQuery.ts new file mode 100644 index 0000000000..f6b750bd0a --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/UpgradeAllActorsRequestQuery.ts @@ -0,0 +1,27 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * body: { + * tags: { + * "key": "value" + * }, + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * } + * } + * } + */ +export interface UpgradeAllActorsRequestQuery { + project?: string; + environment?: string; + body: Rivet.actors.v1.UpgradeAllActorsRequest; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/index.ts new file mode 100644 index 0000000000..8953e26e0c --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/client/requests/index.ts @@ -0,0 +1,6 @@ +export { type ListActorsRequestQuery } from "./ListActorsRequestQuery"; +export { type GetActorsRequestQuery } from "./GetActorsRequestQuery"; +export { type CreateActorRequestQuery } from "./CreateActorRequestQuery"; +export { type DestroyActorRequestQuery } from "./DestroyActorRequestQuery"; +export { type UpgradeActorRequestQuery } from "./UpgradeActorRequestQuery"; +export { type UpgradeAllActorsRequestQuery } from "./UpgradeAllActorsRequestQuery"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/index.ts new file mode 100644 index 0000000000..a931b36375 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/index.ts @@ -0,0 +1,3 @@ +export * from "./types"; +export * from "./resources"; +export * from "./client"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Actor.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Actor.ts new file mode 100644 index 0000000000..2f4a0ae09a --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Actor.ts @@ -0,0 +1,18 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../../../../../index"; + +export interface Actor { + id: string; + region: string; + tags?: unknown; + runtime: Rivet.actors.v1.Runtime; + network: Rivet.actors.v1.Network; + resources?: Rivet.actors.v1.Resources; + lifecycle: Rivet.actors.v1.Lifecycle; + createdAt: Rivet.Timestamp; + startedAt?: Rivet.Timestamp; + destroyedAt?: Rivet.Timestamp; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/EndpointType.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/EndpointType.ts new file mode 100644 index 0000000000..3385b64fb7 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/EndpointType.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export type EndpointType = "hostname" | "path"; +export const EndpointType = { + Hostname: "hostname", + Path: "path", +} as const; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/GuardRouting.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/GuardRouting.ts new file mode 100644 index 0000000000..039f163247 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/GuardRouting.ts @@ -0,0 +1,5 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface GuardRouting {} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/HostRouting.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/HostRouting.ts new file mode 100644 index 0000000000..cdd2164517 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/HostRouting.ts @@ -0,0 +1,5 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface HostRouting {} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Lifecycle.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Lifecycle.ts new file mode 100644 index 0000000000..c6c949e83f --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Lifecycle.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface Lifecycle { + /** The duration to wait for in milliseconds before killing the actor. This should be set to a safe default, and can be overridden during a DELETE request if needed. */ + killTimeout?: number; + /** If true, the actor will try to reschedule itself automatically in the event of a crash or a datacenter failover. The actor will not reschedule if it exits successfully. */ + durable?: boolean; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Network.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Network.ts new file mode 100644 index 0000000000..70a960fece --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Network.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../../../../../index"; + +export interface Network { + mode: Rivet.actors.v1.NetworkMode; + ports: Record; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/NetworkMode.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/NetworkMode.ts new file mode 100644 index 0000000000..14aee186e8 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/NetworkMode.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export type NetworkMode = "bridge" | "host"; +export const NetworkMode = { + Bridge: "bridge", + Host: "host", +} as const; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Port.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Port.ts new file mode 100644 index 0000000000..0046910e6a --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Port.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../../../index"; + +export interface Port { + protocol: Rivet.actors.v1.PortProtocol; + internalPort?: number; + hostname?: string; + port?: number; + path?: string; + /** Fully formed connection URL including protocol, hostname, port, and path, if applicable. */ + url?: string; + routing: Rivet.actors.v1.PortRouting; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/PortProtocol.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/PortProtocol.ts new file mode 100644 index 0000000000..0ec9364df2 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/PortProtocol.ts @@ -0,0 +1,12 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export type PortProtocol = "http" | "https" | "tcp" | "tcp_tls" | "udp"; +export const PortProtocol = { + Http: "http", + Https: "https", + Tcp: "tcp", + TcpTls: "tcp_tls", + Udp: "udp", +} as const; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/PortRouting.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/PortRouting.ts new file mode 100644 index 0000000000..b9fc43b524 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/PortRouting.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../../../../../index"; + +export interface PortRouting { + guard?: Rivet.actors.v1.GuardRouting; + host?: Rivet.actors.v1.HostRouting; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/common/types/Resources.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Resources.ts similarity index 100% rename from sdks/api/full/typescript/src/api/resources/actors/resources/common/types/Resources.ts rename to sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Resources.ts diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Runtime.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Runtime.ts new file mode 100644 index 0000000000..a918ef8387 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/Runtime.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface Runtime { + build: string; + arguments?: string[]; + environment?: Record; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/index.ts new file mode 100644 index 0000000000..e8ecca191e --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/common/types/index.ts @@ -0,0 +1,12 @@ +export * from "./Actor"; +export * from "./Runtime"; +export * from "./Lifecycle"; +export * from "./Resources"; +export * from "./Network"; +export * from "./NetworkMode"; +export * from "./Port"; +export * from "./PortProtocol"; +export * from "./PortRouting"; +export * from "./GuardRouting"; +export * from "./HostRouting"; +export * from "./EndpointType"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/index.ts new file mode 100644 index 0000000000..8b57b1caf5 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/index.ts @@ -0,0 +1,8 @@ +export * as common from "./common"; +export * from "./common/types"; +export * as logs from "./logs"; +export * from "./logs/types"; +export * as metrics from "./metrics"; +export * from "./metrics/types"; +export * from "./logs/client/requests"; +export * from "./metrics/client/requests"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/Client.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/Client.ts new file mode 100644 index 0000000000..2043eba0a0 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/Client.ts @@ -0,0 +1,234 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as environments from "../../../../../../../../environments"; +import * as core from "../../../../../../../../core"; +import * as Rivet from "../../../../../../../index"; +import * as serializers from "../../../../../../../../serialization/index"; +import urlJoin from "url-join"; +import * as errors from "../../../../../../../../errors/index"; + +export declare namespace Logs { + export interface Options { + environment?: core.Supplier; + /** Specify a custom URL to connect the client to. 
*/ + baseUrl?: core.Supplier; + token?: core.Supplier; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + fetcher?: core.FetchFunction; + } + + export interface RequestOptions { + /** The maximum time to wait for a response in seconds. */ + timeoutInSeconds?: number; + /** The number of times to retry the request. Defaults to 2. */ + maxRetries?: number; + /** A hook to abort the request. */ + abortSignal?: AbortSignal; + /** Additional headers to include in the request. */ + headers?: Record; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + } +} + +export class Logs { + constructor(protected readonly _options: Logs.Options = {}) {} + + /** + * Returns the logs for a given actor. + * + * @param {Rivet.actors.v1.GetActorLogsRequestQuery} request + * @param {Logs.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.actors.v1.logs.get({ + * project: "string", + * environment: "string", + * stream: "std_out", + * actorIdsJson: "string", + * searchText: "string", + * searchCaseSensitive: true, + * searchEnableRegex: true, + * watchIndex: "string" + * }) + */ + public async get( + request: Rivet.actors.v1.GetActorLogsRequestQuery, + requestOptions?: Logs.RequestOptions, + ): Promise { + const { + project, + environment, + stream, + actorIdsJson, + searchText, + searchCaseSensitive, + searchEnableRegex, + watchIndex, + } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + _queryParams["stream"] = serializers.actors.v1.QueryLogStream.jsonOrThrow(stream, { + unrecognizedObjectKeys: "strip", + }); + _queryParams["actor_ids_json"] = actorIdsJson; + if (searchText != null) { + _queryParams["search_text"] = searchText; + } + + if (searchCaseSensitive != null) { + _queryParams["search_case_sensitive"] = searchCaseSensitive.toString(); + } + + if (searchEnableRegex != null) { + _queryParams["search_enable_regex"] = searchEnableRegex.toString(); + } + + if (watchIndex != null) { + _queryParams["watch_index"] = watchIndex; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + "/actors/logs", + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.actors.v1.GetActorLogsResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /actors/logs."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + protected async _getAuthorizationHeader(): Promise { + const bearer = await core.Supplier.get(this._options.token); + if (bearer != null) { + return `Bearer ${bearer}`; + } + + return undefined; + } +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/index.ts new file mode 100644 index 0000000000..415726b7fe --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/index.ts @@ -0,0 +1 @@ +export * from "./requests"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/requests/GetActorLogsRequestQuery.ts 
b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/requests/GetActorLogsRequestQuery.ts new file mode 100644 index 0000000000..9aba6ea8bc --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/requests/GetActorLogsRequestQuery.ts @@ -0,0 +1,32 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * stream: "std_out", + * actorIdsJson: "string", + * searchText: "string", + * searchCaseSensitive: true, + * searchEnableRegex: true, + * watchIndex: "string" + * } + */ +export interface GetActorLogsRequestQuery { + project?: string; + environment?: string; + stream: Rivet.actors.v1.QueryLogStream; + actorIdsJson: string; + searchText?: string; + searchCaseSensitive?: boolean; + searchEnableRegex?: boolean; + /** + * A query parameter denoting the requests watch index. + */ + watchIndex?: string; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/requests/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/requests/index.ts new file mode 100644 index 0000000000..599c89b601 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/client/requests/index.ts @@ -0,0 +1 @@ +export { type GetActorLogsRequestQuery } from "./GetActorLogsRequestQuery"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/index.ts new file mode 100644 index 0000000000..c9240f83b4 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from "./client"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/types/GetActorLogsResponse.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/types/GetActorLogsResponse.ts new file mode 100644 index 0000000000..065a5a1fe4 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/types/GetActorLogsResponse.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../../../index"; + +export interface GetActorLogsResponse { + /** List of actor IDs in these logs. The order of these correspond to the index in the log entry. */ + actorIds: string[]; + /** Sorted old to new. */ + lines: string[]; + /** Sorted old to new. */ + timestamps: Rivet.Timestamp[]; + /** + * Streams the logs came from. + * + * 0 = stdout + * 1 = stderr + */ + streams: number[]; + /** Index of the actor that this log was for. Use this index to look the full ID in `actor_ids`. */ + actorIndices: number[]; + watch: Rivet.WatchResponse; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/types/QueryLogStream.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/types/QueryLogStream.ts new file mode 100644 index 0000000000..556646c57e --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/types/QueryLogStream.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +export type QueryLogStream = "std_out" | "std_err" | "all"; +export const QueryLogStream = { + StdOut: "std_out", + StdErr: "std_err", + All: "all", +} as const; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/types/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/types/index.ts new file mode 100644 index 0000000000..a7db7fd2b2 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/logs/types/index.ts @@ -0,0 +1,2 @@ +export * from "./GetActorLogsResponse"; +export * from "./QueryLogStream"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/Client.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/Client.ts new file mode 100644 index 0000000000..b436cdc96a --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/Client.ts @@ -0,0 +1,209 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as environments from "../../../../../../../../environments"; +import * as core from "../../../../../../../../core"; +import * as Rivet from "../../../../../../../index"; +import urlJoin from "url-join"; +import * as serializers from "../../../../../../../../serialization/index"; +import * as errors from "../../../../../../../../errors/index"; + +export declare namespace Metrics { + export interface Options { + environment?: core.Supplier; + /** Specify a custom URL to connect the client to. */ + baseUrl?: core.Supplier; + token?: core.Supplier; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + fetcher?: core.FetchFunction; + } + + export interface RequestOptions { + /** The maximum time to wait for a response in seconds. */ + timeoutInSeconds?: number; + /** The number of times to retry the request. Defaults to 2. */ + maxRetries?: number; + /** A hook to abort the request. */ + abortSignal?: AbortSignal; + /** Additional headers to include in the request. */ + headers?: Record; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + } +} + +export class Metrics { + constructor(protected readonly _options: Metrics.Options = {}) {} + + /** + * Returns the metrics for a given actor. + * + * @param {string} actor - The id of the actor to destroy + * @param {Rivet.actors.v1.GetActorMetricsRequestQuery} request + * @param {Metrics.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.actors.v1.metrics.get("d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", { + * project: "string", + * environment: "string", + * start: 1, + * end: 1, + * interval: 1 + * }) + */ + public async get( + actor: string, + request: Rivet.actors.v1.GetActorMetricsRequestQuery, + requestOptions?: Metrics.RequestOptions, + ): Promise { + const { project, environment, start, end, interval } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + _queryParams["start"] = start.toString(); + _queryParams["end"] = end.toString(); + _queryParams["interval"] = interval.toString(); + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + `/actors/${encodeURIComponent(actor)}/metrics/history`, + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.actors.v1.GetActorMetricsResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + 
serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError( + "Timeout exceeded when calling GET /actors/{actor}/metrics/history.", + ); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + protected async _getAuthorizationHeader(): Promise { + const bearer = await core.Supplier.get(this._options.token); + if (bearer != null) { + return `Bearer ${bearer}`; + } + + return undefined; + } +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/index.ts new file mode 100644 index 0000000000..415726b7fe --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/index.ts @@ -0,0 +1 @@ +export * from "./requests"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/requests/GetActorMetricsRequestQuery.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/requests/GetActorMetricsRequestQuery.ts new file mode 100644 index 0000000000..060467b645 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/requests/GetActorMetricsRequestQuery.ts @@ -0,0 +1,21 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +/** + * @example + * { + * project: "string", + * environment: "string", + * start: 1, + * end: 1, + * interval: 1 + * } + */ +export interface GetActorMetricsRequestQuery { + project?: string; + environment?: string; + start: number; + end: number; + interval: number; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/requests/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/requests/index.ts new file mode 100644 index 0000000000..7a8cf1debe --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/client/requests/index.ts @@ -0,0 +1 @@ +export { type GetActorMetricsRequestQuery } from "./GetActorMetricsRequestQuery"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/index.ts new file mode 100644 index 0000000000..c9240f83b4 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from "./client"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/types/GetActorMetricsResponse.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/types/GetActorMetricsResponse.ts new file mode 100644 index 0000000000..35c68a3ffb --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/types/GetActorMetricsResponse.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface GetActorMetricsResponse { + actorIds: string[]; + metricNames: string[]; + metricAttributes: Record[]; + metricTypes: string[]; + metricValues: number[][]; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/types/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/types/index.ts new file mode 100644 index 0000000000..c5cf235d4d --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/resources/metrics/types/index.ts @@ -0,0 +1 @@ +export * from "./GetActorMetricsResponse"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorNetworkRequest.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorNetworkRequest.ts new file mode 100644 index 0000000000..77eeba1b34 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorNetworkRequest.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface CreateActorNetworkRequest { + mode?: Rivet.actors.v1.NetworkMode; + ports?: Record; + waitReady?: boolean; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorPortRequest.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorPortRequest.ts new file mode 100644 index 0000000000..0abb6a0c00 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorPortRequest.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../../../index"; + +export interface CreateActorPortRequest { + protocol: Rivet.actors.v1.PortProtocol; + internalPort?: number; + routing?: Rivet.actors.v1.PortRouting; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorRequest.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorRequest.ts new file mode 100644 index 0000000000..31a31c4f41 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorRequest.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface CreateActorRequest { + region?: string; + tags?: unknown; + build?: string; + buildTags?: unknown; + runtime?: Rivet.actors.v1.CreateActorRuntimeRequest; + network?: Rivet.actors.v1.CreateActorNetworkRequest; + resources?: Rivet.actors.v1.Resources; + lifecycle?: Rivet.actors.v1.Lifecycle; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorResponse.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorResponse.ts new file mode 100644 index 0000000000..7dbe3f341f --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorResponse.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface CreateActorResponse { + /** The actor that was created */ + actor: Rivet.actors.v1.Actor; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorRuntimeNetworkRequest.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorRuntimeNetworkRequest.ts new file mode 100644 index 0000000000..bc103c8f04 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorRuntimeNetworkRequest.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface CreateActorRuntimeNetworkRequest { + endpointType: Rivet.actors.v1.EndpointType; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorRuntimeRequest.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorRuntimeRequest.ts new file mode 100644 index 0000000000..cace082218 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/CreateActorRuntimeRequest.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface CreateActorRuntimeRequest { + environment?: Record; + network?: Rivet.actors.v1.CreateActorRuntimeNetworkRequest; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/DestroyActorResponse.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/DestroyActorResponse.ts new file mode 100644 index 0000000000..6656d4884f --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/DestroyActorResponse.ts @@ -0,0 +1,5 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +export interface DestroyActorResponse {} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/GetActorResponse.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/GetActorResponse.ts new file mode 100644 index 0000000000..8dbff216b1 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/GetActorResponse.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface GetActorResponse { + actor: Rivet.actors.v1.Actor; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/ListActorsResponse.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/ListActorsResponse.ts new file mode 100644 index 0000000000..1262fb5887 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/ListActorsResponse.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface ListActorsResponse { + /** A list of actors for the project associated with the token. */ + actors: Rivet.actors.v1.Actor[]; + pagination: Rivet.Pagination; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeActorRequest.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeActorRequest.ts new file mode 100644 index 0000000000..d559a790a1 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeActorRequest.ts @@ -0,0 +1,8 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface UpgradeActorRequest { + build?: string; + buildTags?: unknown; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeActorResponse.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeActorResponse.ts new file mode 100644 index 0000000000..6afcca95ec --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeActorResponse.ts @@ -0,0 +1,5 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface UpgradeActorResponse {} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeAllActorsRequest.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeAllActorsRequest.ts new file mode 100644 index 0000000000..0b6e4311c8 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeAllActorsRequest.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface UpgradeAllActorsRequest { + tags?: unknown; + build?: string; + buildTags?: unknown; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeAllActorsResponse.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeAllActorsResponse.ts new file mode 100644 index 0000000000..a1a71d02f8 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/UpgradeAllActorsResponse.ts @@ -0,0 +1,7 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +export interface UpgradeAllActorsResponse { + count: number; +} diff --git a/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/index.ts b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/index.ts new file mode 100644 index 0000000000..9aab9504b0 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/actors/resources/v1/types/index.ts @@ -0,0 +1,13 @@ +export * from "./GetActorResponse"; +export * from "./CreateActorRequest"; +export * from "./CreateActorRuntimeRequest"; +export * from "./CreateActorRuntimeNetworkRequest"; +export * from "./CreateActorNetworkRequest"; +export * from "./CreateActorPortRequest"; +export * from "./CreateActorResponse"; +export * from "./DestroyActorResponse"; +export * from "./UpgradeActorRequest"; +export * from "./UpgradeActorResponse"; +export * from "./UpgradeAllActorsRequest"; +export * from "./UpgradeAllActorsResponse"; +export * from "./ListActorsResponse"; diff --git a/sdks/api/full/typescript/src/api/resources/actors/types/CreateActorRequest.ts b/sdks/api/full/typescript/src/api/resources/actors/types/CreateActorRequest.ts index 121fcf08bd..dc7b38fc28 100644 --- a/sdks/api/full/typescript/src/api/resources/actors/types/CreateActorRequest.ts +++ b/sdks/api/full/typescript/src/api/resources/actors/types/CreateActorRequest.ts @@ -11,6 +11,5 @@ export interface CreateActorRequest { buildTags?: unknown; runtime?: Rivet.actors.CreateActorRuntimeRequest; network?: Rivet.actors.CreateActorNetworkRequest; - resources?: Rivet.actors.Resources; lifecycle?: Rivet.actors.Lifecycle; } diff --git a/sdks/api/full/typescript/src/api/resources/containers/client/Client.ts b/sdks/api/full/typescript/src/api/resources/containers/client/Client.ts new file mode 100644 index 0000000000..883a6bb63c --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/client/Client.ts @@ -0,0 +1,1067 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as environments from "../../../../environments"; +import * as core from "../../../../core"; +import * as Rivet from "../../../index"; +import * as serializers from "../../../../serialization/index"; +import urlJoin from "url-join"; +import * as errors from "../../../../errors/index"; +import { Logs } from "../resources/logs/client/Client"; +import { Metrics } from "../resources/metrics/client/Client"; + +export declare namespace Containers { + export interface Options { + environment?: core.Supplier; + /** Specify a custom URL to connect the client to. */ + baseUrl?: core.Supplier; + token?: core.Supplier; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + fetcher?: core.FetchFunction; + } + + export interface RequestOptions { + /** The maximum time to wait for a response in seconds. */ + timeoutInSeconds?: number; + /** The number of times to retry the request. Defaults to 2. */ + maxRetries?: number; + /** A hook to abort the request. */ + abortSignal?: AbortSignal; + /** Additional headers to include in the request. */ + headers?: Record; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + } +} + +export class Containers { + protected _logs: Logs | undefined; + protected _metrics: Metrics | undefined; + + constructor(protected readonly _options: Containers.Options = {}) {} + + public get logs(): Logs { + return (this._logs ??= new Logs(this._options)); + } + + public get metrics(): Metrics { + return (this._metrics ??= new Metrics(this._options)); + } + + /** + * Gets a container. 
+ * + * @param {Rivet.Id} container - The id of the container to destroy + * @param {Rivet.containers.ListContainersRequestQuery} request + * @param {Containers.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.get("string", { + * project: "string", + * environment: "string", + * endpointType: "hostname" + * }) + */ + public async get( + container: Rivet.Id, + request: Rivet.containers.ListContainersRequestQuery = {}, + requestOptions?: Containers.RequestOptions, + ): Promise { + const { project, environment, endpointType } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + if (endpointType != null) { + _queryParams["endpoint_type"] = serializers.containers.EndpointType.jsonOrThrow(endpointType, { + unrecognizedObjectKeys: "strip", + }); + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + `/v1/containers/${encodeURIComponent(serializers.Id.jsonOrThrow(container))}`, + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.GetContainerResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /v1/containers/{container}."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Lists all containers associated with the token used. Can be filtered by tags in the query string. + * + * @param {Rivet.containers.GetContainersRequestQuery} request + * @param {Containers.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.list({ + * project: "string", + * environment: "string", + * endpointType: "hostname", + * tagsJson: "string", + * includeDestroyed: true, + * cursor: "string" + * }) + */ + public async list( + request: Rivet.containers.GetContainersRequestQuery = {}, + requestOptions?: Containers.RequestOptions, + ): Promise { + const { project, environment, endpointType, tagsJson, includeDestroyed, cursor } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + if (endpointType != null) { + _queryParams["endpoint_type"] = serializers.containers.EndpointType.jsonOrThrow(endpointType, { + unrecognizedObjectKeys: "strip", + }); + } + + if (tagsJson != null) { + _queryParams["tags_json"] = tagsJson; + } + + if (includeDestroyed != null) { + _queryParams["include_destroyed"] = includeDestroyed.toString(); + } + + if (cursor != null) { + _queryParams["cursor"] = cursor; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + "/v1/containers", + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.ListContainersResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /v1/containers."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Create a new container. + * + * @param {Rivet.containers.CreateContainerRequestQuery} request + * @param {Containers.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.create({ + * project: "string", + * environment: "string", + * endpointType: "hostname", + * body: { + * region: "string", + * tags: { + * "key": "value" + * }, + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * }, + * runtime: { + * environment: {}, + * network: { + * endpointType: "hostname" + * } + * }, + * network: { + * mode: "bridge", + * ports: {}, + * waitReady: true + * }, + * resources: { + * cpu: 1, + * memory: 1 + * }, + * lifecycle: { + * killTimeout: 1000000, + * durable: true + * } + * } + * }) + */ + public async create( + request: Rivet.containers.CreateContainerRequestQuery, + requestOptions?: Containers.RequestOptions, + ): Promise { + const { project, environment, endpointType, body: _body } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + if (endpointType != null) { + _queryParams["endpoint_type"] = serializers.containers.EndpointType.jsonOrThrow(endpointType, { + unrecognizedObjectKeys: "strip", + }); + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + "/v1/containers", + ), + method: "POST", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + body: serializers.containers.CreateContainerRequest.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip" }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.CreateContainerResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /v1/containers."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Destroy a container. + * + * @param {Rivet.Id} container - The id of the container to destroy + * @param {Rivet.containers.DestroyContainerRequestQuery} request + * @param {Containers.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.destroy("string", { + * project: "string", + * environment: "string", + * overrideKillTimeout: 1000000 + * }) + */ + public async destroy( + container: Rivet.Id, + request: Rivet.containers.DestroyContainerRequestQuery = {}, + requestOptions?: Containers.RequestOptions, + ): Promise { + const { project, environment, overrideKillTimeout } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + if (overrideKillTimeout != null) { + _queryParams["override_kill_timeout"] = overrideKillTimeout.toString(); + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + `/v1/containers/${encodeURIComponent(serializers.Id.jsonOrThrow(container))}`, + ), + method: "DELETE", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.DestroyContainerResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + 
serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling DELETE /v1/containers/{container}."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Upgrades a container. + * + * @param {Rivet.Id} container - The id of the container to upgrade + * @param {Rivet.containers.UpgradeContainerRequestQuery} request + * @param {Containers.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.upgrade("string", { + * project: "string", + * environment: "string", + * body: { + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * } + * } + * }) + */ + public async upgrade( + container: Rivet.Id, + request: Rivet.containers.UpgradeContainerRequestQuery, + requestOptions?: Containers.RequestOptions, + ): Promise { + const { project, environment, body: _body } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + `/v1/containers/${encodeURIComponent(serializers.Id.jsonOrThrow(container))}/upgrade`, + ), + method: "POST", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + body: serializers.containers.UpgradeContainerRequest.jsonOrThrow(_body, { + unrecognizedObjectKeys: "strip", + }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.UpgradeContainerResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError( + "Timeout exceeded when calling POST /v1/containers/{container}/upgrade.", + ); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Upgrades all containers matching the given tags. + * + * @param {Rivet.containers.UpgradeAllContainersRequestQuery} request + * @param {Containers.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.upgradeAll({ + * project: "string", + * environment: "string", + * body: { + * tags: { + * "key": "value" + * }, + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * } + * } + * }) + */ + public async upgradeAll( + request: Rivet.containers.UpgradeAllContainersRequestQuery, + requestOptions?: Containers.RequestOptions, + ): Promise { + const { project, environment, body: _body } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + "/v1/containers/upgrade", + ), + method: "POST", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + body: serializers.containers.UpgradeAllContainersRequest.jsonOrThrow(_body, { + unrecognizedObjectKeys: "strip", + }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.UpgradeAllContainersResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /v1/containers/upgrade."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + protected async _getAuthorizationHeader(): Promise { + const bearer = await core.Supplier.get(this._options.token); + if (bearer != null) { + return `Bearer ${bearer}`; + } + + return undefined; + } +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/client/index.ts b/sdks/api/full/typescript/src/api/resources/containers/client/index.ts new file mode 100644 index 0000000000..415726b7fe --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/client/index.ts @@ -0,0 +1 @@ +export * from "./requests"; diff --git a/sdks/api/full/typescript/src/api/resources/containers/client/requests/CreateContainerRequestQuery.ts 
b/sdks/api/full/typescript/src/api/resources/containers/client/requests/CreateContainerRequestQuery.ts new file mode 100644 index 0000000000..4cc48c6f03 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/client/requests/CreateContainerRequestQuery.ts @@ -0,0 +1,49 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * endpointType: "hostname", + * body: { + * region: "string", + * tags: { + * "key": "value" + * }, + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * }, + * runtime: { + * environment: {}, + * network: { + * endpointType: "hostname" + * } + * }, + * network: { + * mode: "bridge", + * ports: {}, + * waitReady: true + * }, + * resources: { + * cpu: 1, + * memory: 1 + * }, + * lifecycle: { + * killTimeout: 1000000, + * durable: true + * } + * } + * } + */ +export interface CreateContainerRequestQuery { + project?: string; + environment?: string; + endpointType?: Rivet.containers.EndpointType; + body: Rivet.containers.CreateContainerRequest; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/client/requests/DestroyContainerRequestQuery.ts b/sdks/api/full/typescript/src/api/resources/containers/client/requests/DestroyContainerRequestQuery.ts new file mode 100644 index 0000000000..bde4d00874 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/client/requests/DestroyContainerRequestQuery.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * @example + * { + * project: "string", + * environment: "string", + * overrideKillTimeout: 1000000 + * } + */ +export interface DestroyContainerRequestQuery { + project?: string; + environment?: string; + /** + * The duration to wait for in milliseconds before killing the container. This should be used to override the default kill timeout if a faster time is needed, say for ignoring a graceful shutdown. + */ + overrideKillTimeout?: number; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/client/requests/GetContainersRequestQuery.ts b/sdks/api/full/typescript/src/api/resources/containers/client/requests/GetContainersRequestQuery.ts new file mode 100644 index 0000000000..3455b96aab --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/client/requests/GetContainersRequestQuery.ts @@ -0,0 +1,25 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * endpointType: "hostname", + * tagsJson: "string", + * includeDestroyed: true, + * cursor: "string" + * } + */ +export interface GetContainersRequestQuery { + project?: string; + environment?: string; + endpointType?: Rivet.containers.EndpointType; + tagsJson?: string; + includeDestroyed?: boolean; + cursor?: string; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/client/requests/ListContainersRequestQuery.ts b/sdks/api/full/typescript/src/api/resources/containers/client/requests/ListContainersRequestQuery.ts new file mode 100644 index 0000000000..ee279d09c3 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/client/requests/ListContainersRequestQuery.ts @@ -0,0 +1,19 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * endpointType: "hostname" + * } + */ +export interface ListContainersRequestQuery { + project?: string; + environment?: string; + endpointType?: Rivet.containers.EndpointType; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/client/requests/UpgradeAllContainersRequestQuery.ts b/sdks/api/full/typescript/src/api/resources/containers/client/requests/UpgradeAllContainersRequestQuery.ts new file mode 100644 index 0000000000..97b0fe6caa --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/client/requests/UpgradeAllContainersRequestQuery.ts @@ -0,0 +1,27 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * body: { + * tags: { + * "key": "value" + * }, + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * } + * } + * } + */ +export interface UpgradeAllContainersRequestQuery { + project?: string; + environment?: string; + body: Rivet.containers.UpgradeAllContainersRequest; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/client/requests/UpgradeContainerRequestQuery.ts b/sdks/api/full/typescript/src/api/resources/containers/client/requests/UpgradeContainerRequestQuery.ts new file mode 100644 index 0000000000..dd88b848d7 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/client/requests/UpgradeContainerRequestQuery.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * body: { + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * } + * } + * } + */ +export interface UpgradeContainerRequestQuery { + project?: string; + environment?: string; + body: Rivet.containers.UpgradeContainerRequest; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/client/requests/index.ts b/sdks/api/full/typescript/src/api/resources/containers/client/requests/index.ts new file mode 100644 index 0000000000..6fdd1f8d4e --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/client/requests/index.ts @@ -0,0 +1,6 @@ +export { type ListContainersRequestQuery } from "./ListContainersRequestQuery"; +export { type GetContainersRequestQuery } from "./GetContainersRequestQuery"; +export { type CreateContainerRequestQuery } from "./CreateContainerRequestQuery"; +export { type DestroyContainerRequestQuery } from "./DestroyContainerRequestQuery"; +export { type UpgradeContainerRequestQuery } from "./UpgradeContainerRequestQuery"; +export { type UpgradeAllContainersRequestQuery } from "./UpgradeAllContainersRequestQuery"; diff --git a/sdks/api/full/typescript/src/api/resources/containers/index.ts b/sdks/api/full/typescript/src/api/resources/containers/index.ts new file mode 100644 index 0000000000..a931b36375 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/index.ts @@ -0,0 +1,3 @@ +export * from "./types"; +export * from "./resources"; +export * from "./client"; diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/common/index.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/common/index.ts new file mode 100644 index 
0000000000..eea524d655 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/common/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Container.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Container.ts new file mode 100644 index 0000000000..3b8f94a422 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Container.ts @@ -0,0 +1,18 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface Container { + id: Rivet.Id; + region: string; + tags?: unknown; + runtime: Rivet.containers.Runtime; + network: Rivet.containers.Network; + resources: Rivet.containers.Resources; + lifecycle: Rivet.containers.Lifecycle; + createdAt: Rivet.Timestamp; + startedAt?: Rivet.Timestamp; + destroyedAt?: Rivet.Timestamp; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/EndpointType.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/EndpointType.ts new file mode 100644 index 0000000000..3385b64fb7 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/EndpointType.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export type EndpointType = "hostname" | "path"; +export const EndpointType = { + Hostname: "hostname", + Path: "path", +} as const; diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/GuardRouting.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/GuardRouting.ts new file mode 100644 index 0000000000..039f163247 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/GuardRouting.ts @@ -0,0 +1,5 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface GuardRouting {} diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/HostRouting.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/HostRouting.ts new file mode 100644 index 0000000000..cdd2164517 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/HostRouting.ts @@ -0,0 +1,5 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface HostRouting {} diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Lifecycle.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Lifecycle.ts new file mode 100644 index 0000000000..e45d39b181 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Lifecycle.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface Lifecycle { + /** The duration to wait for in milliseconds before killing the container. This should be set to a safe default, and can be overridden during a DELETE request if needed. */ + killTimeout?: number; + /** If true, the container will try to reschedule itself automatically in the event of a crash or a datacenter failover. The container will not reschedule if it exits successfully. 
*/ + durable?: boolean; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Network.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Network.ts new file mode 100644 index 0000000000..7bb456e91d --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Network.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface Network { + mode: Rivet.containers.NetworkMode; + ports: Record; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/NetworkMode.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/NetworkMode.ts new file mode 100644 index 0000000000..14aee186e8 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/NetworkMode.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export type NetworkMode = "bridge" | "host"; +export const NetworkMode = { + Bridge: "bridge", + Host: "host", +} as const; diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Port.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Port.ts new file mode 100644 index 0000000000..0fa9c5e224 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Port.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface Port { + protocol: Rivet.containers.PortProtocol; + internalPort?: number; + hostname?: string; + port?: number; + path?: string; + /** Fully formed connection URL including protocol, hostname, port, and path, if applicable. */ + url?: string; + routing: Rivet.containers.PortRouting; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/PortProtocol.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/PortProtocol.ts new file mode 100644 index 0000000000..0ec9364df2 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/PortProtocol.ts @@ -0,0 +1,12 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export type PortProtocol = "http" | "https" | "tcp" | "tcp_tls" | "udp"; +export const PortProtocol = { + Http: "http", + Https: "https", + Tcp: "tcp", + TcpTls: "tcp_tls", + Udp: "udp", +} as const; diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/PortRouting.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/PortRouting.ts new file mode 100644 index 0000000000..6ec92a6d0a --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/PortRouting.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../../../index"; + +export interface PortRouting { + guard?: Rivet.containers.GuardRouting; + host?: Rivet.containers.HostRouting; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/resources/common/types/Resources.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Resources.ts similarity index 100% rename from sdks/api/runtime/typescript/src/api/resources/actors/resources/common/types/Resources.ts rename to sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Resources.ts diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Runtime.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Runtime.ts new file mode 100644 index 0000000000..a918ef8387 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/Runtime.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface Runtime { + build: string; + arguments?: string[]; + environment?: Record; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/index.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/index.ts new file mode 100644 index 0000000000..634dcaff02 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/common/types/index.ts @@ -0,0 +1,12 @@ +export * from "./Container"; +export * from "./Runtime"; +export * from "./Lifecycle"; +export * from "./Resources"; +export * from "./Network"; +export * from "./NetworkMode"; +export * from "./Port"; +export * from "./PortProtocol"; +export * from "./PortRouting"; +export * from "./GuardRouting"; +export * from "./HostRouting"; +export * from "./EndpointType"; diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/index.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/index.ts new file mode 100644 index 0000000000..8b57b1caf5 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/index.ts @@ -0,0 +1,8 @@ +export * as common from "./common"; +export * from "./common/types"; +export * as logs from "./logs"; +export * from "./logs/types"; +export * as metrics from "./metrics"; +export * from "./metrics/types"; +export * from "./logs/client/requests"; +export * from "./metrics/client/requests"; diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/Client.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/Client.ts new file mode 100644 index 0000000000..d8ff58ebf2 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/Client.ts @@ -0,0 +1,234 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as environments from "../../../../../../environments"; +import * as core from "../../../../../../core"; +import * as Rivet from "../../../../../index"; +import * as serializers from "../../../../../../serialization/index"; +import urlJoin from "url-join"; +import * as errors from "../../../../../../errors/index"; + +export declare namespace Logs { + export interface Options { + environment?: core.Supplier; + /** Specify a custom URL to connect the client to. 
*/ + baseUrl?: core.Supplier; + token?: core.Supplier; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + fetcher?: core.FetchFunction; + } + + export interface RequestOptions { + /** The maximum time to wait for a response in seconds. */ + timeoutInSeconds?: number; + /** The number of times to retry the request. Defaults to 2. */ + maxRetries?: number; + /** A hook to abort the request. */ + abortSignal?: AbortSignal; + /** Additional headers to include in the request. */ + headers?: Record; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + } +} + +export class Logs { + constructor(protected readonly _options: Logs.Options = {}) {} + + /** + * Returns the logs for a given container. + * + * @param {Rivet.containers.GetContainerLogsRequestQuery} request + * @param {Logs.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.logs.get({ + * project: "string", + * environment: "string", + * stream: "std_out", + * containerIdsJson: "string", + * searchText: "string", + * searchCaseSensitive: true, + * searchEnableRegex: true, + * watchIndex: "string" + * }) + */ + public async get( + request: Rivet.containers.GetContainerLogsRequestQuery, + requestOptions?: Logs.RequestOptions, + ): Promise { + const { + project, + environment, + stream, + containerIdsJson, + searchText, + searchCaseSensitive, + searchEnableRegex, + watchIndex, + } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + _queryParams["stream"] = serializers.containers.QueryLogStream.jsonOrThrow(stream, { + unrecognizedObjectKeys: "strip", + }); + _queryParams["container_ids_json"] = containerIdsJson; + if (searchText != null) { + _queryParams["search_text"] = searchText; + } + + if (searchCaseSensitive != null) { + _queryParams["search_case_sensitive"] = searchCaseSensitive.toString(); + } + + if (searchEnableRegex != null) { + _queryParams["search_enable_regex"] = searchEnableRegex.toString(); + } + + if (watchIndex != null) { + _queryParams["watch_index"] = watchIndex; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + "/v1/containers/logs", + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.GetContainerLogsResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /v1/containers/logs."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + protected async _getAuthorizationHeader(): Promise { + const bearer = await core.Supplier.get(this._options.token); + if (bearer != null) { + return `Bearer ${bearer}`; + } + + return undefined; + } +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/index.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/index.ts new file mode 100644 index 0000000000..415726b7fe --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/index.ts @@ -0,0 +1 @@ +export * from "./requests"; diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/requests/GetContainerLogsRequestQuery.ts 
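In the same spirit, a sketch of tailing container logs with the generated logs client above. The RivetClient entry point, the JSON-array encoding of containerIdsJson, and the use of watch.index for long polling are assumptions; the query and response field names come from the types in this diff.

import { RivetClient } from "@rivet-gg/api"; // package name assumed

async function tailLogs(client: RivetClient, containerId: string): Promise<void> {
    let watchIndex: string | undefined;
    for (;;) {
        const res = await client.containers.logs.get({
            project: "my-project",
            environment: "prod",
            stream: "all",
            // Assumed to be a JSON-encoded array of container IDs.
            containerIdsJson: JSON.stringify([containerId]),
            watchIndex,
        });
        res.lines.forEach((line, i) => {
            // Per the response docs above: streams[i] is 0 for stdout, 1 for stderr.
            const label = res.streams[i] === 1 ? "stderr" : "stdout";
            console.log(`[${label}] ${line}`);
        });
        // Assumes WatchResponse exposes an index to long-poll with.
        watchIndex = res.watch.index;
    }
}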
b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/requests/GetContainerLogsRequestQuery.ts new file mode 100644 index 0000000000..261f94c592 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/requests/GetContainerLogsRequestQuery.ts @@ -0,0 +1,32 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * stream: "std_out", + * containerIdsJson: "string", + * searchText: "string", + * searchCaseSensitive: true, + * searchEnableRegex: true, + * watchIndex: "string" + * } + */ +export interface GetContainerLogsRequestQuery { + project?: string; + environment?: string; + stream: Rivet.containers.QueryLogStream; + containerIdsJson: string; + searchText?: string; + searchCaseSensitive?: boolean; + searchEnableRegex?: boolean; + /** + * A query parameter denoting the requests watch index. + */ + watchIndex?: string; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/requests/index.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/requests/index.ts new file mode 100644 index 0000000000..fab7efe5ec --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/client/requests/index.ts @@ -0,0 +1 @@ +export { type GetContainerLogsRequestQuery } from "./GetContainerLogsRequestQuery"; diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/logs/index.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/index.ts new file mode 100644 index 0000000000..c9240f83b4 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from "./client"; diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/logs/types/GetContainerLogsResponse.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/types/GetContainerLogsResponse.ts new file mode 100644 index 0000000000..f64773d382 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/types/GetContainerLogsResponse.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface GetContainerLogsResponse { + /** List of container IDs in these logs. The order of these correspond to the index in the log entry. */ + containerIds: Rivet.Id[]; + /** Sorted old to new. */ + lines: string[]; + /** Sorted old to new. */ + timestamps: Rivet.Timestamp[]; + /** + * Streams the logs came from. + * + * 0 = stdout + * 1 = stderr + */ + streams: number[]; + /** List of flags denoting if this log is not directly from the container. */ + foreigns: boolean[]; + /** Index of the container that this log was for. Use this index to look the full ID in `container_ids`. 
+ */ + containerIndices: number[]; + watch: Rivet.WatchResponse; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/logs/types/QueryLogStream.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/types/QueryLogStream.ts new file mode 100644 index 0000000000..556646c57e --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/types/QueryLogStream.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export type QueryLogStream = "std_out" | "std_err" | "all"; +export const QueryLogStream = { + StdOut: "std_out", + StdErr: "std_err", + All: "all", +} as const; diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/logs/types/index.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/types/index.ts new file mode 100644 index 0000000000..0b4e4957cf --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/logs/types/index.ts @@ -0,0 +1,2 @@ +export * from "./GetContainerLogsResponse"; +export * from "./QueryLogStream"; diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/Client.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/Client.ts new file mode 100644 index 0000000000..9c02a77644 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/Client.ts @@ -0,0 +1,209 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as environments from "../../../../../../environments"; +import * as core from "../../../../../../core"; +import * as Rivet from "../../../../../index"; +import * as serializers from "../../../../../../serialization/index"; +import urlJoin from "url-join"; +import * as errors from "../../../../../../errors/index"; + +export declare namespace Metrics { + export interface Options { + environment?: core.Supplier<environments.RivetEnvironment | string>; + /** Specify a custom URL to connect the client to. */ + baseUrl?: core.Supplier<string>; + token?: core.Supplier<core.BearerToken | undefined>; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + fetcher?: core.FetchFunction; + } + + export interface RequestOptions { + /** The maximum time to wait for a response in seconds. */ + timeoutInSeconds?: number; + /** The number of times to retry the request. Defaults to 2. */ + maxRetries?: number; + /** A hook to abort the request. */ + abortSignal?: AbortSignal; + /** Additional headers to include in the request. */ + headers?: Record<string, string>; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + } +} + +export class Metrics { + constructor(protected readonly _options: Metrics.Options = {}) {} + + /** + * Returns the metrics for a given container. + * + * @param {Rivet.Id} container - The id of the container to get metrics for + * @param {Rivet.containers.GetContainerMetricsRequestQuery} request + * @param {Metrics.RequestOptions} requestOptions - Request-specific configuration.
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.metrics.get("string", { + * project: "string", + * environment: "string", + * start: 1, + * end: 1, + * interval: 1 + * }) + */ + public async get( + container: Rivet.Id, + request: Rivet.containers.GetContainerMetricsRequestQuery, + requestOptions?: Metrics.RequestOptions, + ): Promise { + const { project, environment, start, end, interval } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + _queryParams["start"] = start.toString(); + _queryParams["end"] = end.toString(); + _queryParams["interval"] = interval.toString(); + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + `/v1/containers/${encodeURIComponent(serializers.Id.jsonOrThrow(container))}/metrics/history`, + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.GetContainerMetricsResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError( + "Timeout exceeded when calling GET /v1/containers/{container}/metrics/history.", + ); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + protected async _getAuthorizationHeader(): Promise { + const bearer = await core.Supplier.get(this._options.token); + if (bearer != null) { + return `Bearer ${bearer}`; + } + + return undefined; + } +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/index.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/index.ts new file mode 100644 index 0000000000..415726b7fe --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/index.ts @@ -0,0 +1 @@ +export * from "./requests"; diff --git 
a/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/requests/GetContainerMetricsRequestQuery.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/requests/GetContainerMetricsRequestQuery.ts new file mode 100644 index 0000000000..4bd8cd4208 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/requests/GetContainerMetricsRequestQuery.ts @@ -0,0 +1,21 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * @example + * { + * project: "string", + * environment: "string", + * start: 1, + * end: 1, + * interval: 1 + * } + */ +export interface GetContainerMetricsRequestQuery { + project?: string; + environment?: string; + start: number; + end: number; + interval: number; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/requests/index.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/requests/index.ts new file mode 100644 index 0000000000..276cf89b5a --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/client/requests/index.ts @@ -0,0 +1 @@ +export { type GetContainerMetricsRequestQuery } from "./GetContainerMetricsRequestQuery"; diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/index.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/index.ts new file mode 100644 index 0000000000..c9240f83b4 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from "./client"; diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts new file mode 100644 index 0000000000..a1cb89d5c0 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface GetContainerMetricsResponse { + containerIds: string[]; + metricNames: string[]; + metricAttributes: Record[]; + metricTypes: string[]; + metricValues: number[][]; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/types/index.ts b/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/types/index.ts new file mode 100644 index 0000000000..835db2ac20 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/resources/metrics/types/index.ts @@ -0,0 +1 @@ +export * from "./GetContainerMetricsResponse"; diff --git a/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerNetworkRequest.ts b/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerNetworkRequest.ts new file mode 100644 index 0000000000..928c65a843 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerNetworkRequest.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
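Similarly, a sketch of pulling one hour of metric history with the generated metrics client above. Treating start, end, and interval as millisecond values and pairing metricValues rows with metricNames are assumptions; the method signature and query fields come from the generated code.

import { RivetClient } from "@rivet-gg/api"; // package name assumed

async function printMetricHistory(client: RivetClient, containerId: string): Promise<void> {
    const end = Date.now();
    const start = end - 60 * 60 * 1000; // one hour ago (millisecond timestamps assumed)
    const res = await client.containers.metrics.get(containerId, {
        project: "my-project",
        environment: "prod",
        start,
        end,
        interval: 60 * 1000, // one data point per minute (unit assumed)
    });
    // Assumes metricValues[i] is the series for metricNames[i].
    res.metricNames.forEach((name, i) => {
        console.log(name, res.metricTypes[i], res.metricValues[i]);
    });
}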
+ */ + +import * as Rivet from "../../../index"; + +export interface CreateContainerNetworkRequest { + mode?: Rivet.containers.NetworkMode; + ports?: Record; + waitReady?: boolean; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerPortRequest.ts b/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerPortRequest.ts new file mode 100644 index 0000000000..5be8ff1e37 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerPortRequest.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../index"; + +export interface CreateContainerPortRequest { + protocol: Rivet.containers.PortProtocol; + internalPort?: number; + routing?: Rivet.containers.PortRouting; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerRequest.ts b/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerRequest.ts new file mode 100644 index 0000000000..eacb98877b --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerRequest.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../index"; + +export interface CreateContainerRequest { + region?: string; + tags?: unknown; + build?: string; + buildTags?: unknown; + runtime?: Rivet.containers.CreateContainerRuntimeRequest; + network?: Rivet.containers.CreateContainerNetworkRequest; + resources: Rivet.containers.Resources; + lifecycle?: Rivet.containers.Lifecycle; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerResponse.ts b/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerResponse.ts new file mode 100644 index 0000000000..86b274113c --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerResponse.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../index"; + +export interface CreateContainerResponse { + /** The container that was created */ + container: Rivet.containers.Container; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts b/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts new file mode 100644 index 0000000000..5d9748a261 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../index"; + +export interface CreateContainerRuntimeNetworkRequest { + endpointType: Rivet.containers.EndpointType; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerRuntimeRequest.ts b/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerRuntimeRequest.ts new file mode 100644 index 0000000000..a8589fcb6e --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/types/CreateContainerRuntimeRequest.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../index"; + +export interface CreateContainerRuntimeRequest { + environment?: Record; + network?: Rivet.containers.CreateContainerRuntimeNetworkRequest; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/types/DestroyContainerResponse.ts b/sdks/api/full/typescript/src/api/resources/containers/types/DestroyContainerResponse.ts new file mode 100644 index 0000000000..2d37f14af9 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/types/DestroyContainerResponse.ts @@ -0,0 +1,5 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface DestroyContainerResponse {} diff --git a/sdks/api/full/typescript/src/api/resources/containers/types/GetContainerResponse.ts b/sdks/api/full/typescript/src/api/resources/containers/types/GetContainerResponse.ts new file mode 100644 index 0000000000..feaf5dc7e2 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/types/GetContainerResponse.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../index"; + +export interface GetContainerResponse { + container: Rivet.containers.Container; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/types/ListContainersResponse.ts b/sdks/api/full/typescript/src/api/resources/containers/types/ListContainersResponse.ts new file mode 100644 index 0000000000..c9d67e9383 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/types/ListContainersResponse.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../index"; + +export interface ListContainersResponse { + /** A list of containers for the project associated with the token. */ + containers: Rivet.containers.Container[]; + pagination: Rivet.Pagination; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/types/UpgradeAllContainersRequest.ts b/sdks/api/full/typescript/src/api/resources/containers/types/UpgradeAllContainersRequest.ts new file mode 100644 index 0000000000..3be10284be --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/types/UpgradeAllContainersRequest.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface UpgradeAllContainersRequest { + tags?: unknown; + build?: string; + buildTags?: unknown; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/types/UpgradeAllContainersResponse.ts b/sdks/api/full/typescript/src/api/resources/containers/types/UpgradeAllContainersResponse.ts new file mode 100644 index 0000000000..d9c8a485b0 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/types/UpgradeAllContainersResponse.ts @@ -0,0 +1,7 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface UpgradeAllContainersResponse { + count: number; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/types/UpgradeContainerRequest.ts b/sdks/api/full/typescript/src/api/resources/containers/types/UpgradeContainerRequest.ts new file mode 100644 index 0000000000..26d34a1bc8 --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/types/UpgradeContainerRequest.ts @@ -0,0 +1,8 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +export interface UpgradeContainerRequest { + build?: string; + buildTags?: unknown; +} diff --git a/sdks/api/full/typescript/src/api/resources/containers/types/UpgradeContainerResponse.ts b/sdks/api/full/typescript/src/api/resources/containers/types/UpgradeContainerResponse.ts new file mode 100644 index 0000000000..6a6966dc4a --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/types/UpgradeContainerResponse.ts @@ -0,0 +1,5 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface UpgradeContainerResponse {} diff --git a/sdks/api/full/typescript/src/api/resources/containers/types/index.ts b/sdks/api/full/typescript/src/api/resources/containers/types/index.ts new file mode 100644 index 0000000000..436ea216ac --- /dev/null +++ b/sdks/api/full/typescript/src/api/resources/containers/types/index.ts @@ -0,0 +1,13 @@ +export * from "./GetContainerResponse"; +export * from "./CreateContainerRequest"; +export * from "./CreateContainerRuntimeRequest"; +export * from "./CreateContainerRuntimeNetworkRequest"; +export * from "./CreateContainerNetworkRequest"; +export * from "./CreateContainerPortRequest"; +export * from "./CreateContainerResponse"; +export * from "./DestroyContainerResponse"; +export * from "./UpgradeContainerRequest"; +export * from "./UpgradeContainerResponse"; +export * from "./UpgradeAllContainersRequest"; +export * from "./UpgradeAllContainersResponse"; +export * from "./ListContainersResponse"; diff --git a/sdks/api/full/typescript/src/api/resources/index.ts b/sdks/api/full/typescript/src/api/resources/index.ts index acff7d201e..a8b5d47543 100644 --- a/sdks/api/full/typescript/src/api/resources/index.ts +++ b/sdks/api/full/typescript/src/api/resources/index.ts @@ -1,6 +1,7 @@ export * as actors from "./actors"; export * as builds from "./builds"; export * as cloud from "./cloud"; +export * as containers from "./containers"; export * as coreIntercom from "./coreIntercom"; export * as edgeIntercom from "./edgeIntercom"; export * as group from "./group"; diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/common/types/Actor.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/common/types/Actor.ts index aad5b7be1c..0759888555 100644 --- a/sdks/api/full/typescript/src/serialization/resources/actors/resources/common/types/Actor.ts +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/common/types/Actor.ts @@ -8,7 +8,6 @@ import * as core from "../../../../../../core"; import { Id } from "../../../../common/types/Id"; import { Runtime } from "./Runtime"; import { Network } from "./Network"; -import { Resources } from "./Resources"; import { Lifecycle } from "./Lifecycle"; import { Timestamp } from "../../../../common/types/Timestamp"; @@ -19,7 +18,6 @@ export const Actor: core.serialization.ObjectSchema = core.serialization.object({ - actorIds: core.serialization.property("actor_ids", core.serialization.list(Id)), + actorIds: core.serialization.property("actor_ids", core.serialization.list(core.serialization.string())), lines: core.serialization.list(core.serialization.string()), timestamps: core.serialization.list(Timestamp), streams: core.serialization.list(core.serialization.number()), @@ -24,7 +23,7 @@ export const GetActorLogsResponse: core.serialization.ObjectSchema< export declare namespace GetActorLogsResponse { export interface Raw { - actor_ids: Id.Raw[]; + actor_ids: string[]; lines: string[]; timestamps: Timestamp.Raw[]; streams: 
number[]; diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/index.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/index.ts new file mode 100644 index 0000000000..3ce0a3e38e --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from "./resources"; diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/index.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Actor.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Actor.ts new file mode 100644 index 0000000000..5e479d272f --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Actor.ts @@ -0,0 +1,41 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; +import { Runtime } from "./Runtime"; +import { Network } from "./Network"; +import { Resources } from "./Resources"; +import { Lifecycle } from "./Lifecycle"; +import { Timestamp } from "../../../../../../common/types/Timestamp"; + +export const Actor: core.serialization.ObjectSchema = + core.serialization.object({ + id: core.serialization.string(), + region: core.serialization.string(), + tags: core.serialization.unknown(), + runtime: Runtime, + network: Network, + resources: Resources.optional(), + lifecycle: Lifecycle, + createdAt: core.serialization.property("created_at", Timestamp), + startedAt: core.serialization.property("started_at", Timestamp.optional()), + destroyedAt: core.serialization.property("destroyed_at", Timestamp.optional()), + }); + +export declare namespace Actor { + export interface Raw { + id: string; + region: string; + tags?: unknown; + runtime: Runtime.Raw; + network: Network.Raw; + resources?: Resources.Raw | null; + lifecycle: Lifecycle.Raw; + created_at: Timestamp.Raw; + started_at?: Timestamp.Raw | null; + destroyed_at?: Timestamp.Raw | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/EndpointType.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/EndpointType.ts new file mode 100644 index 0000000000..23034dd3c6 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/EndpointType.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; + +export const EndpointType: core.serialization.Schema< + serializers.actors.v1.EndpointType.Raw, + Rivet.actors.v1.EndpointType +> = core.serialization.enum_(["hostname", "path"]); + +export declare namespace EndpointType { + export type Raw = "hostname" | "path"; +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/GuardRouting.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/GuardRouting.ts new file mode 100644 index 0000000000..879f4a5e52 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/GuardRouting.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; + +export const GuardRouting: core.serialization.ObjectSchema< + serializers.actors.v1.GuardRouting.Raw, + Rivet.actors.v1.GuardRouting +> = core.serialization.object({}); + +export declare namespace GuardRouting { + export interface Raw {} +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/HostRouting.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/HostRouting.ts new file mode 100644 index 0000000000..c93b591613 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/HostRouting.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; + +export const HostRouting: core.serialization.ObjectSchema< + serializers.actors.v1.HostRouting.Raw, + Rivet.actors.v1.HostRouting +> = core.serialization.object({}); + +export declare namespace HostRouting { + export interface Raw {} +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Lifecycle.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Lifecycle.ts new file mode 100644 index 0000000000..ef21c68b02 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Lifecycle.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; + +export const Lifecycle: core.serialization.ObjectSchema< + serializers.actors.v1.Lifecycle.Raw, + Rivet.actors.v1.Lifecycle +> = core.serialization.object({ + killTimeout: core.serialization.property("kill_timeout", core.serialization.number().optional()), + durable: core.serialization.boolean().optional(), +}); + +export declare namespace Lifecycle { + export interface Raw { + kill_timeout?: number | null; + durable?: boolean | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Network.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Network.ts new file mode 100644 index 0000000000..ef3bb20194 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Network.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; +import { NetworkMode } from "./NetworkMode"; +import { Port } from "./Port"; + +export const Network: core.serialization.ObjectSchema = + core.serialization.object({ + mode: NetworkMode, + ports: core.serialization.record(core.serialization.string(), Port), + }); + +export declare namespace Network { + export interface Raw { + mode: NetworkMode.Raw; + ports: Record; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/NetworkMode.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/NetworkMode.ts new file mode 100644 index 0000000000..4530ea90e7 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/NetworkMode.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; + +export const NetworkMode: core.serialization.Schema< + serializers.actors.v1.NetworkMode.Raw, + Rivet.actors.v1.NetworkMode +> = core.serialization.enum_(["bridge", "host"]); + +export declare namespace NetworkMode { + export type Raw = "bridge" | "host"; +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Port.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Port.ts new file mode 100644 index 0000000000..d27cd5623f --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Port.ts @@ -0,0 +1,32 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; +import { PortProtocol } from "./PortProtocol"; +import { PortRouting } from "./PortRouting"; + +export const Port: core.serialization.ObjectSchema = + core.serialization.object({ + protocol: PortProtocol, + internalPort: core.serialization.property("internal_port", core.serialization.number().optional()), + hostname: core.serialization.string().optional(), + port: core.serialization.number().optional(), + path: core.serialization.string().optional(), + url: core.serialization.string().optional(), + routing: PortRouting, + }); + +export declare namespace Port { + export interface Raw { + protocol: PortProtocol.Raw; + internal_port?: number | null; + hostname?: string | null; + port?: number | null; + path?: string | null; + url?: string | null; + routing: PortRouting.Raw; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/PortProtocol.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/PortProtocol.ts new file mode 100644 index 0000000000..969d8967ad --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/PortProtocol.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; + +export const PortProtocol: core.serialization.Schema< + serializers.actors.v1.PortProtocol.Raw, + Rivet.actors.v1.PortProtocol +> = core.serialization.enum_(["http", "https", "tcp", "tcp_tls", "udp"]); + +export declare namespace PortProtocol { + export type Raw = "http" | "https" | "tcp" | "tcp_tls" | "udp"; +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/PortRouting.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/PortRouting.ts new file mode 100644 index 0000000000..e8e4dd39c8 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/PortRouting.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; +import { GuardRouting } from "./GuardRouting"; +import { HostRouting } from "./HostRouting"; + +export const PortRouting: core.serialization.ObjectSchema< + serializers.actors.v1.PortRouting.Raw, + Rivet.actors.v1.PortRouting +> = core.serialization.object({ + guard: GuardRouting.optional(), + host: HostRouting.optional(), +}); + +export declare namespace PortRouting { + export interface Raw { + guard?: GuardRouting.Raw | null; + host?: HostRouting.Raw | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Resources.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Resources.ts new file mode 100644 index 0000000000..d81b0d5275 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Resources.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; + +export const Resources: core.serialization.ObjectSchema< + serializers.actors.v1.Resources.Raw, + Rivet.actors.v1.Resources +> = core.serialization.object({ + cpu: core.serialization.number(), + memory: core.serialization.number(), +}); + +export declare namespace Resources { + export interface Raw { + cpu: number; + memory: number; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Runtime.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Runtime.ts new file mode 100644 index 0000000000..ee844106f8 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/Runtime.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; + +export const Runtime: core.serialization.ObjectSchema = + core.serialization.object({ + build: core.serialization.string(), + arguments: core.serialization.list(core.serialization.string()).optional(), + environment: core.serialization.record(core.serialization.string(), core.serialization.string()).optional(), + }); + +export declare namespace Runtime { + export interface Raw { + build: string; + arguments?: string[] | null; + environment?: Record | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/index.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/index.ts new file mode 100644 index 0000000000..e8ecca191e --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/common/types/index.ts @@ -0,0 +1,12 @@ +export * from "./Actor"; +export * from "./Runtime"; +export * from "./Lifecycle"; +export * from "./Resources"; +export * from "./Network"; +export * from "./NetworkMode"; +export * from "./Port"; +export * from "./PortProtocol"; +export * from "./PortRouting"; +export * from "./GuardRouting"; +export * from "./HostRouting"; +export * from "./EndpointType"; diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/index.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/index.ts new file mode 100644 index 0000000000..3bb7903aa2 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/index.ts @@ -0,0 +1,6 @@ +export * as common from "./common"; +export * from "./common/types"; +export * as logs from "./logs"; +export * from "./logs/types"; +export * as metrics from "./metrics"; +export * from "./metrics/types"; diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/index.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/types/GetActorLogsResponse.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/types/GetActorLogsResponse.ts new file mode 100644 index 0000000000..0b4177c559 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/types/GetActorLogsResponse.ts @@ -0,0 +1,32 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; +import { Timestamp } from "../../../../../../common/types/Timestamp"; +import { WatchResponse } from "../../../../../../common/types/WatchResponse"; + +export const GetActorLogsResponse: core.serialization.ObjectSchema< + serializers.actors.v1.GetActorLogsResponse.Raw, + Rivet.actors.v1.GetActorLogsResponse +> = core.serialization.object({ + actorIds: core.serialization.property("actor_ids", core.serialization.list(core.serialization.string())), + lines: core.serialization.list(core.serialization.string()), + timestamps: core.serialization.list(Timestamp), + streams: core.serialization.list(core.serialization.number()), + actorIndices: core.serialization.property("actor_indices", core.serialization.list(core.serialization.number())), + watch: WatchResponse, +}); + +export declare namespace GetActorLogsResponse { + export interface Raw { + actor_ids: string[]; + lines: string[]; + timestamps: Timestamp.Raw[]; + streams: number[]; + actor_indices: number[]; + watch: WatchResponse.Raw; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/types/QueryLogStream.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/types/QueryLogStream.ts new file mode 100644 index 0000000000..581664b317 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/types/QueryLogStream.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; + +export const QueryLogStream: core.serialization.Schema< + serializers.actors.v1.QueryLogStream.Raw, + Rivet.actors.v1.QueryLogStream +> = core.serialization.enum_(["std_out", "std_err", "all"]); + +export declare namespace QueryLogStream { + export type Raw = "std_out" | "std_err" | "all"; +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/types/index.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/types/index.ts new file mode 100644 index 0000000000..a7db7fd2b2 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/logs/types/index.ts @@ -0,0 +1,2 @@ +export * from "./GetActorLogsResponse"; +export * from "./QueryLogStream"; diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/metrics/index.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/metrics/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/metrics/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/metrics/types/GetActorMetricsResponse.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/metrics/types/GetActorMetricsResponse.ts new file mode 100644 index 0000000000..fdf2dabf81 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/metrics/types/GetActorMetricsResponse.ts @@ -0,0 
+1,34 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../../../index"; +import * as Rivet from "../../../../../../../../api/index"; +import * as core from "../../../../../../../../core"; + +export const GetActorMetricsResponse: core.serialization.ObjectSchema< + serializers.actors.v1.GetActorMetricsResponse.Raw, + Rivet.actors.v1.GetActorMetricsResponse +> = core.serialization.object({ + actorIds: core.serialization.property("actor_ids", core.serialization.list(core.serialization.string())), + metricNames: core.serialization.property("metric_names", core.serialization.list(core.serialization.string())), + metricAttributes: core.serialization.property( + "metric_attributes", + core.serialization.list(core.serialization.record(core.serialization.string(), core.serialization.string())), + ), + metricTypes: core.serialization.property("metric_types", core.serialization.list(core.serialization.string())), + metricValues: core.serialization.property( + "metric_values", + core.serialization.list(core.serialization.list(core.serialization.number())), + ), +}); + +export declare namespace GetActorMetricsResponse { + export interface Raw { + actor_ids: string[]; + metric_names: string[]; + metric_attributes: Record[]; + metric_types: string[]; + metric_values: number[][]; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/metrics/types/index.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/metrics/types/index.ts new file mode 100644 index 0000000000..c5cf235d4d --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/resources/metrics/types/index.ts @@ -0,0 +1 @@ +export * from "./GetActorMetricsResponse"; diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorNetworkRequest.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorNetworkRequest.ts new file mode 100644 index 0000000000..4e2582d9dd --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorNetworkRequest.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { NetworkMode } from "../resources/common/types/NetworkMode"; +import { CreateActorPortRequest } from "./CreateActorPortRequest"; + +export const CreateActorNetworkRequest: core.serialization.ObjectSchema< + serializers.actors.v1.CreateActorNetworkRequest.Raw, + Rivet.actors.v1.CreateActorNetworkRequest +> = core.serialization.object({ + mode: NetworkMode.optional(), + ports: core.serialization.record(core.serialization.string(), CreateActorPortRequest).optional(), + waitReady: core.serialization.property("wait_ready", core.serialization.boolean().optional()), +}); + +export declare namespace CreateActorNetworkRequest { + export interface Raw { + mode?: NetworkMode.Raw | null; + ports?: Record | null; + wait_ready?: boolean | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorPortRequest.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorPortRequest.ts new file mode 100644 index 0000000000..956e489a0b --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorPortRequest.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { PortProtocol } from "../resources/common/types/PortProtocol"; +import { PortRouting } from "../resources/common/types/PortRouting"; + +export const CreateActorPortRequest: core.serialization.ObjectSchema< + serializers.actors.v1.CreateActorPortRequest.Raw, + Rivet.actors.v1.CreateActorPortRequest +> = core.serialization.object({ + protocol: PortProtocol, + internalPort: core.serialization.property("internal_port", core.serialization.number().optional()), + routing: PortRouting.optional(), +}); + +export declare namespace CreateActorPortRequest { + export interface Raw { + protocol: PortProtocol.Raw; + internal_port?: number | null; + routing?: PortRouting.Raw | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorRequest.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorRequest.ts new file mode 100644 index 0000000000..8316eb333f --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorRequest.ts @@ -0,0 +1,38 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { CreateActorRuntimeRequest } from "./CreateActorRuntimeRequest"; +import { CreateActorNetworkRequest } from "./CreateActorNetworkRequest"; +import { Resources } from "../resources/common/types/Resources"; +import { Lifecycle } from "../resources/common/types/Lifecycle"; + +export const CreateActorRequest: core.serialization.ObjectSchema< + serializers.actors.v1.CreateActorRequest.Raw, + Rivet.actors.v1.CreateActorRequest +> = core.serialization.object({ + region: core.serialization.string().optional(), + tags: core.serialization.unknown(), + build: core.serialization.string().optional(), + buildTags: core.serialization.property("build_tags", core.serialization.unknown().optional()), + runtime: CreateActorRuntimeRequest.optional(), + network: CreateActorNetworkRequest.optional(), + resources: Resources.optional(), + lifecycle: Lifecycle.optional(), +}); + +export declare namespace CreateActorRequest { + export interface Raw { + region?: string | null; + tags?: unknown; + build?: string | null; + build_tags?: unknown | null; + runtime?: CreateActorRuntimeRequest.Raw | null; + network?: CreateActorNetworkRequest.Raw | null; + resources?: Resources.Raw | null; + lifecycle?: Lifecycle.Raw | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorResponse.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorResponse.ts new file mode 100644 index 0000000000..d112bf8f15 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorResponse.ts @@ -0,0 +1,21 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { Actor } from "../resources/common/types/Actor"; + +export const CreateActorResponse: core.serialization.ObjectSchema< + serializers.actors.v1.CreateActorResponse.Raw, + Rivet.actors.v1.CreateActorResponse +> = core.serialization.object({ + actor: Actor, +}); + +export declare namespace CreateActorResponse { + export interface Raw { + actor: Actor.Raw; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorRuntimeNetworkRequest.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorRuntimeNetworkRequest.ts new file mode 100644 index 0000000000..0aa225df81 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorRuntimeNetworkRequest.ts @@ -0,0 +1,21 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { EndpointType } from "../resources/common/types/EndpointType"; + +export const CreateActorRuntimeNetworkRequest: core.serialization.ObjectSchema< + serializers.actors.v1.CreateActorRuntimeNetworkRequest.Raw, + Rivet.actors.v1.CreateActorRuntimeNetworkRequest +> = core.serialization.object({ + endpointType: core.serialization.property("endpoint_type", EndpointType), +}); + +export declare namespace CreateActorRuntimeNetworkRequest { + export interface Raw { + endpoint_type: EndpointType.Raw; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorRuntimeRequest.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorRuntimeRequest.ts new file mode 100644 index 0000000000..5170c4edea --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/CreateActorRuntimeRequest.ts @@ -0,0 +1,23 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { CreateActorRuntimeNetworkRequest } from "./CreateActorRuntimeNetworkRequest"; + +export const CreateActorRuntimeRequest: core.serialization.ObjectSchema< + serializers.actors.v1.CreateActorRuntimeRequest.Raw, + Rivet.actors.v1.CreateActorRuntimeRequest +> = core.serialization.object({ + environment: core.serialization.record(core.serialization.string(), core.serialization.string()).optional(), + network: CreateActorRuntimeNetworkRequest.optional(), +}); + +export declare namespace CreateActorRuntimeRequest { + export interface Raw { + environment?: Record | null; + network?: CreateActorRuntimeNetworkRequest.Raw | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/DestroyActorResponse.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/DestroyActorResponse.ts new file mode 100644 index 0000000000..fd92b7efa4 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/DestroyActorResponse.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const DestroyActorResponse: core.serialization.ObjectSchema< + serializers.actors.v1.DestroyActorResponse.Raw, + Rivet.actors.v1.DestroyActorResponse +> = core.serialization.object({}); + +export declare namespace DestroyActorResponse { + export interface Raw {} +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/GetActorResponse.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/GetActorResponse.ts new file mode 100644 index 0000000000..da663a18ec --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/GetActorResponse.ts @@ -0,0 +1,21 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { Actor } from "../resources/common/types/Actor"; + +export const GetActorResponse: core.serialization.ObjectSchema< + serializers.actors.v1.GetActorResponse.Raw, + Rivet.actors.v1.GetActorResponse +> = core.serialization.object({ + actor: Actor, +}); + +export declare namespace GetActorResponse { + export interface Raw { + actor: Actor.Raw; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/ListActorsResponse.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/ListActorsResponse.ts new file mode 100644 index 0000000000..6c4e7e7795 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/ListActorsResponse.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { Actor } from "../resources/common/types/Actor"; +import { Pagination } from "../../../../common/types/Pagination"; + +export const ListActorsResponse: core.serialization.ObjectSchema< + serializers.actors.v1.ListActorsResponse.Raw, + Rivet.actors.v1.ListActorsResponse +> = core.serialization.object({ + actors: core.serialization.list(Actor), + pagination: Pagination, +}); + +export declare namespace ListActorsResponse { + export interface Raw { + actors: Actor.Raw[]; + pagination: Pagination.Raw; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeActorRequest.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeActorRequest.ts new file mode 100644 index 0000000000..376cd57d31 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeActorRequest.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const UpgradeActorRequest: core.serialization.ObjectSchema< + serializers.actors.v1.UpgradeActorRequest.Raw, + Rivet.actors.v1.UpgradeActorRequest +> = core.serialization.object({ + build: core.serialization.string().optional(), + buildTags: core.serialization.property("build_tags", core.serialization.unknown().optional()), +}); + +export declare namespace UpgradeActorRequest { + export interface Raw { + build?: string | null; + build_tags?: unknown | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeActorResponse.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeActorResponse.ts new file mode 100644 index 0000000000..2e59baed7b --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeActorResponse.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const UpgradeActorResponse: core.serialization.ObjectSchema< + serializers.actors.v1.UpgradeActorResponse.Raw, + Rivet.actors.v1.UpgradeActorResponse +> = core.serialization.object({}); + +export declare namespace UpgradeActorResponse { + export interface Raw {} +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeAllActorsRequest.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeAllActorsRequest.ts new file mode 100644 index 0000000000..65759182e0 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeAllActorsRequest.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const UpgradeAllActorsRequest: core.serialization.ObjectSchema< + serializers.actors.v1.UpgradeAllActorsRequest.Raw, + Rivet.actors.v1.UpgradeAllActorsRequest +> = core.serialization.object({ + tags: core.serialization.unknown(), + build: core.serialization.string().optional(), + buildTags: core.serialization.property("build_tags", core.serialization.unknown().optional()), +}); + +export declare namespace UpgradeAllActorsRequest { + export interface Raw { + tags?: unknown; + build?: string | null; + build_tags?: unknown | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeAllActorsResponse.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeAllActorsResponse.ts new file mode 100644 index 0000000000..fd0ef63649 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/UpgradeAllActorsResponse.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const UpgradeAllActorsResponse: core.serialization.ObjectSchema< + serializers.actors.v1.UpgradeAllActorsResponse.Raw, + Rivet.actors.v1.UpgradeAllActorsResponse +> = core.serialization.object({ + count: core.serialization.number(), +}); + +export declare namespace UpgradeAllActorsResponse { + export interface Raw { + count: number; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/index.ts b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/index.ts new file mode 100644 index 0000000000..9aab9504b0 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/actors/resources/v1/types/index.ts @@ -0,0 +1,13 @@ +export * from "./GetActorResponse"; +export * from "./CreateActorRequest"; +export * from "./CreateActorRuntimeRequest"; +export * from "./CreateActorRuntimeNetworkRequest"; +export * from "./CreateActorNetworkRequest"; +export * from "./CreateActorPortRequest"; +export * from "./CreateActorResponse"; +export * from "./DestroyActorResponse"; +export * from "./UpgradeActorRequest"; +export * from "./UpgradeActorResponse"; +export * from "./UpgradeAllActorsRequest"; +export * from "./UpgradeAllActorsResponse"; +export * from "./ListActorsResponse"; diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/types/CreateActorRequest.ts b/sdks/api/full/typescript/src/serialization/resources/actors/types/CreateActorRequest.ts index 927681ed0d..564b80275e 100644 --- a/sdks/api/full/typescript/src/serialization/resources/actors/types/CreateActorRequest.ts +++ b/sdks/api/full/typescript/src/serialization/resources/actors/types/CreateActorRequest.ts @@ -7,7 +7,6 @@ import * as Rivet from "../../../../api/index"; import * as core from "../../../../core"; import { CreateActorRuntimeRequest } from "./CreateActorRuntimeRequest"; import { CreateActorNetworkRequest } from "./CreateActorNetworkRequest"; -import { Resources } from "../resources/common/types/Resources"; import { Lifecycle } from "../resources/common/types/Lifecycle"; export const CreateActorRequest: core.serialization.ObjectSchema< @@ -20,7 +19,6 @@ export const CreateActorRequest: core.serialization.ObjectSchema< buildTags: core.serialization.property("build_tags", core.serialization.unknown().optional()), runtime: CreateActorRuntimeRequest.optional(), network: CreateActorNetworkRequest.optional(), - resources: Resources.optional(), lifecycle: Lifecycle.optional(), }); @@ -32,7 +30,6 @@ export declare namespace CreateActorRequest { build_tags?: unknown | null; runtime?: CreateActorRuntimeRequest.Raw | null; network?: CreateActorNetworkRequest.Raw | null; - resources?: Resources.Raw | null; lifecycle?: Lifecycle.Raw | null; } } diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/index.ts b/sdks/api/full/typescript/src/serialization/resources/containers/index.ts new file mode 100644 index 0000000000..3ce0a3e38e --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from "./resources"; diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/index.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ 
b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Container.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Container.ts new file mode 100644 index 0000000000..003549295d --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Container.ts @@ -0,0 +1,44 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { Id } from "../../../../common/types/Id"; +import { Runtime } from "./Runtime"; +import { Network } from "./Network"; +import { Resources } from "./Resources"; +import { Lifecycle } from "./Lifecycle"; +import { Timestamp } from "../../../../common/types/Timestamp"; + +export const Container: core.serialization.ObjectSchema< + serializers.containers.Container.Raw, + Rivet.containers.Container +> = core.serialization.object({ + id: Id, + region: core.serialization.string(), + tags: core.serialization.unknown(), + runtime: Runtime, + network: Network, + resources: Resources, + lifecycle: Lifecycle, + createdAt: core.serialization.property("created_at", Timestamp), + startedAt: core.serialization.property("started_at", Timestamp.optional()), + destroyedAt: core.serialization.property("destroyed_at", Timestamp.optional()), +}); + +export declare namespace Container { + export interface Raw { + id: Id.Raw; + region: string; + tags?: unknown; + runtime: Runtime.Raw; + network: Network.Raw; + resources: Resources.Raw; + lifecycle: Lifecycle.Raw; + created_at: Timestamp.Raw; + started_at?: Timestamp.Raw | null; + destroyed_at?: Timestamp.Raw | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/EndpointType.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/EndpointType.ts new file mode 100644 index 0000000000..2897fe943a --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/EndpointType.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const EndpointType: core.serialization.Schema< + serializers.containers.EndpointType.Raw, + Rivet.containers.EndpointType +> = core.serialization.enum_(["hostname", "path"]); + +export declare namespace EndpointType { + export type Raw = "hostname" | "path"; +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/GuardRouting.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/GuardRouting.ts new file mode 100644 index 0000000000..19859fe146 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/GuardRouting.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const GuardRouting: core.serialization.ObjectSchema< + serializers.containers.GuardRouting.Raw, + Rivet.containers.GuardRouting +> = core.serialization.object({}); + +export declare namespace GuardRouting { + export interface Raw {} +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/HostRouting.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/HostRouting.ts new file mode 100644 index 0000000000..7af0c934be --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/HostRouting.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const HostRouting: core.serialization.ObjectSchema< + serializers.containers.HostRouting.Raw, + Rivet.containers.HostRouting +> = core.serialization.object({}); + +export declare namespace HostRouting { + export interface Raw {} +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Lifecycle.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Lifecycle.ts new file mode 100644 index 0000000000..dfce2b7327 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Lifecycle.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const Lifecycle: core.serialization.ObjectSchema< + serializers.containers.Lifecycle.Raw, + Rivet.containers.Lifecycle +> = core.serialization.object({ + killTimeout: core.serialization.property("kill_timeout", core.serialization.number().optional()), + durable: core.serialization.boolean().optional(), +}); + +export declare namespace Lifecycle { + export interface Raw { + kill_timeout?: number | null; + durable?: boolean | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Network.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Network.ts new file mode 100644 index 0000000000..8e4600735c --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Network.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { NetworkMode } from "./NetworkMode"; +import { Port } from "./Port"; + +export const Network: core.serialization.ObjectSchema = + core.serialization.object({ + mode: NetworkMode, + ports: core.serialization.record(core.serialization.string(), Port), + }); + +export declare namespace Network { + export interface Raw { + mode: NetworkMode.Raw; + ports: Record; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/NetworkMode.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/NetworkMode.ts new file mode 100644 index 0000000000..69d5581298 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/NetworkMode.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const NetworkMode: core.serialization.Schema< + serializers.containers.NetworkMode.Raw, + Rivet.containers.NetworkMode +> = core.serialization.enum_(["bridge", "host"]); + +export declare namespace NetworkMode { + export type Raw = "bridge" | "host"; +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Port.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Port.ts new file mode 100644 index 0000000000..3c9cb6d654 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Port.ts @@ -0,0 +1,32 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { PortProtocol } from "./PortProtocol"; +import { PortRouting } from "./PortRouting"; + +export const Port: core.serialization.ObjectSchema = + core.serialization.object({ + protocol: PortProtocol, + internalPort: core.serialization.property("internal_port", core.serialization.number().optional()), + hostname: core.serialization.string().optional(), + port: core.serialization.number().optional(), + path: core.serialization.string().optional(), + url: core.serialization.string().optional(), + routing: PortRouting, + }); + +export declare namespace Port { + export interface Raw { + protocol: PortProtocol.Raw; + internal_port?: number | null; + hostname?: string | null; + port?: number | null; + path?: string | null; + url?: string | null; + routing: PortRouting.Raw; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/PortProtocol.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/PortProtocol.ts new file mode 100644 index 0000000000..c45d0c9be2 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/PortProtocol.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const PortProtocol: core.serialization.Schema< + serializers.containers.PortProtocol.Raw, + Rivet.containers.PortProtocol +> = core.serialization.enum_(["http", "https", "tcp", "tcp_tls", "udp"]); + +export declare namespace PortProtocol { + export type Raw = "http" | "https" | "tcp" | "tcp_tls" | "udp"; +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/PortRouting.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/PortRouting.ts new file mode 100644 index 0000000000..e84aace739 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/PortRouting.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { GuardRouting } from "./GuardRouting"; +import { HostRouting } from "./HostRouting"; + +export const PortRouting: core.serialization.ObjectSchema< + serializers.containers.PortRouting.Raw, + Rivet.containers.PortRouting +> = core.serialization.object({ + guard: GuardRouting.optional(), + host: HostRouting.optional(), +}); + +export declare namespace PortRouting { + export interface Raw { + guard?: GuardRouting.Raw | null; + host?: HostRouting.Raw | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/actors/resources/common/types/Resources.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Resources.ts similarity index 58% rename from sdks/api/full/typescript/src/serialization/resources/actors/resources/common/types/Resources.ts rename to sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Resources.ts index 02d2b0188f..f2cdd4f846 100644 --- a/sdks/api/full/typescript/src/serialization/resources/actors/resources/common/types/Resources.ts +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Resources.ts @@ -6,11 +6,13 @@ import * as serializers from "../../../../../index"; import * as Rivet from "../../../../../../api/index"; import * as core from "../../../../../../core"; -export const Resources: core.serialization.ObjectSchema = - core.serialization.object({ - cpu: core.serialization.number(), - memory: core.serialization.number(), - }); +export const Resources: core.serialization.ObjectSchema< + serializers.containers.Resources.Raw, + Rivet.containers.Resources +> = core.serialization.object({ + cpu: core.serialization.number(), + memory: core.serialization.number(), +}); export declare namespace Resources { export interface Raw { diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Runtime.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Runtime.ts new file mode 100644 index 0000000000..2966bdc0d1 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/Runtime.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const Runtime: core.serialization.ObjectSchema = + core.serialization.object({ + build: core.serialization.string(), + arguments: core.serialization.list(core.serialization.string()).optional(), + environment: core.serialization.record(core.serialization.string(), core.serialization.string()).optional(), + }); + +export declare namespace Runtime { + export interface Raw { + build: string; + arguments?: string[] | null; + environment?: Record | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/index.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/index.ts new file mode 100644 index 0000000000..634dcaff02 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/common/types/index.ts @@ -0,0 +1,12 @@ +export * from "./Container"; +export * from "./Runtime"; +export * from "./Lifecycle"; +export * from "./Resources"; +export * from "./Network"; +export * from "./NetworkMode"; +export * from "./Port"; +export * from "./PortProtocol"; +export * from "./PortRouting"; +export * from "./GuardRouting"; +export * from "./HostRouting"; +export * from "./EndpointType"; diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/index.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/index.ts new file mode 100644 index 0000000000..3bb7903aa2 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/index.ts @@ -0,0 +1,6 @@ +export * as common from "./common"; +export * from "./common/types"; +export * as logs from "./logs"; +export * from "./logs/types"; +export * as metrics from "./metrics"; +export * from "./metrics/types"; diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/index.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/types/GetContainerLogsResponse.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/types/GetContainerLogsResponse.ts new file mode 100644 index 0000000000..53d8a03a79 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/types/GetContainerLogsResponse.ts @@ -0,0 +1,38 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { Id } from "../../../../common/types/Id"; +import { Timestamp } from "../../../../common/types/Timestamp"; +import { WatchResponse } from "../../../../common/types/WatchResponse"; + +export const GetContainerLogsResponse: core.serialization.ObjectSchema< + serializers.containers.GetContainerLogsResponse.Raw, + Rivet.containers.GetContainerLogsResponse +> = core.serialization.object({ + containerIds: core.serialization.property("container_ids", core.serialization.list(Id)), + lines: core.serialization.list(core.serialization.string()), + timestamps: core.serialization.list(Timestamp), + streams: core.serialization.list(core.serialization.number()), + foreigns: core.serialization.list(core.serialization.boolean()), + containerIndices: core.serialization.property( + "container_indices", + core.serialization.list(core.serialization.number()), + ), + watch: WatchResponse, +}); + +export declare namespace GetContainerLogsResponse { + export interface Raw { + container_ids: Id.Raw[]; + lines: string[]; + timestamps: Timestamp.Raw[]; + streams: number[]; + foreigns: boolean[]; + container_indices: number[]; + watch: WatchResponse.Raw; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/types/QueryLogStream.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/types/QueryLogStream.ts new file mode 100644 index 0000000000..c76ce49c84 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/types/QueryLogStream.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const QueryLogStream: core.serialization.Schema< + serializers.containers.QueryLogStream.Raw, + Rivet.containers.QueryLogStream +> = core.serialization.enum_(["std_out", "std_err", "all"]); + +export declare namespace QueryLogStream { + export type Raw = "std_out" | "std_err" | "all"; +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/types/index.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/types/index.ts new file mode 100644 index 0000000000..0b4e4957cf --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/logs/types/index.ts @@ -0,0 +1,2 @@ +export * from "./GetContainerLogsResponse"; +export * from "./QueryLogStream"; diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/metrics/index.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/metrics/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/metrics/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts new file mode 100644 index 0000000000..66f87c2ecc --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts @@ -0,0 +1,34 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const GetContainerMetricsResponse: core.serialization.ObjectSchema< + serializers.containers.GetContainerMetricsResponse.Raw, + Rivet.containers.GetContainerMetricsResponse +> = core.serialization.object({ + containerIds: core.serialization.property("container_ids", core.serialization.list(core.serialization.string())), + metricNames: core.serialization.property("metric_names", core.serialization.list(core.serialization.string())), + metricAttributes: core.serialization.property( + "metric_attributes", + core.serialization.list(core.serialization.record(core.serialization.string(), core.serialization.string())), + ), + metricTypes: core.serialization.property("metric_types", core.serialization.list(core.serialization.string())), + metricValues: core.serialization.property( + "metric_values", + core.serialization.list(core.serialization.list(core.serialization.number())), + ), +}); + +export declare namespace GetContainerMetricsResponse { + export interface Raw { + container_ids: string[]; + metric_names: string[]; + metric_attributes: Record[]; + metric_types: string[]; + metric_values: number[][]; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/resources/metrics/types/index.ts b/sdks/api/full/typescript/src/serialization/resources/containers/resources/metrics/types/index.ts new file mode 100644 index 0000000000..835db2ac20 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/resources/metrics/types/index.ts @@ -0,0 +1 @@ +export * from "./GetContainerMetricsResponse"; diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerNetworkRequest.ts b/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerNetworkRequest.ts new file mode 100644 index 0000000000..9da2cd4cc8 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerNetworkRequest.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { NetworkMode } from "../resources/common/types/NetworkMode"; +import { CreateContainerPortRequest } from "./CreateContainerPortRequest"; + +export const CreateContainerNetworkRequest: core.serialization.ObjectSchema< + serializers.containers.CreateContainerNetworkRequest.Raw, + Rivet.containers.CreateContainerNetworkRequest +> = core.serialization.object({ + mode: NetworkMode.optional(), + ports: core.serialization.record(core.serialization.string(), CreateContainerPortRequest).optional(), + waitReady: core.serialization.property("wait_ready", core.serialization.boolean().optional()), +}); + +export declare namespace CreateContainerNetworkRequest { + export interface Raw { + mode?: NetworkMode.Raw | null; + ports?: Record | null; + wait_ready?: boolean | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerPortRequest.ts b/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerPortRequest.ts new file mode 100644 index 0000000000..9016ed4e9f --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerPortRequest.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { PortProtocol } from "../resources/common/types/PortProtocol"; +import { PortRouting } from "../resources/common/types/PortRouting"; + +export const CreateContainerPortRequest: core.serialization.ObjectSchema< + serializers.containers.CreateContainerPortRequest.Raw, + Rivet.containers.CreateContainerPortRequest +> = core.serialization.object({ + protocol: PortProtocol, + internalPort: core.serialization.property("internal_port", core.serialization.number().optional()), + routing: PortRouting.optional(), +}); + +export declare namespace CreateContainerPortRequest { + export interface Raw { + protocol: PortProtocol.Raw; + internal_port?: number | null; + routing?: PortRouting.Raw | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerRequest.ts b/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerRequest.ts new file mode 100644 index 0000000000..d929186141 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerRequest.ts @@ -0,0 +1,38 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { CreateContainerRuntimeRequest } from "./CreateContainerRuntimeRequest"; +import { CreateContainerNetworkRequest } from "./CreateContainerNetworkRequest"; +import { Resources } from "../resources/common/types/Resources"; +import { Lifecycle } from "../resources/common/types/Lifecycle"; + +export const CreateContainerRequest: core.serialization.ObjectSchema< + serializers.containers.CreateContainerRequest.Raw, + Rivet.containers.CreateContainerRequest +> = core.serialization.object({ + region: core.serialization.string().optional(), + tags: core.serialization.unknown(), + build: core.serialization.string().optional(), + buildTags: core.serialization.property("build_tags", core.serialization.unknown().optional()), + runtime: CreateContainerRuntimeRequest.optional(), + network: CreateContainerNetworkRequest.optional(), + resources: Resources, + lifecycle: Lifecycle.optional(), +}); + +export declare namespace CreateContainerRequest { + export interface Raw { + region?: string | null; + tags?: unknown; + build?: string | null; + build_tags?: unknown | null; + runtime?: CreateContainerRuntimeRequest.Raw | null; + network?: CreateContainerNetworkRequest.Raw | null; + resources: Resources.Raw; + lifecycle?: Lifecycle.Raw | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerResponse.ts b/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerResponse.ts new file mode 100644 index 0000000000..91e5c48b1f --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerResponse.ts @@ -0,0 +1,21 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { Container } from "../resources/common/types/Container"; + +export const CreateContainerResponse: core.serialization.ObjectSchema< + serializers.containers.CreateContainerResponse.Raw, + Rivet.containers.CreateContainerResponse +> = core.serialization.object({ + container: Container, +}); + +export declare namespace CreateContainerResponse { + export interface Raw { + container: Container.Raw; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts b/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts new file mode 100644 index 0000000000..adc30398c8 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts @@ -0,0 +1,21 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { EndpointType } from "../resources/common/types/EndpointType"; + +export const CreateContainerRuntimeNetworkRequest: core.serialization.ObjectSchema< + serializers.containers.CreateContainerRuntimeNetworkRequest.Raw, + Rivet.containers.CreateContainerRuntimeNetworkRequest +> = core.serialization.object({ + endpointType: core.serialization.property("endpoint_type", EndpointType), +}); + +export declare namespace CreateContainerRuntimeNetworkRequest { + export interface Raw { + endpoint_type: EndpointType.Raw; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeRequest.ts b/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeRequest.ts new file mode 100644 index 0000000000..b4cd6b0c0e --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeRequest.ts @@ -0,0 +1,23 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { CreateContainerRuntimeNetworkRequest } from "./CreateContainerRuntimeNetworkRequest"; + +export const CreateContainerRuntimeRequest: core.serialization.ObjectSchema< + serializers.containers.CreateContainerRuntimeRequest.Raw, + Rivet.containers.CreateContainerRuntimeRequest +> = core.serialization.object({ + environment: core.serialization.record(core.serialization.string(), core.serialization.string()).optional(), + network: CreateContainerRuntimeNetworkRequest.optional(), +}); + +export declare namespace CreateContainerRuntimeRequest { + export interface Raw { + environment?: Record | null; + network?: CreateContainerRuntimeNetworkRequest.Raw | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/types/DestroyContainerResponse.ts b/sdks/api/full/typescript/src/serialization/resources/containers/types/DestroyContainerResponse.ts new file mode 100644 index 0000000000..d658a62fdf --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/types/DestroyContainerResponse.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; + +export const DestroyContainerResponse: core.serialization.ObjectSchema< + serializers.containers.DestroyContainerResponse.Raw, + Rivet.containers.DestroyContainerResponse +> = core.serialization.object({}); + +export declare namespace DestroyContainerResponse { + export interface Raw {} +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/types/GetContainerResponse.ts b/sdks/api/full/typescript/src/serialization/resources/containers/types/GetContainerResponse.ts new file mode 100644 index 0000000000..e2c540bd61 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/types/GetContainerResponse.ts @@ -0,0 +1,21 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { Container } from "../resources/common/types/Container"; + +export const GetContainerResponse: core.serialization.ObjectSchema< + serializers.containers.GetContainerResponse.Raw, + Rivet.containers.GetContainerResponse +> = core.serialization.object({ + container: Container, +}); + +export declare namespace GetContainerResponse { + export interface Raw { + container: Container.Raw; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/types/ListContainersResponse.ts b/sdks/api/full/typescript/src/serialization/resources/containers/types/ListContainersResponse.ts new file mode 100644 index 0000000000..b2b6744862 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/types/ListContainersResponse.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { Container } from "../resources/common/types/Container"; +import { Pagination } from "../../common/types/Pagination"; + +export const ListContainersResponse: core.serialization.ObjectSchema< + serializers.containers.ListContainersResponse.Raw, + Rivet.containers.ListContainersResponse +> = core.serialization.object({ + containers: core.serialization.list(Container), + pagination: Pagination, +}); + +export declare namespace ListContainersResponse { + export interface Raw { + containers: Container.Raw[]; + pagination: Pagination.Raw; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeAllContainersRequest.ts b/sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeAllContainersRequest.ts new file mode 100644 index 0000000000..438ebaa375 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeAllContainersRequest.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; + +export const UpgradeAllContainersRequest: core.serialization.ObjectSchema< + serializers.containers.UpgradeAllContainersRequest.Raw, + Rivet.containers.UpgradeAllContainersRequest +> = core.serialization.object({ + tags: core.serialization.unknown(), + build: core.serialization.string().optional(), + buildTags: core.serialization.property("build_tags", core.serialization.unknown().optional()), +}); + +export declare namespace UpgradeAllContainersRequest { + export interface Raw { + tags?: unknown; + build?: string | null; + build_tags?: unknown | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeAllContainersResponse.ts b/sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeAllContainersResponse.ts new file mode 100644 index 0000000000..2b283e1f7e --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeAllContainersResponse.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; + +export const UpgradeAllContainersResponse: core.serialization.ObjectSchema< + serializers.containers.UpgradeAllContainersResponse.Raw, + Rivet.containers.UpgradeAllContainersResponse +> = core.serialization.object({ + count: core.serialization.number(), +}); + +export declare namespace UpgradeAllContainersResponse { + export interface Raw { + count: number; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeContainerRequest.ts b/sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeContainerRequest.ts new file mode 100644 index 0000000000..c13050f711 --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeContainerRequest.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; + +export const UpgradeContainerRequest: core.serialization.ObjectSchema< + serializers.containers.UpgradeContainerRequest.Raw, + Rivet.containers.UpgradeContainerRequest +> = core.serialization.object({ + build: core.serialization.string().optional(), + buildTags: core.serialization.property("build_tags", core.serialization.unknown().optional()), +}); + +export declare namespace UpgradeContainerRequest { + export interface Raw { + build?: string | null; + build_tags?: unknown | null; + } +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeContainerResponse.ts b/sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeContainerResponse.ts new file mode 100644 index 0000000000..e88f337dce --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/types/UpgradeContainerResponse.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; + +export const UpgradeContainerResponse: core.serialization.ObjectSchema< + serializers.containers.UpgradeContainerResponse.Raw, + Rivet.containers.UpgradeContainerResponse +> = core.serialization.object({}); + +export declare namespace UpgradeContainerResponse { + export interface Raw {} +} diff --git a/sdks/api/full/typescript/src/serialization/resources/containers/types/index.ts b/sdks/api/full/typescript/src/serialization/resources/containers/types/index.ts new file mode 100644 index 0000000000..436ea216ac --- /dev/null +++ b/sdks/api/full/typescript/src/serialization/resources/containers/types/index.ts @@ -0,0 +1,13 @@ +export * from "./GetContainerResponse"; +export * from "./CreateContainerRequest"; +export * from "./CreateContainerRuntimeRequest"; +export * from "./CreateContainerRuntimeNetworkRequest"; +export * from "./CreateContainerNetworkRequest"; +export * from "./CreateContainerPortRequest"; +export * from "./CreateContainerResponse"; +export * from "./DestroyContainerResponse"; +export * from "./UpgradeContainerRequest"; +export * from "./UpgradeContainerResponse"; +export * from "./UpgradeAllContainersRequest"; +export * from "./UpgradeAllContainersResponse"; +export * from "./ListContainersResponse"; diff --git a/sdks/api/full/typescript/src/serialization/resources/index.ts b/sdks/api/full/typescript/src/serialization/resources/index.ts index b25110b2b1..6fb2a20ec1 100644 --- a/sdks/api/full/typescript/src/serialization/resources/index.ts +++ b/sdks/api/full/typescript/src/serialization/resources/index.ts @@ -1,6 +1,7 @@ export * as actors from "./actors"; export * as builds from "./builds"; export * as cloud from "./cloud"; +export * as containers from "./containers"; export * as coreIntercom from "./coreIntercom"; export * as edgeIntercom from "./edgeIntercom"; export * as group from "./group"; diff --git a/sdks/api/runtime/go/actors/actors.go b/sdks/api/runtime/go/actors/actors.go index 353a82855a..bab2fcb7b2 100644 --- a/sdks/api/runtime/go/actors/actors.go +++ b/sdks/api/runtime/go/actors/actors.go @@ -17,7 +17,6 @@ type CreateActorRequest struct { BuildTags interface{} `json:"build_tags,omitempty"` Runtime *CreateActorRuntimeRequest `json:"runtime,omitempty"` Network *CreateActorNetworkRequest `json:"network,omitempty"` - Resources *Resources `json:"resources,omitempty"` Lifecycle *Lifecycle `json:"lifecycle,omitempty"` _rawJSON json.RawMessage diff --git a/sdks/api/runtime/go/actors/client/client.go b/sdks/api/runtime/go/actors/client/client.go index dadb1e321f..05cc263bb2 100644 --- a/sdks/api/runtime/go/actors/client/client.go +++ b/sdks/api/runtime/go/actors/client/client.go @@ -14,6 +14,7 @@ import ( sdk "sdk" actors "sdk/actors" logs "sdk/actors/logs" + metrics "sdk/actors/metrics" core "sdk/core" ) @@ -22,7 +23,8 @@ type Client struct { caller *core.Caller header http.Header - Logs *logs.Client + Logs *logs.Client + Metrics *metrics.Client } func NewClient(opts ...core.ClientOption) *Client { @@ -35,6 +37,7 @@ func NewClient(opts ...core.ClientOption) *Client { caller: core.NewCaller(options.HTTPClient), header: options.ToHeader(), Logs: logs.NewClient(opts...), + Metrics: metrics.NewClient(opts...), } } @@ -46,7 +49,7 @@ func (c *Client) Get(ctx context.Context, actor sdk.Id, request *actors.ListActo if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"actors/%v", actor) + 
endpointURL := fmt.Sprintf(baseURL+"/"+"v2/actors/%v", actor) queryParams := make(url.Values) if request.Project != nil { @@ -138,7 +141,7 @@ func (c *Client) List(ctx context.Context, request *actors.GetActorsRequestQuery if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "actors" + endpointURL := baseURL + "/" + "v2/actors" queryParams := make(url.Values) if request.Project != nil { @@ -239,7 +242,7 @@ func (c *Client) Create(ctx context.Context, request *actors.CreateActorRequestQ if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "actors" + endpointURL := baseURL + "/" + "v2/actors" queryParams := make(url.Values) if request.Project != nil { @@ -334,7 +337,7 @@ func (c *Client) Destroy(ctx context.Context, actor sdk.Id, request *actors.Dest if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"actors/%v", actor) + endpointURL := fmt.Sprintf(baseURL+"/"+"v2/actors/%v", actor) queryParams := make(url.Values) if request.Project != nil { @@ -428,7 +431,7 @@ func (c *Client) Upgrade(ctx context.Context, actor sdk.Id, request *actors.Upgr if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"actors/%v/upgrade", actor) + endpointURL := fmt.Sprintf(baseURL+"/"+"v2/actors/%v/upgrade", actor) queryParams := make(url.Values) if request.Project != nil { @@ -518,7 +521,7 @@ func (c *Client) UpgradeAll(ctx context.Context, request *actors.UpgradeAllActor if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "actors/upgrade" + endpointURL := baseURL + "/" + "v2/actors/upgrade" queryParams := make(url.Values) if request.Project != nil { diff --git a/sdks/api/runtime/go/actors/logs.go b/sdks/api/runtime/go/actors/logs.go index a2de673b79..0b026abb28 100644 --- a/sdks/api/runtime/go/actors/logs.go +++ b/sdks/api/runtime/go/actors/logs.go @@ -57,7 +57,7 @@ func (e *ExportActorLogsResponse) String() string { type GetActorLogsResponse struct { // List of actor IDs in these logs. The order of these correspond to the index in the log entry. - ActorIds []sdk.Id `json:"actor_ids,omitempty"` + ActorIds []string `json:"actor_ids,omitempty"` // Sorted old to new. Lines []string `json:"lines,omitempty"` // Sorted old to new. diff --git a/sdks/api/runtime/go/actors/logs/client.go b/sdks/api/runtime/go/actors/logs/client.go index 7906455cb4..64ecec339b 100644 --- a/sdks/api/runtime/go/actors/logs/client.go +++ b/sdks/api/runtime/go/actors/logs/client.go @@ -40,7 +40,7 @@ func (c *Client) Get(ctx context.Context, request *actors.GetActorLogsRequestQue if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "actors/logs" + endpointURL := baseURL + "/" + "v2/actors/logs" queryParams := make(url.Values) if request.Project != nil { diff --git a/sdks/api/runtime/go/actors/metrics.go b/sdks/api/runtime/go/actors/metrics.go new file mode 100644 index 0000000000..35bcd7b764 --- /dev/null +++ b/sdks/api/runtime/go/actors/metrics.go @@ -0,0 +1,50 @@ +// This file was auto-generated by Fern from our API Definition. 
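For reference, a minimal sketch of calling the relocated `/v2/actors` endpoints through the generated runtime client, using the metrics subclient registered in the hunk above (its `Get` method appears in `sdk/actors/metrics` below). This is illustrative only: the import paths assume the local `sdk` module layout used by these generated files, and the actor ID and time range are placeholders.

```go
// Illustrative sketch only (not part of the patch). Assumes the local "sdk"
// module layout used by the generated files; actor ID and time range are
// placeholders.
package main

import (
	"context"
	"fmt"
	"time"

	sdk "sdk"
	actors "sdk/actors"
	actorsclient "sdk/actors/client"
)

func main() {
	// Defaults to https://api.rivet.gg; pass core.ClientOption values to override.
	c := actorsclient.NewClient()

	var actorID sdk.Id // e.g. taken from a previous List/Create response

	end := int(time.Now().UnixMilli())
	start := end - 3_600_000 // last hour

	// GET /v2/actors/{actor}/metrics/history
	resp, err := c.Metrics.Get(context.Background(), actorID, &actors.GetActorMetricsRequestQuery{
		Start:    start,
		End:      end,
		Interval: 60_000, // one data point per minute
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.MetricNames)
}
```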
+ +package actors + +import ( + json "encoding/json" + fmt "fmt" + core "sdk/core" +) + +type GetActorMetricsRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + Start int `json:"-"` + End int `json:"-"` + Interval int `json:"-"` +} + +type GetActorMetricsResponse struct { + ActorIds []string `json:"actor_ids,omitempty"` + MetricNames []string `json:"metric_names,omitempty"` + MetricAttributes []map[string]string `json:"metric_attributes,omitempty"` + MetricTypes []string `json:"metric_types,omitempty"` + MetricValues [][]float64 `json:"metric_values,omitempty"` + + _rawJSON json.RawMessage +} + +func (g *GetActorMetricsResponse) UnmarshalJSON(data []byte) error { + type unmarshaler GetActorMetricsResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GetActorMetricsResponse(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GetActorMetricsResponse) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} diff --git a/sdks/api/runtime/go/actors/metrics/client.go b/sdks/api/runtime/go/actors/metrics/client.go new file mode 100644 index 0000000000..54509951b0 --- /dev/null +++ b/sdks/api/runtime/go/actors/metrics/client.go @@ -0,0 +1,129 @@ +// This file was auto-generated by Fern from our API Definition. + +package metrics + +import ( + bytes "bytes" + context "context" + json "encoding/json" + errors "errors" + fmt "fmt" + io "io" + http "net/http" + url "net/url" + sdk "sdk" + actors "sdk/actors" + core "sdk/core" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + } +} + +// Returns the metrics for a given actor. +// +// The id of the actor to destroy +func (c *Client) Get(ctx context.Context, actor sdk.Id, request *actors.GetActorMetricsRequestQuery) (*actors.GetActorMetricsResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v2/actors/%v/metrics/history", actor) + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + queryParams.Add("start", fmt.Sprintf("%v", request.Start)) + queryParams.Add("end", fmt.Sprintf("%v", request.End)) + queryParams.Add("interval", fmt.Sprintf("%v", request.Interval)) + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *actors.GetActorMetricsResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} diff --git a/sdks/api/runtime/go/actors/types.go b/sdks/api/runtime/go/actors/types.go index c6795304b1..b5af2044e9 100644 --- a/sdks/api/runtime/go/actors/types.go +++ b/sdks/api/runtime/go/actors/types.go @@ -67,7 +67,6 @@ type Actor struct { Tags interface{} `json:"tags,omitempty"` Runtime *Runtime `json:"runtime,omitempty"` Network *Network `json:"network,omitempty"` - Resources *Resources `json:"resources,omitempty"` Lifecycle *Lifecycle `json:"lifecycle,omitempty"` CreatedAt sdk.Timestamp `json:"created_at"` StartedAt *sdk.Timestamp `json:"started_at,omitempty"` @@ -356,40 +355,6 @@ func (p *PortRouting) String() string { return fmt.Sprintf("%#v", p) } -type Resources struct { - // The number of CPU cores in millicores, or 1/1000 of a core. For example, - // 1/8 of a core would be 125 millicores, and 1 core would be 1000 - // millicores. 
- Cpu int `json:"cpu"` - // The amount of memory in megabytes - Memory int `json:"memory"` - - _rawJSON json.RawMessage -} - -func (r *Resources) UnmarshalJSON(data []byte) error { - type unmarshaler Resources - var value unmarshaler - if err := json.Unmarshal(data, &value); err != nil { - return err - } - *r = Resources(value) - r._rawJSON = json.RawMessage(data) - return nil -} - -func (r *Resources) String() string { - if len(r._rawJSON) > 0 { - if value, err := core.StringifyJSON(r._rawJSON); err == nil { - return value - } - } - if value, err := core.StringifyJSON(r); err == nil { - return value - } - return fmt.Sprintf("%#v", r) -} - type Runtime struct { Build uuid.UUID `json:"build"` Arguments []string `json:"arguments,omitempty"` diff --git a/sdks/api/runtime/go/client/client.go b/sdks/api/runtime/go/client/client.go index c3bb9480ff..74618e9f69 100644 --- a/sdks/api/runtime/go/client/client.go +++ b/sdks/api/runtime/go/client/client.go @@ -6,6 +6,7 @@ import ( http "net/http" actorsclient "sdk/actors/client" buildsclient "sdk/builds/client" + containersclient "sdk/containers/client" core "sdk/core" regionsclient "sdk/regions/client" routesclient "sdk/routes/client" @@ -16,10 +17,11 @@ type Client struct { caller *core.Caller header http.Header - Actors *actorsclient.Client - Builds *buildsclient.Client - Regions *regionsclient.Client - Routes *routesclient.Client + Actors *actorsclient.Client + Builds *buildsclient.Client + Containers *containersclient.Client + Regions *regionsclient.Client + Routes *routesclient.Client } func NewClient(opts ...core.ClientOption) *Client { @@ -28,12 +30,13 @@ func NewClient(opts ...core.ClientOption) *Client { opt(options) } return &Client{ - baseURL: options.BaseURL, - caller: core.NewCaller(options.HTTPClient), - header: options.ToHeader(), - Actors: actorsclient.NewClient(opts...), - Builds: buildsclient.NewClient(opts...), - Regions: regionsclient.NewClient(opts...), - Routes: routesclient.NewClient(opts...), + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + Actors: actorsclient.NewClient(opts...), + Builds: buildsclient.NewClient(opts...), + Containers: containersclient.NewClient(opts...), + Regions: regionsclient.NewClient(opts...), + Routes: routesclient.NewClient(opts...), } } diff --git a/sdks/api/runtime/go/containers/client/client.go b/sdks/api/runtime/go/containers/client/client.go new file mode 100644 index 0000000000..4e6244b72a --- /dev/null +++ b/sdks/api/runtime/go/containers/client/client.go @@ -0,0 +1,606 @@ +// This file was auto-generated by Fern from our API Definition. + +package client + +import ( + bytes "bytes" + context "context" + json "encoding/json" + errors "errors" + fmt "fmt" + io "io" + http "net/http" + url "net/url" + sdk "sdk" + containers "sdk/containers" + logs "sdk/containers/logs" + metrics "sdk/containers/metrics" + core "sdk/core" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header + + Logs *logs.Client + Metrics *metrics.Client +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + Logs: logs.NewClient(opts...), + Metrics: metrics.NewClient(opts...), + } +} + +// Gets a container. 
+// +// The id of the container to destroy +func (c *Client) Get(ctx context.Context, container sdk.Id, request *containers.ListContainersRequestQuery) (*containers.GetContainerResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/containers/%v", container) + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if request.EndpointType != nil { + queryParams.Add("endpoint_type", fmt.Sprintf("%v", *request.EndpointType)) + } + if len(queryParams) > 0 { + endpointURL += "?" + queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.GetContainerResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Lists all containers associated with the token used. Can be filtered by tags in the query string. +func (c *Client) List(ctx context.Context, request *containers.GetContainersRequestQuery) (*containers.ListContainersResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "v1/containers" + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if request.EndpointType != nil { + queryParams.Add("endpoint_type", fmt.Sprintf("%v", *request.EndpointType)) + } + if request.TagsJson != nil { + queryParams.Add("tags_json", fmt.Sprintf("%v", *request.TagsJson)) + } + if request.IncludeDestroyed != nil { + queryParams.Add("include_destroyed", fmt.Sprintf("%v", *request.IncludeDestroyed)) + } + if request.Cursor != nil { + queryParams.Add("cursor", fmt.Sprintf("%v", *request.Cursor)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.ListContainersResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Create a new container. +func (c *Client) Create(ctx context.Context, request *containers.CreateContainerRequestQuery) (*containers.CreateContainerResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "v1/containers" + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if request.EndpointType != nil { + queryParams.Add("endpoint_type", fmt.Sprintf("%v", *request.EndpointType)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.CreateContainerResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodPost, + Headers: c.header, + Request: request, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Destroy a container. +// +// The id of the container to destroy +func (c *Client) Destroy(ctx context.Context, container sdk.Id, request *containers.DestroyContainerRequestQuery) (*containers.DestroyContainerResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/containers/%v", container) + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if request.OverrideKillTimeout != nil { + queryParams.Add("override_kill_timeout", fmt.Sprintf("%v", *request.OverrideKillTimeout)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.DestroyContainerResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodDelete, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Upgrades a container. +// +// The id of the container to upgrade +func (c *Client) Upgrade(ctx context.Context, container sdk.Id, request *containers.UpgradeContainerRequestQuery) (*containers.UpgradeContainerResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/containers/%v/upgrade", container) + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.UpgradeContainerResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodPost, + Headers: c.header, + Request: request, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} + +// Upgrades all containers matching the given tags. +func (c *Client) UpgradeAll(ctx context.Context, request *containers.UpgradeAllContainersRequestQuery) (*containers.UpgradeAllContainersResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "v1/containers/upgrade" + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.UpgradeAllContainersResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodPost, + Headers: c.header, + Request: request, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} diff --git a/sdks/api/runtime/go/containers/containers.go b/sdks/api/runtime/go/containers/containers.go new file mode 100644 index 0000000000..63b15b5a86 --- /dev/null +++ b/sdks/api/runtime/go/containers/containers.go @@ -0,0 +1,281 @@ +// This file was auto-generated by Fern from our API Definition. 
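A minimal usage sketch for the new Containers client (the `/v1/containers` endpoints above). Illustrative only: `containers.Resources` is assumed to keep the `{ Cpu, Memory }` shape that this patch removes from the actors package, and the project, environment, and tag values are placeholders.

```go
// Illustrative sketch only (not part of the patch). Assumes containers.Resources
// mirrors the Resources type removed from the actors package; project,
// environment, and tag values are placeholders.
package main

import (
	"context"
	"fmt"

	containers "sdk/containers"
	containersclient "sdk/containers/client"
)

func main() {
	c := containersclient.NewClient()

	project := "my-project"
	environment := "prod"

	// POST /v1/containers?project=...&environment=...
	resp, err := c.Create(context.Background(), &containers.CreateContainerRequestQuery{
		Project:     &project,
		Environment: &environment,
		Body: &containers.CreateContainerRequest{
			Tags:      map[string]string{"name": "example"},
			BuildTags: map[string]string{"name": "game-server", "current": "true"},
			// Assumed field shape (mirrors the Resources type removed from actors).
			Resources: &containers.Resources{Cpu: 1000, Memory: 1024},
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Container.Id)
}
```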
+ +package containers + +import ( + json "encoding/json" + fmt "fmt" + uuid "github.com/google/uuid" + sdk "sdk" + core "sdk/core" +) + +type CreateContainerRequest struct { + Region *string `json:"region,omitempty"` + Tags interface{} `json:"tags,omitempty"` + Build *uuid.UUID `json:"build,omitempty"` + BuildTags interface{} `json:"build_tags,omitempty"` + Runtime *CreateContainerRuntimeRequest `json:"runtime,omitempty"` + Network *CreateContainerNetworkRequest `json:"network,omitempty"` + Resources *Resources `json:"resources,omitempty"` + Lifecycle *Lifecycle `json:"lifecycle,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateContainerRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateContainerRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateContainerRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateContainerRequest) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type CreateContainerResponse struct { + // The container that was created + Container *Container `json:"container,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateContainerResponse) UnmarshalJSON(data []byte) error { + type unmarshaler CreateContainerResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateContainerResponse(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateContainerResponse) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type DestroyContainerResponse struct { + _rawJSON json.RawMessage +} + +func (d *DestroyContainerResponse) UnmarshalJSON(data []byte) error { + type unmarshaler DestroyContainerResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *d = DestroyContainerResponse(value) + d._rawJSON = json.RawMessage(data) + return nil +} + +func (d *DestroyContainerResponse) String() string { + if len(d._rawJSON) > 0 { + if value, err := core.StringifyJSON(d._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(d); err == nil { + return value + } + return fmt.Sprintf("%#v", d) +} + +type GetContainerResponse struct { + Container *Container `json:"container,omitempty"` + + _rawJSON json.RawMessage +} + +func (g *GetContainerResponse) UnmarshalJSON(data []byte) error { + type unmarshaler GetContainerResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GetContainerResponse(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GetContainerResponse) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} + +type ListContainersResponse struct { + // A list of containers for the project associated with the token. 
+ Containers []*Container `json:"containers,omitempty"` + Pagination *sdk.Pagination `json:"pagination,omitempty"` + + _rawJSON json.RawMessage +} + +func (l *ListContainersResponse) UnmarshalJSON(data []byte) error { + type unmarshaler ListContainersResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *l = ListContainersResponse(value) + l._rawJSON = json.RawMessage(data) + return nil +} + +func (l *ListContainersResponse) String() string { + if len(l._rawJSON) > 0 { + if value, err := core.StringifyJSON(l._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(l); err == nil { + return value + } + return fmt.Sprintf("%#v", l) +} + +type UpgradeAllContainersRequest struct { + Tags interface{} `json:"tags,omitempty"` + Build *uuid.UUID `json:"build,omitempty"` + BuildTags interface{} `json:"build_tags,omitempty"` + + _rawJSON json.RawMessage +} + +func (u *UpgradeAllContainersRequest) UnmarshalJSON(data []byte) error { + type unmarshaler UpgradeAllContainersRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *u = UpgradeAllContainersRequest(value) + u._rawJSON = json.RawMessage(data) + return nil +} + +func (u *UpgradeAllContainersRequest) String() string { + if len(u._rawJSON) > 0 { + if value, err := core.StringifyJSON(u._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(u); err == nil { + return value + } + return fmt.Sprintf("%#v", u) +} + +type UpgradeAllContainersResponse struct { + Count int64 `json:"count"` + + _rawJSON json.RawMessage +} + +func (u *UpgradeAllContainersResponse) UnmarshalJSON(data []byte) error { + type unmarshaler UpgradeAllContainersResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *u = UpgradeAllContainersResponse(value) + u._rawJSON = json.RawMessage(data) + return nil +} + +func (u *UpgradeAllContainersResponse) String() string { + if len(u._rawJSON) > 0 { + if value, err := core.StringifyJSON(u._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(u); err == nil { + return value + } + return fmt.Sprintf("%#v", u) +} + +type UpgradeContainerRequest struct { + Build *uuid.UUID `json:"build,omitempty"` + BuildTags interface{} `json:"build_tags,omitempty"` + + _rawJSON json.RawMessage +} + +func (u *UpgradeContainerRequest) UnmarshalJSON(data []byte) error { + type unmarshaler UpgradeContainerRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *u = UpgradeContainerRequest(value) + u._rawJSON = json.RawMessage(data) + return nil +} + +func (u *UpgradeContainerRequest) String() string { + if len(u._rawJSON) > 0 { + if value, err := core.StringifyJSON(u._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(u); err == nil { + return value + } + return fmt.Sprintf("%#v", u) +} + +type UpgradeContainerResponse struct { + _rawJSON json.RawMessage +} + +func (u *UpgradeContainerResponse) UnmarshalJSON(data []byte) error { + type unmarshaler UpgradeContainerResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *u = UpgradeContainerResponse(value) + u._rawJSON = json.RawMessage(data) + return nil +} + +func (u *UpgradeContainerResponse) String() string { + if len(u._rawJSON) > 0 { + if value, err := core.StringifyJSON(u._rawJSON); err == nil { + return value + } + } + if value, err 
:= core.StringifyJSON(u); err == nil { + return value + } + return fmt.Sprintf("%#v", u) +} diff --git a/sdks/api/runtime/go/containers/logs.go b/sdks/api/runtime/go/containers/logs.go new file mode 100644 index 0000000000..3eec84bace --- /dev/null +++ b/sdks/api/runtime/go/containers/logs.go @@ -0,0 +1,91 @@ +// This file was auto-generated by Fern from our API Definition. + +package containers + +import ( + json "encoding/json" + fmt "fmt" + sdk "sdk" + core "sdk/core" +) + +type GetContainerLogsRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + Stream QueryLogStream `json:"-"` + ContainerIdsJson string `json:"-"` + SearchText *string `json:"-"` + SearchCaseSensitive *bool `json:"-"` + SearchEnableRegex *bool `json:"-"` + // A query parameter denoting the requests watch index. + WatchIndex *string `json:"-"` +} + +type GetContainerLogsResponse struct { + // List of container IDs in these logs. The order of these correspond to the index in the log entry. + ContainerIds []sdk.Id `json:"container_ids,omitempty"` + // Sorted old to new. + Lines []string `json:"lines,omitempty"` + // Sorted old to new. + Timestamps []sdk.Timestamp `json:"timestamps,omitempty"` + // Streams the logs came from. + // + // 0 = stdout + // 1 = stderr + Streams []int `json:"streams,omitempty"` + // List of flags denoting if this log is not directly from the container. + Foreigns []bool `json:"foreigns,omitempty"` + // Index of the container that this log was for. Use this index to look the full ID in `container_ids`. + ContainerIndices []int `json:"container_indices,omitempty"` + Watch *sdk.WatchResponse `json:"watch,omitempty"` + + _rawJSON json.RawMessage +} + +func (g *GetContainerLogsResponse) UnmarshalJSON(data []byte) error { + type unmarshaler GetContainerLogsResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GetContainerLogsResponse(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GetContainerLogsResponse) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} + +type QueryLogStream string + +const ( + QueryLogStreamStdOut QueryLogStream = "std_out" + QueryLogStreamStdErr QueryLogStream = "std_err" + QueryLogStreamAll QueryLogStream = "all" +) + +func NewQueryLogStreamFromString(s string) (QueryLogStream, error) { + switch s { + case "std_out": + return QueryLogStreamStdOut, nil + case "std_err": + return QueryLogStreamStdErr, nil + case "all": + return QueryLogStreamAll, nil + } + var t QueryLogStream + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (q QueryLogStream) Ptr() *QueryLogStream { + return &q +} diff --git a/sdks/api/runtime/go/containers/logs/client.go b/sdks/api/runtime/go/containers/logs/client.go new file mode 100644 index 0000000000..be5ec19067 --- /dev/null +++ b/sdks/api/runtime/go/containers/logs/client.go @@ -0,0 +1,138 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +package logs + +import ( + bytes "bytes" + context "context" + json "encoding/json" + errors "errors" + fmt "fmt" + io "io" + http "net/http" + url "net/url" + sdk "sdk" + containers "sdk/containers" + core "sdk/core" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + } +} + +// Returns the logs for a given container. +func (c *Client) Get(ctx context.Context, request *containers.GetContainerLogsRequestQuery) (*containers.GetContainerLogsResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "v1/containers/logs" + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + queryParams.Add("stream", fmt.Sprintf("%v", request.Stream)) + queryParams.Add("container_ids_json", fmt.Sprintf("%v", request.ContainerIdsJson)) + if request.SearchText != nil { + queryParams.Add("search_text", fmt.Sprintf("%v", *request.SearchText)) + } + if request.SearchCaseSensitive != nil { + queryParams.Add("search_case_sensitive", fmt.Sprintf("%v", *request.SearchCaseSensitive)) + } + if request.SearchEnableRegex != nil { + queryParams.Add("search_enable_regex", fmt.Sprintf("%v", *request.SearchEnableRegex)) + } + if request.WatchIndex != nil { + queryParams.Add("watch_index", fmt.Sprintf("%v", *request.WatchIndex)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.GetContainerLogsResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} diff --git a/sdks/api/runtime/go/containers/metrics.go b/sdks/api/runtime/go/containers/metrics.go new file mode 100644 index 0000000000..ba8e18bec0 --- /dev/null +++ b/sdks/api/runtime/go/containers/metrics.go @@ -0,0 +1,50 @@ +// This file was auto-generated by Fern from our API Definition. + +package containers + +import ( + json "encoding/json" + fmt "fmt" + core "sdk/core" +) + +type GetContainerMetricsRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + Start int `json:"-"` + End int `json:"-"` + Interval int `json:"-"` +} + +type GetContainerMetricsResponse struct { + ContainerIds []string `json:"container_ids,omitempty"` + MetricNames []string `json:"metric_names,omitempty"` + MetricAttributes []map[string]string `json:"metric_attributes,omitempty"` + MetricTypes []string `json:"metric_types,omitempty"` + MetricValues [][]float64 `json:"metric_values,omitempty"` + + _rawJSON json.RawMessage +} + +func (g *GetContainerMetricsResponse) UnmarshalJSON(data []byte) error { + type unmarshaler GetContainerMetricsResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GetContainerMetricsResponse(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GetContainerMetricsResponse) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} diff --git a/sdks/api/runtime/go/containers/metrics/client.go b/sdks/api/runtime/go/containers/metrics/client.go new file mode 100644 index 0000000000..d891ab58d9 --- /dev/null +++ b/sdks/api/runtime/go/containers/metrics/client.go @@ -0,0 +1,129 @@ +// This file was auto-generated by Fern from our API Definition. 
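A minimal sketch of querying container logs through the logs subclient shown above. Illustrative only: container IDs are passed as a JSON-encoded array in the `container_ids_json` query parameter, and the ID used here is a placeholder.

```go
// Illustrative sketch only (not part of the patch). Assumes the local "sdk"
// module layout; the container ID is a placeholder.
package main

import (
	"context"
	"encoding/json"
	"fmt"

	containers "sdk/containers"
	logs "sdk/containers/logs"
)

func main() {
	c := logs.NewClient()

	// container_ids_json is a JSON-encoded array of container IDs.
	ids, err := json.Marshal([]string{"00000000-0000-0000-0000-000000000000"})
	if err != nil {
		panic(err)
	}

	// GET /v1/containers/logs?stream=all&container_ids_json=[...]
	resp, err := c.Get(context.Background(), &containers.GetContainerLogsRequestQuery{
		Stream:           containers.QueryLogStreamAll,
		ContainerIdsJson: string(ids),
	})
	if err != nil {
		panic(err)
	}
	for i, line := range resp.Lines {
		fmt.Println(resp.Timestamps[i], line)
	}
}
```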
+ +package metrics + +import ( + bytes "bytes" + context "context" + json "encoding/json" + errors "errors" + fmt "fmt" + io "io" + http "net/http" + url "net/url" + sdk "sdk" + containers "sdk/containers" + core "sdk/core" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + } +} + +// Returns the metrics for a given container. +// +// The id of the container to destroy +func (c *Client) Get(ctx context.Context, container sdk.Id, request *containers.GetContainerMetricsRequestQuery) (*containers.GetContainerMetricsResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/containers/%v/metrics/history", container) + + queryParams := make(url.Values) + if request.Project != nil { + queryParams.Add("project", fmt.Sprintf("%v", *request.Project)) + } + if request.Environment != nil { + queryParams.Add("environment", fmt.Sprintf("%v", *request.Environment)) + } + queryParams.Add("start", fmt.Sprintf("%v", request.Start)) + queryParams.Add("end", fmt.Sprintf("%v", request.End)) + queryParams.Add("interval", fmt.Sprintf("%v", request.Interval)) + if len(queryParams) > 0 { + endpointURL += "?" + queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *containers.GetContainerMetricsResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} diff --git a/sdks/api/runtime/go/containers/types.go b/sdks/api/runtime/go/containers/types.go new file mode 100644 index 0000000000..383dcf1cc9 --- /dev/null +++ b/sdks/api/runtime/go/containers/types.go @@ -0,0 +1,572 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +package containers + +import ( + json "encoding/json" + fmt "fmt" + uuid "github.com/google/uuid" + sdk "sdk" + core "sdk/core" +) + +type CreateContainerRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + EndpointType *EndpointType `json:"-"` + Body *CreateContainerRequest `json:"-"` +} + +func (c *CreateContainerRequestQuery) UnmarshalJSON(data []byte) error { + body := new(CreateContainerRequest) + if err := json.Unmarshal(data, &body); err != nil { + return err + } + c.Body = body + return nil +} + +func (c *CreateContainerRequestQuery) MarshalJSON() ([]byte, error) { + return json.Marshal(c.Body) +} + +type DestroyContainerRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + // The duration to wait for in milliseconds before killing the container. This should be used to override the default kill timeout if a faster time is needed, say for ignoring a graceful shutdown. + OverrideKillTimeout *int64 `json:"-"` +} + +type ListContainersRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + EndpointType *EndpointType `json:"-"` +} + +type GetContainersRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + EndpointType *EndpointType `json:"-"` + TagsJson *string `json:"-"` + IncludeDestroyed *bool `json:"-"` + Cursor *string `json:"-"` +} + +type Container struct { + Id sdk.Id `json:"id"` + Region string `json:"region"` + Tags interface{} `json:"tags,omitempty"` + Runtime *Runtime `json:"runtime,omitempty"` + Network *Network `json:"network,omitempty"` + Resources *Resources `json:"resources,omitempty"` + Lifecycle *Lifecycle `json:"lifecycle,omitempty"` + CreatedAt sdk.Timestamp `json:"created_at"` + StartedAt *sdk.Timestamp `json:"started_at,omitempty"` + DestroyedAt *sdk.Timestamp `json:"destroyed_at,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *Container) UnmarshalJSON(data []byte) error { + type unmarshaler Container + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = Container(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *Container) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type EndpointType string + +const ( + EndpointTypeHostname EndpointType = "hostname" + EndpointTypePath EndpointType = "path" +) + +func NewEndpointTypeFromString(s string) (EndpointType, error) { + switch s { + case "hostname": + return EndpointTypeHostname, nil + case "path": + return EndpointTypePath, nil + } + var t EndpointType + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (e EndpointType) Ptr() *EndpointType { + return &e +} + +type GuardRouting struct { + _rawJSON json.RawMessage +} + +func (g *GuardRouting) UnmarshalJSON(data []byte) error { + type unmarshaler GuardRouting + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GuardRouting(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GuardRouting) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} + +type HostRouting struct { + _rawJSON json.RawMessage +} + +func (h 
*HostRouting) UnmarshalJSON(data []byte) error { + type unmarshaler HostRouting + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *h = HostRouting(value) + h._rawJSON = json.RawMessage(data) + return nil +} + +func (h *HostRouting) String() string { + if len(h._rawJSON) > 0 { + if value, err := core.StringifyJSON(h._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(h); err == nil { + return value + } + return fmt.Sprintf("%#v", h) +} + +type Lifecycle struct { + // The duration to wait for in milliseconds before killing the container. This should be set to a safe default, and can be overridden during a DELETE request if needed. + KillTimeout *int64 `json:"kill_timeout,omitempty"` + // If true, the container will try to reschedule itself automatically in the event of a crash or a datacenter failover. The container will not reschedule if it exits successfully. + Durable *bool `json:"durable,omitempty"` + + _rawJSON json.RawMessage +} + +func (l *Lifecycle) UnmarshalJSON(data []byte) error { + type unmarshaler Lifecycle + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *l = Lifecycle(value) + l._rawJSON = json.RawMessage(data) + return nil +} + +func (l *Lifecycle) String() string { + if len(l._rawJSON) > 0 { + if value, err := core.StringifyJSON(l._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(l); err == nil { + return value + } + return fmt.Sprintf("%#v", l) +} + +type Network struct { + Mode NetworkMode `json:"mode,omitempty"` + Ports map[string]*Port `json:"ports,omitempty"` + + _rawJSON json.RawMessage +} + +func (n *Network) UnmarshalJSON(data []byte) error { + type unmarshaler Network + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *n = Network(value) + n._rawJSON = json.RawMessage(data) + return nil +} + +func (n *Network) String() string { + if len(n._rawJSON) > 0 { + if value, err := core.StringifyJSON(n._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(n); err == nil { + return value + } + return fmt.Sprintf("%#v", n) +} + +type NetworkMode string + +const ( + NetworkModeBridge NetworkMode = "bridge" + NetworkModeHost NetworkMode = "host" +) + +func NewNetworkModeFromString(s string) (NetworkMode, error) { + switch s { + case "bridge": + return NetworkModeBridge, nil + case "host": + return NetworkModeHost, nil + } + var t NetworkMode + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (n NetworkMode) Ptr() *NetworkMode { + return &n +} + +type Port struct { + Protocol PortProtocol `json:"protocol,omitempty"` + InternalPort *int `json:"internal_port,omitempty"` + Hostname *string `json:"hostname,omitempty"` + Port *int `json:"port,omitempty"` + Path *string `json:"path,omitempty"` + // Fully formed connection URL including protocol, hostname, port, and path, if applicable. 
+ Url *string `json:"url,omitempty"` + Routing *PortRouting `json:"routing,omitempty"` + + _rawJSON json.RawMessage +} + +func (p *Port) UnmarshalJSON(data []byte) error { + type unmarshaler Port + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *p = Port(value) + p._rawJSON = json.RawMessage(data) + return nil +} + +func (p *Port) String() string { + if len(p._rawJSON) > 0 { + if value, err := core.StringifyJSON(p._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(p); err == nil { + return value + } + return fmt.Sprintf("%#v", p) +} + +type PortProtocol string + +const ( + PortProtocolHttp PortProtocol = "http" + PortProtocolHttps PortProtocol = "https" + PortProtocolTcp PortProtocol = "tcp" + PortProtocolTcpTls PortProtocol = "tcp_tls" + PortProtocolUdp PortProtocol = "udp" +) + +func NewPortProtocolFromString(s string) (PortProtocol, error) { + switch s { + case "http": + return PortProtocolHttp, nil + case "https": + return PortProtocolHttps, nil + case "tcp": + return PortProtocolTcp, nil + case "tcp_tls": + return PortProtocolTcpTls, nil + case "udp": + return PortProtocolUdp, nil + } + var t PortProtocol + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (p PortProtocol) Ptr() *PortProtocol { + return &p +} + +type PortRouting struct { + Guard *GuardRouting `json:"guard,omitempty"` + Host *HostRouting `json:"host,omitempty"` + + _rawJSON json.RawMessage +} + +func (p *PortRouting) UnmarshalJSON(data []byte) error { + type unmarshaler PortRouting + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *p = PortRouting(value) + p._rawJSON = json.RawMessage(data) + return nil +} + +func (p *PortRouting) String() string { + if len(p._rawJSON) > 0 { + if value, err := core.StringifyJSON(p._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(p); err == nil { + return value + } + return fmt.Sprintf("%#v", p) +} + +type Resources struct { + // The number of CPU cores in millicores, or 1/1000 of a core. For example, + // 1/8 of a core would be 125 millicores, and 1 core would be 1000 + // millicores. 
+ Cpu int `json:"cpu"` + // The amount of memory in megabytes + Memory int `json:"memory"` + + _rawJSON json.RawMessage +} + +func (r *Resources) UnmarshalJSON(data []byte) error { + type unmarshaler Resources + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *r = Resources(value) + r._rawJSON = json.RawMessage(data) + return nil +} + +func (r *Resources) String() string { + if len(r._rawJSON) > 0 { + if value, err := core.StringifyJSON(r._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(r); err == nil { + return value + } + return fmt.Sprintf("%#v", r) +} + +type Runtime struct { + Build uuid.UUID `json:"build"` + Arguments []string `json:"arguments,omitempty"` + Environment map[string]string `json:"environment,omitempty"` + + _rawJSON json.RawMessage +} + +func (r *Runtime) UnmarshalJSON(data []byte) error { + type unmarshaler Runtime + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *r = Runtime(value) + r._rawJSON = json.RawMessage(data) + return nil +} + +func (r *Runtime) String() string { + if len(r._rawJSON) > 0 { + if value, err := core.StringifyJSON(r._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(r); err == nil { + return value + } + return fmt.Sprintf("%#v", r) +} + +type CreateContainerNetworkRequest struct { + Mode *NetworkMode `json:"mode,omitempty"` + Ports map[string]*CreateContainerPortRequest `json:"ports,omitempty"` + WaitReady *bool `json:"wait_ready,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateContainerNetworkRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateContainerNetworkRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateContainerNetworkRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateContainerNetworkRequest) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type CreateContainerPortRequest struct { + Protocol PortProtocol `json:"protocol,omitempty"` + InternalPort *int `json:"internal_port,omitempty"` + Routing *PortRouting `json:"routing,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateContainerPortRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateContainerPortRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateContainerPortRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateContainerPortRequest) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type CreateContainerRuntimeNetworkRequest struct { + EndpointType EndpointType `json:"endpoint_type,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateContainerRuntimeNetworkRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateContainerRuntimeNetworkRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateContainerRuntimeNetworkRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c 
*CreateContainerRuntimeNetworkRequest) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type CreateContainerRuntimeRequest struct { + Environment map[string]string `json:"environment,omitempty"` + Network *CreateContainerRuntimeNetworkRequest `json:"network,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *CreateContainerRuntimeRequest) UnmarshalJSON(data []byte) error { + type unmarshaler CreateContainerRuntimeRequest + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = CreateContainerRuntimeRequest(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *CreateContainerRuntimeRequest) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + +type UpgradeContainerRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + Body *UpgradeContainerRequest `json:"-"` +} + +func (u *UpgradeContainerRequestQuery) UnmarshalJSON(data []byte) error { + body := new(UpgradeContainerRequest) + if err := json.Unmarshal(data, &body); err != nil { + return err + } + u.Body = body + return nil +} + +func (u *UpgradeContainerRequestQuery) MarshalJSON() ([]byte, error) { + return json.Marshal(u.Body) +} + +type UpgradeAllContainersRequestQuery struct { + Project *string `json:"-"` + Environment *string `json:"-"` + Body *UpgradeAllContainersRequest `json:"-"` +} + +func (u *UpgradeAllContainersRequestQuery) UnmarshalJSON(data []byte) error { + body := new(UpgradeAllContainersRequest) + if err := json.Unmarshal(data, &body); err != nil { + return err + } + u.Body = body + return nil +} + +func (u *UpgradeAllContainersRequestQuery) MarshalJSON() ([]byte, error) { + return json.Marshal(u.Body) +} diff --git a/sdks/api/runtime/openapi/openapi.yml b/sdks/api/runtime/openapi/openapi.yml index 1183d05020..5fc2031e59 100644 --- a/sdks/api/runtime/openapi/openapi.yml +++ b/sdks/api/runtime/openapi/openapi.yml @@ -3,7 +3,7 @@ info: title: Rivet API version: '' paths: - /actors/{actor}: + /v2/actors/{actor}: get: description: Gets a actor. operationId: actors_get @@ -152,7 +152,7 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /actors: + /v2/actors: get: description: >- Lists all actors associated with the token used. Can be filtered by tags @@ -306,7 +306,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ActorsCreateActorRequest' - /actors/{actor}/upgrade: + /v2/actors/{actor}/upgrade: post: description: Upgrades a actor. operationId: actors_upgrade @@ -379,7 +379,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ActorsUpgradeActorRequest' - /actors/upgrade: + /v2/actors/upgrade: post: description: Upgrades all actors matching the given tags. operationId: actors_upgradeAll @@ -954,12 +954,19 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /regions: + /v1/containers/{container}: get: - operationId: regions_list + description: Gets a container. 
+ operationId: containers_get tags: - - Regions + - Containers parameters: + - name: container + in: path + description: The id of the container to destroy + required: true + schema: + $ref: '#/components/schemas/Id' - name: project in: query required: false @@ -970,13 +977,18 @@ paths: required: false schema: type: string + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ContainersEndpointType' responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/RegionsListRegionsResponse' + $ref: '#/components/schemas/ContainersGetContainerResponse' '400': description: '' content: @@ -1013,12 +1025,19 @@ paths: application/json: schema: $ref: '#/components/schemas/ErrorBody' - /regions/recommend: - get: - operationId: regions_recommend + security: *ref_0 + delete: + description: Destroy a container. + operationId: containers_destroy tags: - - Regions + - Containers parameters: + - name: container + in: path + description: The id of the container to destroy + required: true + schema: + $ref: '#/components/schemas/Id' - name: project in: query required: false @@ -1029,25 +1048,23 @@ paths: required: false schema: type: string - - name: lat - in: query - required: false - schema: - type: number - format: double - - name: long + - name: override_kill_timeout in: query + description: >- + The duration to wait for in milliseconds before killing the + container. This should be used to override the default kill timeout + if a faster time is needed, say for ignoring a graceful shutdown. required: false schema: - type: number - format: double + type: integer + format: int64 responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/RegionsRecommendRegionResponse' + $ref: '#/components/schemas/ContainersDestroyContainerResponse' '400': description: '' content: @@ -1084,12 +1101,15 @@ paths: application/json: schema: $ref: '#/components/schemas/ErrorBody' - /routes: + security: *ref_0 + /v1/containers: get: - description: Lists all routes of the given environment. - operationId: routes_list + description: >- + Lists all containers associated with the token used. Can be filtered by + tags in the query string. + operationId: containers_list tags: - - Routes + - Containers parameters: - name: project in: query @@ -1101,13 +1121,33 @@ paths: required: false schema: type: string + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ContainersEndpointType' + - name: tags_json + in: query + required: false + schema: + type: string + - name: include_destroyed + in: query + required: false + schema: + type: boolean + - name: cursor + in: query + required: false + schema: + type: string responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/RoutesListRoutesResponse' + $ref: '#/components/schemas/ContainersListContainersResponse' '400': description: '' content: @@ -1145,18 +1185,12 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /routes/{id}: - put: - description: Creates or updates a route. - operationId: routes_update + post: + description: Create a new container. 
+ operationId: containers_create tags: - - Routes + - Containers parameters: - - name: id - in: path - required: true - schema: - type: string - name: project in: query required: false @@ -1167,13 +1201,18 @@ paths: required: false schema: type: string + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ContainersEndpointType' responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/RoutesUpdateRouteResponse' + $ref: '#/components/schemas/ContainersCreateContainerResponse' '400': description: '' content: @@ -1216,18 +1255,20 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/RoutesUpdateRouteBody' - delete: - description: Deletes a route. - operationId: routes_delete + $ref: '#/components/schemas/ContainersCreateContainerRequest' + /v1/containers/{container}/upgrade: + post: + description: Upgrades a container. + operationId: containers_upgrade tags: - - Routes + - Containers parameters: - - name: id + - name: container in: path + description: The id of the container to upgrade required: true schema: - type: string + $ref: '#/components/schemas/Id' - name: project in: query required: false @@ -1244,7 +1285,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/RoutesDeleteRouteResponse' + $ref: '#/components/schemas/ContainersUpgradeContainerResponse' '400': description: '' content: @@ -1282,14 +1323,18 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /routes/history: - get: - description: >- - Returns time series data for HTTP requests routed to actors. Allows - filtering and grouping by various request properties. - operationId: routes_history + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersUpgradeContainerRequest' + /v1/containers/upgrade: + post: + description: Upgrades all containers matching the given tags. 
+ operationId: containers_upgradeAll tags: - - Routes + - Containers parameters: - name: project in: query @@ -1301,36 +1346,69 @@ paths: required: false schema: type: string - - name: start - in: query - description: Start timestamp in milliseconds - required: true - schema: - type: integer - - name: end - in: query - description: End timestamp in milliseconds - required: true - schema: - type: integer - - name: interval - in: query - description: Time bucket interval in milliseconds - required: true - schema: - type: integer - - name: query_json + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersUpgradeAllContainersResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersUpgradeAllContainersRequest' + /regions: + get: + operationId: regions_list + tags: + - Regions + parameters: + - name: project in: query - description: JSON-encoded query expression for filtering requests required: false schema: type: string - - name: group_by + - name: environment in: query - description: >- - JSON-encoded KeyPath for grouping results (e.g. - {"property":"client_request_host"} or - {"property":"tags","map_key":"version"}) required: false schema: type: string @@ -1340,7 +1418,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/RoutesHistoryResponse' + $ref: '#/components/schemas/RegionsListRegionsResponse' '400': description: '' content: @@ -1377,13 +1455,11 @@ paths: application/json: schema: $ref: '#/components/schemas/ErrorBody' - security: *ref_0 - /actors/logs: + /regions/recommend: get: - description: Returns the logs for a given actor. - operationId: actors_logs_get + operationId: regions_recommend tags: - - ActorsLogs + - Regions parameters: - name: project in: query @@ -1395,25 +1471,25 @@ paths: required: false schema: type: string - - name: query_json + - name: lat in: query - description: JSON-encoded query expression for filtering logs required: false schema: - type: string - - name: watch_index + type: number + format: double + - name: long in: query - description: A query parameter denoting the requests watch index. required: false schema: - type: string + type: number + format: double responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ActorsGetActorLogsResponse' + $ref: '#/components/schemas/RegionsRecommendRegionResponse' '400': description: '' content: @@ -1450,23 +1526,30 @@ paths: application/json: schema: $ref: '#/components/schemas/ErrorBody' - security: *ref_0 - /actors/logs/export: - post: - description: >- - Exports logs for the given actors to an S3 bucket and returns a - presigned URL to download. - operationId: actors_logs_export + /routes: + get: + description: Lists all routes of the given environment. 
+ operationId: routes_list tags: - - ActorsLogs - parameters: [] + - Routes + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ActorsExportActorLogsResponse' + $ref: '#/components/schemas/RoutesListRoutesResponse' '400': description: '' content: @@ -1504,29 +1587,614 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - project: - type: string - environment: - type: string - query_json: - type: string - description: JSON-encoded query expression for filtering logs -components: - schemas: - ActorsGetActorResponse: - type: object - properties: - actor: - $ref: '#/components/schemas/ActorsActor' - required: - - actor + /routes/{id}: + put: + description: Creates or updates a route. + operationId: routes_update + tags: + - Routes + parameters: + - name: id + in: path + required: true + schema: + type: string + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/RoutesUpdateRouteResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/RoutesUpdateRouteBody' + delete: + description: Deletes a route. + operationId: routes_delete + tags: + - Routes + parameters: + - name: id + in: path + required: true + schema: + type: string + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/RoutesDeleteRouteResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 +<<<<<<< HEAD + /routes/history: + get: + description: >- + Returns time series data for HTTP requests routed to actors. Allows + filtering and grouping by various request properties. 
+ operationId: routes_history + tags: + - Routes + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: start + in: query + description: Start timestamp in milliseconds + required: true + schema: + type: integer + - name: end + in: query + description: End timestamp in milliseconds + required: true + schema: + type: integer + - name: interval + in: query + description: Time bucket interval in milliseconds + required: true + schema: + type: integer + - name: query_json + in: query + description: JSON-encoded query expression for filtering requests + required: false + schema: + type: string + - name: group_by + in: query + description: >- + JSON-encoded KeyPath for grouping results (e.g. + {"property":"client_request_host"} or + {"property":"tags","map_key":"version"}) + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/RoutesHistoryResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + /actors/logs: +======= + /v2/actors/logs: +>>>>>>> 43e5048bc (fix: api changes) + get: + description: Returns the logs for a given actor. + operationId: actors_logs_get + tags: + - ActorsLogs + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: query_json + in: query + description: JSON-encoded query expression for filtering logs + required: false + schema: + type: string + - name: watch_index + in: query + description: A query parameter denoting the requests watch index. + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ActorsGetActorLogsResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 +<<<<<<< HEAD + /actors/logs/export: + post: + description: >- + Exports logs for the given actors to an S3 bucket and returns a + presigned URL to download. + operationId: actors_logs_export + tags: + - ActorsLogs + parameters: [] +======= + /v2/actors/{actor}/metrics/history: + get: + description: Returns the metrics for a given actor. 
+ operationId: actors_metrics_get + tags: + - ActorsMetrics + parameters: + - name: actor + in: path + description: The id of the actor to destroy + required: true + schema: + $ref: '#/components/schemas/Id' + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: start + in: query + required: true + schema: + type: integer + - name: end + in: query + required: true + schema: + type: integer + - name: interval + in: query + required: true + schema: + type: integer +>>>>>>> 43e5048bc (fix: api changes) + responses: + '200': + description: '' + content: + application/json: + schema: +<<<<<<< HEAD + $ref: '#/components/schemas/ActorsExportActorLogsResponse' +======= + $ref: '#/components/schemas/ActorsGetActorMetricsResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + /v1/containers/logs: + get: + description: Returns the logs for a given container. + operationId: containers_logs_get + tags: + - ContainersLogs + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: stream + in: query + required: true + schema: + $ref: '#/components/schemas/ContainersQueryLogStream' + - name: container_ids_json + in: query + required: true + schema: + type: string + - name: search_text + in: query + required: false + schema: + type: string + - name: search_case_sensitive + in: query + required: false + schema: + type: boolean + - name: search_enable_regex + in: query + required: false + schema: + type: boolean + - name: watch_index + in: query + description: A query parameter denoting the requests watch index. + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersGetContainerLogsResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + /v1/containers/{container}/metrics/history: + get: + description: Returns the metrics for a given container. 
+ operationId: containers_metrics_get + tags: + - ContainersMetrics + parameters: + - name: container + in: path + description: The id of the container to destroy + required: true + schema: + $ref: '#/components/schemas/Id' + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: start + in: query + required: true + schema: + type: integer + - name: end + in: query + required: true + schema: + type: integer + - name: interval + in: query + required: true + schema: + type: integer + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersGetContainerMetricsResponse' +>>>>>>> 43e5048bc (fix: api changes) + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 +<<<<<<< HEAD + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + project: + type: string + environment: + type: string + query_json: + type: string + description: JSON-encoded query expression for filtering logs +======= +>>>>>>> 43e5048bc (fix: api changes) +components: + schemas: + ActorsGetActorResponse: + type: object + properties: + actor: + $ref: '#/components/schemas/ActorsActor' + required: + - actor ActorsCreateActorRequest: type: object properties: @@ -1541,8 +2209,6 @@ components: $ref: '#/components/schemas/ActorsCreateActorRuntimeRequest' network: $ref: '#/components/schemas/ActorsCreateActorNetworkRequest' - resources: - $ref: '#/components/schemas/ActorsResources' lifecycle: $ref: '#/components/schemas/ActorsLifecycle' required: @@ -1739,10 +2405,128 @@ components: presigned_requests: type: array items: - $ref: '#/components/schemas/UploadPresignedRequest' + $ref: '#/components/schemas/UploadPresignedRequest' + required: + - build + - presigned_requests + ContainersGetContainerResponse: + type: object + properties: + container: + $ref: '#/components/schemas/ContainersContainer' + required: + - container + ContainersCreateContainerRequest: + type: object + properties: + region: + type: string + tags: {} + build: + type: string + format: uuid + build_tags: {} + runtime: + $ref: '#/components/schemas/ContainersCreateContainerRuntimeRequest' + network: + $ref: '#/components/schemas/ContainersCreateContainerNetworkRequest' + resources: + $ref: '#/components/schemas/ContainersResources' + lifecycle: + $ref: '#/components/schemas/ContainersLifecycle' + required: + - tags + - resources + ContainersCreateContainerRuntimeRequest: + type: object + properties: + environment: + type: object + additionalProperties: + type: string + network: + $ref: '#/components/schemas/ContainersCreateContainerRuntimeNetworkRequest' + ContainersCreateContainerRuntimeNetworkRequest: + type: object + properties: + endpoint_type: + $ref: '#/components/schemas/ContainersEndpointType' + required: + - endpoint_type + 
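# Illustrative containers_create request body, derived from the
# ContainersCreateContainerRequest family of schemas in this file; all values
# are placeholders. Per the schema, only `tags` and `resources` are required.
#   tags: { name: my-container }
#   build: 00000000-0000-0000-0000-000000000000
#   resources: { cpu: 1000, memory: 1024 }   # millicores / megabytes
#   network:
#     ports:
#       http: { protocol: https, routing: { guard: {} } }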
ContainersCreateContainerNetworkRequest: + type: object + properties: + mode: + $ref: '#/components/schemas/ContainersNetworkMode' + ports: + type: object + additionalProperties: + $ref: '#/components/schemas/ContainersCreateContainerPortRequest' + wait_ready: + type: boolean + ContainersCreateContainerPortRequest: + type: object + properties: + protocol: + $ref: '#/components/schemas/ContainersPortProtocol' + internal_port: + type: integer + routing: + $ref: '#/components/schemas/ContainersPortRouting' + required: + - protocol + ContainersCreateContainerResponse: + type: object + properties: + container: + $ref: '#/components/schemas/ContainersContainer' + description: The container that was created + required: + - container + ContainersDestroyContainerResponse: + type: object + properties: {} + ContainersUpgradeContainerRequest: + type: object + properties: + build: + type: string + format: uuid + build_tags: {} + ContainersUpgradeContainerResponse: + type: object + properties: {} + ContainersUpgradeAllContainersRequest: + type: object + properties: + tags: {} + build: + type: string + format: uuid + build_tags: {} + required: + - tags + ContainersUpgradeAllContainersResponse: + type: object + properties: + count: + type: integer + format: int64 + required: + - count + ContainersListContainersResponse: + type: object + properties: + containers: + type: array + items: + $ref: '#/components/schemas/ContainersContainer' + description: A list of containers for the project associated with the token. + pagination: + $ref: '#/components/schemas/Pagination' required: - - build - - presigned_requests + - containers + - pagination RegionsListRegionsResponse: type: object properties: @@ -1838,8 +2622,6 @@ components: $ref: '#/components/schemas/ActorsRuntime' network: $ref: '#/components/schemas/ActorsNetwork' - resources: - $ref: '#/components/schemas/ActorsResources' lifecycle: $ref: '#/components/schemas/ActorsLifecycle' created_at: @@ -1888,24 +2670,6 @@ components: If true, the actor will try to reschedule itself automatically in the event of a crash or a datacenter failover. The actor will not reschedule if it exits successfully. - ActorsResources: - type: object - properties: - cpu: - type: integer - description: >- - The number of CPU cores in millicores, or 1/1000 of a core. For - example, - - 1/8 of a core would be 125 millicores, and 1 core would be 1000 - - millicores. - memory: - type: integer - description: The amount of memory in megabytes - required: - - cpu - - memory ActorsNetwork: type: object properties: @@ -1978,7 +2742,7 @@ components: actor_ids: type: array items: - $ref: '#/components/schemas/Id' + type: string description: >- List of actor IDs in these logs. The order of these correspond to the index in the log entry. 
@@ -2023,6 +2787,7 @@ components: - foreigns - actor_indices - watch +<<<<<<< HEAD ActorsExportActorLogsResponse: type: object properties: @@ -2031,6 +2796,48 @@ components: description: Presigned URL to download the exported logs required: - url +======= + ActorsQueryLogStream: + type: string + enum: + - std_out + - std_err + - all + ActorsGetActorMetricsResponse: + type: object + properties: + actor_ids: + type: array + items: + type: string + metric_names: + type: array + items: + type: string + metric_attributes: + type: array + items: + type: object + additionalProperties: + type: string + metric_types: + type: array + items: + type: string + metric_values: + type: array + items: + type: array + items: + type: number + format: double + required: + - actor_ids + - metric_names + - metric_attributes + - metric_types + - metric_values +>>>>>>> 43e5048bc (fix: api changes) BuildsBuild: type: object properties: @@ -2148,6 +2955,246 @@ components: properties: cursor: type: string + ContainersContainer: + type: object + properties: + id: + $ref: '#/components/schemas/Id' + region: + type: string + tags: {} + runtime: + $ref: '#/components/schemas/ContainersRuntime' + network: + $ref: '#/components/schemas/ContainersNetwork' + resources: + $ref: '#/components/schemas/ContainersResources' + lifecycle: + $ref: '#/components/schemas/ContainersLifecycle' + created_at: + $ref: '#/components/schemas/Timestamp' + started_at: + $ref: '#/components/schemas/Timestamp' + destroyed_at: + $ref: '#/components/schemas/Timestamp' + required: + - id + - region + - tags + - runtime + - network + - resources + - lifecycle + - created_at + ContainersRuntime: + type: object + properties: + build: + type: string + format: uuid + arguments: + type: array + items: + type: string + environment: + type: object + additionalProperties: + type: string + required: + - build + ContainersLifecycle: + type: object + properties: + kill_timeout: + type: integer + format: int64 + description: >- + The duration to wait for in milliseconds before killing the + container. This should be set to a safe default, and can be + overridden during a DELETE request if needed. + durable: + type: boolean + description: >- + If true, the container will try to reschedule itself automatically + in the event of a crash or a datacenter failover. The container will + not reschedule if it exits successfully. + ContainersResources: + type: object + properties: + cpu: + type: integer + description: >- + The number of CPU cores in millicores, or 1/1000 of a core. For + example, + + 1/8 of a core would be 125 millicores, and 1 core would be 1000 + + millicores. + memory: + type: integer + description: The amount of memory in megabytes + required: + - cpu + - memory + ContainersNetwork: + type: object + properties: + mode: + $ref: '#/components/schemas/ContainersNetworkMode' + ports: + type: object + additionalProperties: + $ref: '#/components/schemas/ContainersPort' + required: + - mode + - ports + ContainersNetworkMode: + type: string + enum: + - bridge + - host + ContainersPort: + type: object + properties: + protocol: + $ref: '#/components/schemas/ContainersPortProtocol' + internal_port: + type: integer + hostname: + type: string + port: + type: integer + path: + type: string + url: + type: string + description: >- + Fully formed connection URL including protocol, hostname, port, and + path, if applicable. 
+ routing: + $ref: '#/components/schemas/ContainersPortRouting' + required: + - protocol + - routing + ContainersPortProtocol: + type: string + enum: + - http + - https + - tcp + - tcp_tls + - udp + ContainersPortRouting: + type: object + properties: + guard: + $ref: '#/components/schemas/ContainersGuardRouting' + host: + $ref: '#/components/schemas/ContainersHostRouting' + ContainersGuardRouting: + type: object + properties: {} + ContainersHostRouting: + type: object + properties: {} + ContainersEndpointType: + type: string + enum: + - hostname + - path + ContainersGetContainerLogsResponse: + type: object + properties: + container_ids: + type: array + items: + $ref: '#/components/schemas/Id' + description: >- + List of container IDs in these logs. The order of these correspond + to the index in the log entry. + lines: + type: array + items: + type: string + description: Sorted old to new. + timestamps: + type: array + items: + $ref: '#/components/schemas/Timestamp' + description: Sorted old to new. + streams: + type: array + items: + type: integer + description: |- + Streams the logs came from. + + 0 = stdout + 1 = stderr + foreigns: + type: array + items: + type: boolean + description: >- + List of flags denoting if this log is not directly from the + container. + container_indices: + type: array + items: + type: integer + description: >- + Index of the container that this log was for. Use this index to look + the full ID in `container_ids`. + watch: + $ref: '#/components/schemas/WatchResponse' + required: + - container_ids + - lines + - timestamps + - streams + - foreigns + - container_indices + - watch + ContainersQueryLogStream: + type: string + enum: + - std_out + - std_err + - all + ContainersGetContainerMetricsResponse: + type: object + properties: + container_ids: + type: array + items: + type: string + metric_names: + type: array + items: + type: string + metric_attributes: + type: array + items: + type: object + additionalProperties: + type: string + metric_types: + type: array + items: + type: string + metric_values: + type: array + items: + type: array + items: + type: number + format: double + required: + - container_ids + - metric_names + - metric_attributes + - metric_types + - metric_values RegionsRegion: type: object properties: diff --git a/sdks/api/runtime/openapi_compat/openapi.yml b/sdks/api/runtime/openapi_compat/openapi.yml index 5cb8c9f58f..122a0fca14 100644 --- a/sdks/api/runtime/openapi_compat/openapi.yml +++ b/sdks/api/runtime/openapi_compat/openapi.yml @@ -3,7 +3,7 @@ info: title: Rivet API version: 0.0.1 paths: - '/actors/{actor}': + '/v2/actors/{actor}': get: description: Gets a actor. operationId: actors_get @@ -152,7 +152,7 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /actors: + /v2/actors: get: description: >- Lists all actors associated with the token used. Can be filtered by tags @@ -306,7 +306,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ActorsCreateActorRequest' - '/actors/{actor}/upgrade': + '/v2/actors/{actor}/upgrade': post: description: Upgrades a actor. operationId: actors_upgrade @@ -379,7 +379,7 @@ paths: application/json: schema: $ref: '#/components/schemas/ActorsUpgradeActorRequest' - /actors/upgrade: + /v2/actors/upgrade: post: description: Upgrades all actors matching the given tags. 
operationId: actors_upgradeAll @@ -954,12 +954,19 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /regions: + '/v1/containers/{container}': get: - operationId: regions_list + description: Gets a container. + operationId: containers_get tags: - - Regions + - Containers parameters: + - name: container + in: path + description: The id of the container to destroy + required: true + schema: + $ref: '#/components/schemas/Id' - name: project in: query required: false @@ -970,13 +977,18 @@ paths: required: false schema: type: string + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ContainersEndpointType' responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/RegionsListRegionsResponse' + $ref: '#/components/schemas/ContainersGetContainerResponse' '400': description: '' content: @@ -1013,12 +1025,19 @@ paths: application/json: schema: $ref: '#/components/schemas/ErrorBody' - /regions/recommend: - get: - operationId: regions_recommend + security: *ref_0 + delete: + description: Destroy a container. + operationId: containers_destroy tags: - - Regions + - Containers parameters: + - name: container + in: path + description: The id of the container to destroy + required: true + schema: + $ref: '#/components/schemas/Id' - name: project in: query required: false @@ -1029,25 +1048,23 @@ paths: required: false schema: type: string - - name: lat - in: query - required: false - schema: - type: number - format: double - - name: long + - name: override_kill_timeout in: query + description: >- + The duration to wait for in milliseconds before killing the + container. This should be used to override the default kill timeout + if a faster time is needed, say for ignoring a graceful shutdown. required: false schema: - type: number - format: double + type: integer + format: int64 responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/RegionsRecommendRegionResponse' + $ref: '#/components/schemas/ContainersDestroyContainerResponse' '400': description: '' content: @@ -1084,12 +1101,15 @@ paths: application/json: schema: $ref: '#/components/schemas/ErrorBody' - /routes: + security: *ref_0 + /v1/containers: get: - description: Lists all routes of the given environment. - operationId: routes_list + description: >- + Lists all containers associated with the token used. Can be filtered by + tags in the query string. + operationId: containers_list tags: - - Routes + - Containers parameters: - name: project in: query @@ -1101,13 +1121,33 @@ paths: required: false schema: type: string + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ContainersEndpointType' + - name: tags_json + in: query + required: false + schema: + type: string + - name: include_destroyed + in: query + required: false + schema: + type: boolean + - name: cursor + in: query + required: false + schema: + type: string responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/RoutesListRoutesResponse' + $ref: '#/components/schemas/ContainersListContainersResponse' '400': description: '' content: @@ -1145,18 +1185,12 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - '/routes/{id}': - put: - description: Creates or updates a route. - operationId: routes_update + post: + description: Create a new container. 
+ operationId: containers_create tags: - - Routes + - Containers parameters: - - name: id - in: path - required: true - schema: - type: string - name: project in: query required: false @@ -1167,13 +1201,18 @@ paths: required: false schema: type: string + - name: endpoint_type + in: query + required: false + schema: + $ref: '#/components/schemas/ContainersEndpointType' responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/RoutesUpdateRouteResponse' + $ref: '#/components/schemas/ContainersCreateContainerResponse' '400': description: '' content: @@ -1216,18 +1255,20 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/RoutesUpdateRouteBody' - delete: - description: Deletes a route. - operationId: routes_delete + $ref: '#/components/schemas/ContainersCreateContainerRequest' + '/v1/containers/{container}/upgrade': + post: + description: Upgrades a container. + operationId: containers_upgrade tags: - - Routes + - Containers parameters: - - name: id + - name: container in: path + description: The id of the container to upgrade required: true schema: - type: string + $ref: '#/components/schemas/Id' - name: project in: query required: false @@ -1244,7 +1285,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/RoutesDeleteRouteResponse' + $ref: '#/components/schemas/ContainersUpgradeContainerResponse' '400': description: '' content: @@ -1282,14 +1323,18 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - /routes/history: - get: - description: >- - Returns time series data for HTTP requests routed to actors. Allows - filtering and grouping by various request properties. - operationId: routes_history + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersUpgradeContainerRequest' + /v1/containers/upgrade: + post: + description: Upgrades all containers matching the given tags. 
+ operationId: containers_upgradeAll tags: - - Routes + - Containers parameters: - name: project in: query @@ -1301,36 +1346,69 @@ paths: required: false schema: type: string - - name: start - in: query - description: Start timestamp in milliseconds - required: true - schema: - type: integer - - name: end - in: query - description: End timestamp in milliseconds - required: true - schema: - type: integer - - name: interval - in: query - description: Time bucket interval in milliseconds - required: true - schema: - type: integer - - name: query_json + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersUpgradeAllContainersResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersUpgradeAllContainersRequest' + /regions: + get: + operationId: regions_list + tags: + - Regions + parameters: + - name: project in: query - description: JSON-encoded query expression for filtering requests required: false schema: type: string - - name: group_by + - name: environment in: query - description: >- - JSON-encoded KeyPath for grouping results (e.g. - {"property":"client_request_host"} or - {"property":"tags","map_key":"version"}) required: false schema: type: string @@ -1340,7 +1418,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/RoutesHistoryResponse' + $ref: '#/components/schemas/RegionsListRegionsResponse' '400': description: '' content: @@ -1377,13 +1455,11 @@ paths: application/json: schema: $ref: '#/components/schemas/ErrorBody' - security: *ref_0 - /actors/logs: + /regions/recommend: get: - description: Returns the logs for a given actor. - operationId: actors_logs_get + operationId: regions_recommend tags: - - ActorsLogs + - Regions parameters: - name: project in: query @@ -1395,25 +1471,25 @@ paths: required: false schema: type: string - - name: query_json + - name: lat in: query - description: JSON-encoded query expression for filtering logs required: false schema: - type: string - - name: watch_index + type: number + format: double + - name: long in: query - description: A query parameter denoting the requests watch index. required: false schema: - type: string + type: number + format: double responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ActorsGetActorLogsResponse' + $ref: '#/components/schemas/RegionsRecommendRegionResponse' '400': description: '' content: @@ -1450,23 +1526,30 @@ paths: application/json: schema: $ref: '#/components/schemas/ErrorBody' - security: *ref_0 - /actors/logs/export: - post: - description: >- - Exports logs for the given actors to an S3 bucket and returns a - presigned URL to download. - operationId: actors_logs_export + /routes: + get: + description: Lists all routes of the given environment. 
+ operationId: routes_list tags: - - ActorsLogs - parameters: [] + - Routes + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string responses: '200': description: '' content: application/json: schema: - $ref: '#/components/schemas/ActorsExportActorLogsResponse' + $ref: '#/components/schemas/RoutesListRoutesResponse' '400': description: '' content: @@ -1504,29 +1587,614 @@ paths: schema: $ref: '#/components/schemas/ErrorBody' security: *ref_0 - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - project: - type: string - environment: - type: string - query_json: - type: string - description: JSON-encoded query expression for filtering logs -components: - schemas: - ActorsGetActorResponse: - type: object - properties: - actor: - $ref: '#/components/schemas/ActorsActor' - required: - - actor + '/routes/{id}': + put: + description: Creates or updates a route. + operationId: routes_update + tags: + - Routes + parameters: + - name: id + in: path + required: true + schema: + type: string + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/RoutesUpdateRouteResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/RoutesUpdateRouteBody' + delete: + description: Deletes a route. + operationId: routes_delete + tags: + - Routes + parameters: + - name: id + in: path + required: true + schema: + type: string + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/RoutesDeleteRouteResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 +<<<<<<< HEAD + /routes/history: + get: + description: >- + Returns time series data for HTTP requests routed to actors. 
Allows + filtering and grouping by various request properties. + operationId: routes_history + tags: + - Routes + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: start + in: query + description: Start timestamp in milliseconds + required: true + schema: + type: integer + - name: end + in: query + description: End timestamp in milliseconds + required: true + schema: + type: integer + - name: interval + in: query + description: Time bucket interval in milliseconds + required: true + schema: + type: integer + - name: query_json + in: query + description: JSON-encoded query expression for filtering requests + required: false + schema: + type: string + - name: group_by + in: query + description: >- + JSON-encoded KeyPath for grouping results (e.g. + {"property":"client_request_host"} or + {"property":"tags","map_key":"version"}) + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/RoutesHistoryResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + /actors/logs: +======= + /v2/actors/logs: +>>>>>>> 43e5048bc (fix: api changes) + get: + description: Returns the logs for a given actor. + operationId: actors_logs_get + tags: + - ActorsLogs + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: query_json + in: query + description: JSON-encoded query expression for filtering logs + required: false + schema: + type: string + - name: watch_index + in: query + description: A query parameter denoting the requests watch index. + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ActorsGetActorLogsResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 +<<<<<<< HEAD + /actors/logs/export: + post: + description: >- + Exports logs for the given actors to an S3 bucket and returns a + presigned URL to download. 
+ operationId: actors_logs_export + tags: + - ActorsLogs + parameters: [] +======= + '/v2/actors/{actor}/metrics/history': + get: + description: Returns the metrics for a given actor. + operationId: actors_metrics_get + tags: + - ActorsMetrics + parameters: + - name: actor + in: path + description: The id of the actor to destroy + required: true + schema: + $ref: '#/components/schemas/Id' + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: start + in: query + required: true + schema: + type: integer + - name: end + in: query + required: true + schema: + type: integer + - name: interval + in: query + required: true + schema: + type: integer +>>>>>>> 43e5048bc (fix: api changes) + responses: + '200': + description: '' + content: + application/json: + schema: +<<<<<<< HEAD + $ref: '#/components/schemas/ActorsExportActorLogsResponse' +======= + $ref: '#/components/schemas/ActorsGetActorMetricsResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + /v1/containers/logs: + get: + description: Returns the logs for a given container. + operationId: containers_logs_get + tags: + - ContainersLogs + parameters: + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: stream + in: query + required: true + schema: + $ref: '#/components/schemas/ContainersQueryLogStream' + - name: container_ids_json + in: query + required: true + schema: + type: string + - name: search_text + in: query + required: false + schema: + type: string + - name: search_case_sensitive + in: query + required: false + schema: + type: boolean + - name: search_enable_regex + in: query + required: false + schema: + type: boolean + - name: watch_index + in: query + description: A query parameter denoting the requests watch index. + required: false + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersGetContainerLogsResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 + '/v1/containers/{container}/metrics/history': + get: + description: Returns the metrics for a given container. 
+ operationId: containers_metrics_get + tags: + - ContainersMetrics + parameters: + - name: container + in: path + description: The id of the container to destroy + required: true + schema: + $ref: '#/components/schemas/Id' + - name: project + in: query + required: false + schema: + type: string + - name: environment + in: query + required: false + schema: + type: string + - name: start + in: query + required: true + schema: + type: integer + - name: end + in: query + required: true + schema: + type: integer + - name: interval + in: query + required: true + schema: + type: integer + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ContainersGetContainerMetricsResponse' +>>>>>>> 43e5048bc (fix: api changes) + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 +<<<<<<< HEAD + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + project: + type: string + environment: + type: string + query_json: + type: string + description: JSON-encoded query expression for filtering logs +======= +>>>>>>> 43e5048bc (fix: api changes) +components: + schemas: + ActorsGetActorResponse: + type: object + properties: + actor: + $ref: '#/components/schemas/ActorsActor' + required: + - actor ActorsCreateActorRequest: type: object properties: @@ -1541,8 +2209,6 @@ components: $ref: '#/components/schemas/ActorsCreateActorRuntimeRequest' network: $ref: '#/components/schemas/ActorsCreateActorNetworkRequest' - resources: - $ref: '#/components/schemas/ActorsResources' lifecycle: $ref: '#/components/schemas/ActorsLifecycle' required: @@ -1739,10 +2405,128 @@ components: presigned_requests: type: array items: - $ref: '#/components/schemas/UploadPresignedRequest' + $ref: '#/components/schemas/UploadPresignedRequest' + required: + - build + - presigned_requests + ContainersGetContainerResponse: + type: object + properties: + container: + $ref: '#/components/schemas/ContainersContainer' + required: + - container + ContainersCreateContainerRequest: + type: object + properties: + region: + type: string + tags: {} + build: + type: string + format: uuid + build_tags: {} + runtime: + $ref: '#/components/schemas/ContainersCreateContainerRuntimeRequest' + network: + $ref: '#/components/schemas/ContainersCreateContainerNetworkRequest' + resources: + $ref: '#/components/schemas/ContainersResources' + lifecycle: + $ref: '#/components/schemas/ContainersLifecycle' + required: + - tags + - resources + ContainersCreateContainerRuntimeRequest: + type: object + properties: + environment: + type: object + additionalProperties: + type: string + network: + $ref: '#/components/schemas/ContainersCreateContainerRuntimeNetworkRequest' + ContainersCreateContainerRuntimeNetworkRequest: + type: object + properties: + endpoint_type: + $ref: '#/components/schemas/ContainersEndpointType' + required: + - endpoint_type + 
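The `ContainersCreateContainerRequest` schema above only marks `tags` and `resources` as required; `region`, `build`/`build_tags`, `runtime`, `network`, and `lifecycle` are optional. As a rough sketch of what a request body for `POST /v1/containers` could look like (all values are made-up placeholders, not taken from the SDK), built with `serde_json`:

```rust
use serde_json::json;

fn main() {
    // Hypothetical values; only `tags` and `resources` are required by the schema.
    let body = json!({
        "tags": { "name": "my-container" },                   // free-form tag object
        "build_tags": { "name": "game", "current": "true" },  // optional: resolve the build by tags
        "resources": {
            "cpu": 1000,   // millicores: 1000 = one full core
            "memory": 512  // megabytes
        },
        "runtime": {
            "environment": { "PORT": "8080" },
            "network": { "endpoint_type": "hostname" }        // ContainersEndpointType: hostname | path
        },
        "lifecycle": {
            "kill_timeout": 30000,  // ms to wait before killing the container
            "durable": true
        }
    });

    println!("{}", serde_json::to_string_pretty(&body).unwrap());
}
```

`project`, `environment`, and `endpoint_type` are not part of the body; as with the other containers routes they are presumably passed as query parameters.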
ContainersCreateContainerNetworkRequest: + type: object + properties: + mode: + $ref: '#/components/schemas/ContainersNetworkMode' + ports: + type: object + additionalProperties: + $ref: '#/components/schemas/ContainersCreateContainerPortRequest' + wait_ready: + type: boolean + ContainersCreateContainerPortRequest: + type: object + properties: + protocol: + $ref: '#/components/schemas/ContainersPortProtocol' + internal_port: + type: integer + routing: + $ref: '#/components/schemas/ContainersPortRouting' + required: + - protocol + ContainersCreateContainerResponse: + type: object + properties: + container: + $ref: '#/components/schemas/ContainersContainer' + description: The container that was created + required: + - container + ContainersDestroyContainerResponse: + type: object + properties: {} + ContainersUpgradeContainerRequest: + type: object + properties: + build: + type: string + format: uuid + build_tags: {} + ContainersUpgradeContainerResponse: + type: object + properties: {} + ContainersUpgradeAllContainersRequest: + type: object + properties: + tags: {} + build: + type: string + format: uuid + build_tags: {} + required: + - tags + ContainersUpgradeAllContainersResponse: + type: object + properties: + count: + type: integer + format: int64 + required: + - count + ContainersListContainersResponse: + type: object + properties: + containers: + type: array + items: + $ref: '#/components/schemas/ContainersContainer' + description: A list of containers for the project associated with the token. + pagination: + $ref: '#/components/schemas/Pagination' required: - - build - - presigned_requests + - containers + - pagination RegionsListRegionsResponse: type: object properties: @@ -1838,8 +2622,6 @@ components: $ref: '#/components/schemas/ActorsRuntime' network: $ref: '#/components/schemas/ActorsNetwork' - resources: - $ref: '#/components/schemas/ActorsResources' lifecycle: $ref: '#/components/schemas/ActorsLifecycle' created_at: @@ -1888,24 +2670,6 @@ components: If true, the actor will try to reschedule itself automatically in the event of a crash or a datacenter failover. The actor will not reschedule if it exits successfully. - ActorsResources: - type: object - properties: - cpu: - type: integer - description: >- - The number of CPU cores in millicores, or 1/1000 of a core. For - example, - - 1/8 of a core would be 125 millicores, and 1 core would be 1000 - - millicores. - memory: - type: integer - description: The amount of memory in megabytes - required: - - cpu - - memory ActorsNetwork: type: object properties: @@ -1978,7 +2742,7 @@ components: actor_ids: type: array items: - $ref: '#/components/schemas/Id' + type: string description: >- List of actor IDs in these logs. The order of these correspond to the index in the log entry. 
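Ports in the create request are a map keyed by an arbitrary port name; each entry requires a `protocol`, while `internal_port` and `routing` are optional, and routing is expressed by attaching an empty `guard` or `host` object. A hypothetical `network` block illustrating this shape (the port name `http` and all values are invented for the example):

```rust
use serde_json::json;

fn main() {
    // Goes under the top-level `network` field of ContainersCreateContainerRequest.
    let network = json!({
        "mode": "bridge",            // ContainersNetworkMode: bridge | host
        "wait_ready": true,
        "ports": {
            "http": {
                "protocol": "https", // ContainersPortProtocol: http | https | tcp | tcp_tls | udp
                "internal_port": 8080,
                "routing": { "guard": {} }  // empty `guard` object selects Guard routing; `host` is the alternative
            }
        }
    });

    println!("{}", serde_json::to_string_pretty(&network).unwrap());
}
```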
@@ -2023,6 +2787,7 @@ components: - foreigns - actor_indices - watch +<<<<<<< HEAD ActorsExportActorLogsResponse: type: object properties: @@ -2031,6 +2796,48 @@ components: description: Presigned URL to download the exported logs required: - url +======= + ActorsQueryLogStream: + type: string + enum: + - std_out + - std_err + - all + ActorsGetActorMetricsResponse: + type: object + properties: + actor_ids: + type: array + items: + type: string + metric_names: + type: array + items: + type: string + metric_attributes: + type: array + items: + type: object + additionalProperties: + type: string + metric_types: + type: array + items: + type: string + metric_values: + type: array + items: + type: array + items: + type: number + format: double + required: + - actor_ids + - metric_names + - metric_attributes + - metric_types + - metric_values +>>>>>>> 43e5048bc (fix: api changes) BuildsBuild: type: object properties: @@ -2148,6 +2955,246 @@ components: properties: cursor: type: string + ContainersContainer: + type: object + properties: + id: + $ref: '#/components/schemas/Id' + region: + type: string + tags: {} + runtime: + $ref: '#/components/schemas/ContainersRuntime' + network: + $ref: '#/components/schemas/ContainersNetwork' + resources: + $ref: '#/components/schemas/ContainersResources' + lifecycle: + $ref: '#/components/schemas/ContainersLifecycle' + created_at: + $ref: '#/components/schemas/Timestamp' + started_at: + $ref: '#/components/schemas/Timestamp' + destroyed_at: + $ref: '#/components/schemas/Timestamp' + required: + - id + - region + - tags + - runtime + - network + - resources + - lifecycle + - created_at + ContainersRuntime: + type: object + properties: + build: + type: string + format: uuid + arguments: + type: array + items: + type: string + environment: + type: object + additionalProperties: + type: string + required: + - build + ContainersLifecycle: + type: object + properties: + kill_timeout: + type: integer + format: int64 + description: >- + The duration to wait for in milliseconds before killing the + container. This should be set to a safe default, and can be + overridden during a DELETE request if needed. + durable: + type: boolean + description: >- + If true, the container will try to reschedule itself automatically + in the event of a crash or a datacenter failover. The container will + not reschedule if it exits successfully. + ContainersResources: + type: object + properties: + cpu: + type: integer + description: >- + The number of CPU cores in millicores, or 1/1000 of a core. For + example, + + 1/8 of a core would be 125 millicores, and 1 core would be 1000 + + millicores. + memory: + type: integer + description: The amount of memory in megabytes + required: + - cpu + - memory + ContainersNetwork: + type: object + properties: + mode: + $ref: '#/components/schemas/ContainersNetworkMode' + ports: + type: object + additionalProperties: + $ref: '#/components/schemas/ContainersPort' + required: + - mode + - ports + ContainersNetworkMode: + type: string + enum: + - bridge + - host + ContainersPort: + type: object + properties: + protocol: + $ref: '#/components/schemas/ContainersPortProtocol' + internal_port: + type: integer + hostname: + type: string + port: + type: integer + path: + type: string + url: + type: string + description: >- + Fully formed connection URL including protocol, hostname, port, and + path, if applicable. 
+ routing: + $ref: '#/components/schemas/ContainersPortRouting' + required: + - protocol + - routing + ContainersPortProtocol: + type: string + enum: + - http + - https + - tcp + - tcp_tls + - udp + ContainersPortRouting: + type: object + properties: + guard: + $ref: '#/components/schemas/ContainersGuardRouting' + host: + $ref: '#/components/schemas/ContainersHostRouting' + ContainersGuardRouting: + type: object + properties: {} + ContainersHostRouting: + type: object + properties: {} + ContainersEndpointType: + type: string + enum: + - hostname + - path + ContainersGetContainerLogsResponse: + type: object + properties: + container_ids: + type: array + items: + $ref: '#/components/schemas/Id' + description: >- + List of container IDs in these logs. The order of these correspond + to the index in the log entry. + lines: + type: array + items: + type: string + description: Sorted old to new. + timestamps: + type: array + items: + $ref: '#/components/schemas/Timestamp' + description: Sorted old to new. + streams: + type: array + items: + type: integer + description: |- + Streams the logs came from. + + 0 = stdout + 1 = stderr + foreigns: + type: array + items: + type: boolean + description: >- + List of flags denoting if this log is not directly from the + container. + container_indices: + type: array + items: + type: integer + description: >- + Index of the container that this log was for. Use this index to look + the full ID in `container_ids`. + watch: + $ref: '#/components/schemas/WatchResponse' + required: + - container_ids + - lines + - timestamps + - streams + - foreigns + - container_indices + - watch + ContainersQueryLogStream: + type: string + enum: + - std_out + - std_err + - all + ContainersGetContainerMetricsResponse: + type: object + properties: + container_ids: + type: array + items: + type: string + metric_names: + type: array + items: + type: string + metric_attributes: + type: array + items: + type: object + additionalProperties: + type: string + metric_types: + type: array + items: + type: string + metric_values: + type: array + items: + type: array + items: + type: number + format: double + required: + - container_ids + - metric_names + - metric_attributes + - metric_types + - metric_values RegionsRegion: type: object properties: diff --git a/sdks/api/runtime/rust/.openapi-generator/FILES b/sdks/api/runtime/rust/.openapi-generator/FILES index 278e55f627..e133b96e1c 100644 --- a/sdks/api/runtime/rust/.openapi-generator/FILES +++ b/sdks/api/runtime/rust/.openapi-generator/FILES @@ -14,19 +14,28 @@ docs/ActorsCreateActorRuntimeRequest.md docs/ActorsEndpointType.md docs/ActorsExportActorLogsResponse.md docs/ActorsGetActorLogsResponse.md +docs/ActorsGetActorMetricsResponse.md docs/ActorsGetActorResponse.md docs/ActorsGetActorUsageResponse.md docs/ActorsLifecycle.md docs/ActorsListActorsResponse.md docs/ActorsLogsApi.md +<<<<<<< HEAD docs/ActorsLogsExportRequest.md +======= +docs/ActorsMetricsApi.md +>>>>>>> 43e5048bc (fix: api changes) docs/ActorsNetwork.md docs/ActorsNetworkMode.md docs/ActorsPort.md docs/ActorsPortProtocol.md docs/ActorsPortRouting.md +<<<<<<< HEAD docs/ActorsQueryActorsResponse.md docs/ActorsResources.md +======= +docs/ActorsQueryLogStream.md +>>>>>>> 43e5048bc (fix: api changes) docs/ActorsRuntime.md docs/ActorsUpgradeActorRequest.md docs/ActorsUpgradeAllActorsRequest.md @@ -43,6 +52,33 @@ docs/BuildsPatchBuildTagsRequest.md docs/BuildsPrepareBuildRequest.md docs/BuildsPrepareBuildResponse.md docs/BuildsResources.md +docs/ContainersApi.md 
+docs/ContainersContainer.md +docs/ContainersCreateContainerNetworkRequest.md +docs/ContainersCreateContainerPortRequest.md +docs/ContainersCreateContainerRequest.md +docs/ContainersCreateContainerResponse.md +docs/ContainersCreateContainerRuntimeNetworkRequest.md +docs/ContainersCreateContainerRuntimeRequest.md +docs/ContainersEndpointType.md +docs/ContainersGetContainerLogsResponse.md +docs/ContainersGetContainerMetricsResponse.md +docs/ContainersGetContainerResponse.md +docs/ContainersLifecycle.md +docs/ContainersListContainersResponse.md +docs/ContainersLogsApi.md +docs/ContainersMetricsApi.md +docs/ContainersNetwork.md +docs/ContainersNetworkMode.md +docs/ContainersPort.md +docs/ContainersPortProtocol.md +docs/ContainersPortRouting.md +docs/ContainersQueryLogStream.md +docs/ContainersResources.md +docs/ContainersRuntime.md +docs/ContainersUpgradeAllContainersRequest.md +docs/ContainersUpgradeAllContainersResponse.md +docs/ContainersUpgradeContainerRequest.md docs/ErrorBody.md docs/Pagination.md docs/RegionsApi.md @@ -62,8 +98,12 @@ docs/WatchResponse.md git_push.sh src/apis/actors_api.rs src/apis/actors_logs_api.rs +src/apis/actors_metrics_api.rs src/apis/builds_api.rs src/apis/configuration.rs +src/apis/containers_api.rs +src/apis/containers_logs_api.rs +src/apis/containers_metrics_api.rs src/apis/mod.rs src/apis/regions_api.rs src/apis/routes_api.rs @@ -78,6 +118,7 @@ src/models/actors_create_actor_runtime_request.rs src/models/actors_endpoint_type.rs src/models/actors_export_actor_logs_response.rs src/models/actors_get_actor_logs_response.rs +src/models/actors_get_actor_metrics_response.rs src/models/actors_get_actor_response.rs src/models/actors_get_actor_usage_response.rs src/models/actors_lifecycle.rs @@ -88,8 +129,12 @@ src/models/actors_network_mode.rs src/models/actors_port.rs src/models/actors_port_protocol.rs src/models/actors_port_routing.rs +<<<<<<< HEAD src/models/actors_query_actors_response.rs src/models/actors_resources.rs +======= +src/models/actors_query_log_stream.rs +>>>>>>> 43e5048bc (fix: api changes) src/models/actors_runtime.rs src/models/actors_upgrade_actor_request.rs src/models/actors_upgrade_all_actors_request.rs @@ -105,6 +150,30 @@ src/models/builds_patch_build_tags_request.rs src/models/builds_prepare_build_request.rs src/models/builds_prepare_build_response.rs src/models/builds_resources.rs +src/models/containers_container.rs +src/models/containers_create_container_network_request.rs +src/models/containers_create_container_port_request.rs +src/models/containers_create_container_request.rs +src/models/containers_create_container_response.rs +src/models/containers_create_container_runtime_network_request.rs +src/models/containers_create_container_runtime_request.rs +src/models/containers_endpoint_type.rs +src/models/containers_get_container_logs_response.rs +src/models/containers_get_container_metrics_response.rs +src/models/containers_get_container_response.rs +src/models/containers_lifecycle.rs +src/models/containers_list_containers_response.rs +src/models/containers_network.rs +src/models/containers_network_mode.rs +src/models/containers_port.rs +src/models/containers_port_protocol.rs +src/models/containers_port_routing.rs +src/models/containers_query_log_stream.rs +src/models/containers_resources.rs +src/models/containers_runtime.rs +src/models/containers_upgrade_all_containers_request.rs +src/models/containers_upgrade_all_containers_response.rs +src/models/containers_upgrade_container_request.rs src/models/error_body.rs src/models/mod.rs 
src/models/pagination.rs diff --git a/sdks/api/runtime/rust/README.md b/sdks/api/runtime/rust/README.md index 2b8645499b..47963ed294 100644 --- a/sdks/api/runtime/rust/README.md +++ b/sdks/api/runtime/rust/README.md @@ -25,6 +25,7 @@ All URIs are relative to *https://api.rivet.gg* Class | Method | HTTP request | Description ------------ | ------------- | ------------- | ------------- +<<<<<<< HEAD *ActorsApi* | [**actors_create**](docs/ActorsApi.md#actors_create) | **POST** /actors | *ActorsApi* | [**actors_destroy**](docs/ActorsApi.md#actors_destroy) | **DELETE** /actors/{actor} | *ActorsApi* | [**actors_get**](docs/ActorsApi.md#actors_get) | **GET** /actors/{actor} | @@ -35,11 +36,29 @@ Class | Method | HTTP request | Description *ActorsApi* | [**actors_usage**](docs/ActorsApi.md#actors_usage) | **GET** /actors/usage | *ActorsLogsApi* | [**actors_logs_export**](docs/ActorsLogsApi.md#actors_logs_export) | **POST** /actors/logs/export | *ActorsLogsApi* | [**actors_logs_get**](docs/ActorsLogsApi.md#actors_logs_get) | **GET** /actors/logs | +======= +*ActorsApi* | [**actors_create**](docs/ActorsApi.md#actors_create) | **POST** /v2/actors | +*ActorsApi* | [**actors_destroy**](docs/ActorsApi.md#actors_destroy) | **DELETE** /v2/actors/{actor} | +*ActorsApi* | [**actors_get**](docs/ActorsApi.md#actors_get) | **GET** /v2/actors/{actor} | +*ActorsApi* | [**actors_list**](docs/ActorsApi.md#actors_list) | **GET** /v2/actors | +*ActorsApi* | [**actors_upgrade**](docs/ActorsApi.md#actors_upgrade) | **POST** /v2/actors/{actor}/upgrade | +*ActorsApi* | [**actors_upgrade_all**](docs/ActorsApi.md#actors_upgrade_all) | **POST** /v2/actors/upgrade | +*ActorsLogsApi* | [**actors_logs_get**](docs/ActorsLogsApi.md#actors_logs_get) | **GET** /v2/actors/logs | +*ActorsMetricsApi* | [**actors_metrics_get**](docs/ActorsMetricsApi.md#actors_metrics_get) | **GET** /v2/actors/{actor}/metrics/history | +>>>>>>> 43e5048bc (fix: api changes) *BuildsApi* | [**builds_complete**](docs/BuildsApi.md#builds_complete) | **POST** /builds/{build}/complete | *BuildsApi* | [**builds_get**](docs/BuildsApi.md#builds_get) | **GET** /builds/{build} | *BuildsApi* | [**builds_list**](docs/BuildsApi.md#builds_list) | **GET** /builds | *BuildsApi* | [**builds_patch_tags**](docs/BuildsApi.md#builds_patch_tags) | **PATCH** /builds/{build}/tags | *BuildsApi* | [**builds_prepare**](docs/BuildsApi.md#builds_prepare) | **POST** /builds/prepare | +*ContainersApi* | [**containers_create**](docs/ContainersApi.md#containers_create) | **POST** /v1/containers | +*ContainersApi* | [**containers_destroy**](docs/ContainersApi.md#containers_destroy) | **DELETE** /v1/containers/{container} | +*ContainersApi* | [**containers_get**](docs/ContainersApi.md#containers_get) | **GET** /v1/containers/{container} | +*ContainersApi* | [**containers_list**](docs/ContainersApi.md#containers_list) | **GET** /v1/containers | +*ContainersApi* | [**containers_upgrade**](docs/ContainersApi.md#containers_upgrade) | **POST** /v1/containers/{container}/upgrade | +*ContainersApi* | [**containers_upgrade_all**](docs/ContainersApi.md#containers_upgrade_all) | **POST** /v1/containers/upgrade | +*ContainersLogsApi* | [**containers_logs_get**](docs/ContainersLogsApi.md#containers_logs_get) | **GET** /v1/containers/logs | +*ContainersMetricsApi* | [**containers_metrics_get**](docs/ContainersMetricsApi.md#containers_metrics_get) | **GET** /v1/containers/{container}/metrics/history | *RegionsApi* | [**regions_list**](docs/RegionsApi.md#regions_list) | **GET** /regions | *RegionsApi* 
| [**regions_recommend**](docs/RegionsApi.md#regions_recommend) | **GET** /regions/recommend | *RoutesApi* | [**routes_delete**](docs/RoutesApi.md#routes_delete) | **DELETE** /routes/{id} | @@ -60,6 +79,7 @@ Class | Method | HTTP request | Description - [ActorsEndpointType](docs/ActorsEndpointType.md) - [ActorsExportActorLogsResponse](docs/ActorsExportActorLogsResponse.md) - [ActorsGetActorLogsResponse](docs/ActorsGetActorLogsResponse.md) + - [ActorsGetActorMetricsResponse](docs/ActorsGetActorMetricsResponse.md) - [ActorsGetActorResponse](docs/ActorsGetActorResponse.md) - [ActorsGetActorUsageResponse](docs/ActorsGetActorUsageResponse.md) - [ActorsLifecycle](docs/ActorsLifecycle.md) @@ -70,8 +90,12 @@ Class | Method | HTTP request | Description - [ActorsPort](docs/ActorsPort.md) - [ActorsPortProtocol](docs/ActorsPortProtocol.md) - [ActorsPortRouting](docs/ActorsPortRouting.md) +<<<<<<< HEAD - [ActorsQueryActorsResponse](docs/ActorsQueryActorsResponse.md) - [ActorsResources](docs/ActorsResources.md) +======= + - [ActorsQueryLogStream](docs/ActorsQueryLogStream.md) +>>>>>>> 43e5048bc (fix: api changes) - [ActorsRuntime](docs/ActorsRuntime.md) - [ActorsUpgradeActorRequest](docs/ActorsUpgradeActorRequest.md) - [ActorsUpgradeAllActorsRequest](docs/ActorsUpgradeAllActorsRequest.md) @@ -87,6 +111,30 @@ Class | Method | HTTP request | Description - [BuildsPrepareBuildRequest](docs/BuildsPrepareBuildRequest.md) - [BuildsPrepareBuildResponse](docs/BuildsPrepareBuildResponse.md) - [BuildsResources](docs/BuildsResources.md) + - [ContainersContainer](docs/ContainersContainer.md) + - [ContainersCreateContainerNetworkRequest](docs/ContainersCreateContainerNetworkRequest.md) + - [ContainersCreateContainerPortRequest](docs/ContainersCreateContainerPortRequest.md) + - [ContainersCreateContainerRequest](docs/ContainersCreateContainerRequest.md) + - [ContainersCreateContainerResponse](docs/ContainersCreateContainerResponse.md) + - [ContainersCreateContainerRuntimeNetworkRequest](docs/ContainersCreateContainerRuntimeNetworkRequest.md) + - [ContainersCreateContainerRuntimeRequest](docs/ContainersCreateContainerRuntimeRequest.md) + - [ContainersEndpointType](docs/ContainersEndpointType.md) + - [ContainersGetContainerLogsResponse](docs/ContainersGetContainerLogsResponse.md) + - [ContainersGetContainerMetricsResponse](docs/ContainersGetContainerMetricsResponse.md) + - [ContainersGetContainerResponse](docs/ContainersGetContainerResponse.md) + - [ContainersLifecycle](docs/ContainersLifecycle.md) + - [ContainersListContainersResponse](docs/ContainersListContainersResponse.md) + - [ContainersNetwork](docs/ContainersNetwork.md) + - [ContainersNetworkMode](docs/ContainersNetworkMode.md) + - [ContainersPort](docs/ContainersPort.md) + - [ContainersPortProtocol](docs/ContainersPortProtocol.md) + - [ContainersPortRouting](docs/ContainersPortRouting.md) + - [ContainersQueryLogStream](docs/ContainersQueryLogStream.md) + - [ContainersResources](docs/ContainersResources.md) + - [ContainersRuntime](docs/ContainersRuntime.md) + - [ContainersUpgradeAllContainersRequest](docs/ContainersUpgradeAllContainersRequest.md) + - [ContainersUpgradeAllContainersResponse](docs/ContainersUpgradeAllContainersResponse.md) + - [ContainersUpgradeContainerRequest](docs/ContainersUpgradeContainerRequest.md) - [ErrorBody](docs/ErrorBody.md) - [Pagination](docs/Pagination.md) - [RegionsListRegionsResponse](docs/RegionsListRegionsResponse.md) diff --git a/sdks/api/runtime/rust/docs/ActorsActor.md b/sdks/api/runtime/rust/docs/ActorsActor.md index 
2e0a4dccfe..51f1a591c7 100644 --- a/sdks/api/runtime/rust/docs/ActorsActor.md +++ b/sdks/api/runtime/rust/docs/ActorsActor.md @@ -9,7 +9,6 @@ Name | Type | Description | Notes **tags** | Option<[**serde_json::Value**](.md)> | | **runtime** | [**crate::models::ActorsRuntime**](ActorsRuntime.md) | | **network** | [**crate::models::ActorsNetwork**](ActorsNetwork.md) | | -**resources** | Option<[**crate::models::ActorsResources**](ActorsResources.md)> | | [optional] **lifecycle** | [**crate::models::ActorsLifecycle**](ActorsLifecycle.md) | | **created_at** | **String** | RFC3339 timestamp | **started_at** | Option<**String**> | RFC3339 timestamp | [optional] diff --git a/sdks/api/runtime/rust/docs/ActorsApi.md b/sdks/api/runtime/rust/docs/ActorsApi.md index 2166a34a6f..894e6d3b75 100644 --- a/sdks/api/runtime/rust/docs/ActorsApi.md +++ b/sdks/api/runtime/rust/docs/ActorsApi.md @@ -4,6 +4,7 @@ All URIs are relative to *https://api.rivet.gg* Method | HTTP request | Description ------------- | ------------- | ------------- +<<<<<<< HEAD [**actors_create**](ActorsApi.md#actors_create) | **POST** /actors | [**actors_destroy**](ActorsApi.md#actors_destroy) | **DELETE** /actors/{actor} | [**actors_get**](ActorsApi.md#actors_get) | **GET** /actors/{actor} | @@ -12,6 +13,14 @@ Method | HTTP request | Description [**actors_upgrade**](ActorsApi.md#actors_upgrade) | **POST** /actors/{actor}/upgrade | [**actors_upgrade_all**](ActorsApi.md#actors_upgrade_all) | **POST** /actors/upgrade | [**actors_usage**](ActorsApi.md#actors_usage) | **GET** /actors/usage | +======= +[**actors_create**](ActorsApi.md#actors_create) | **POST** /v2/actors | +[**actors_destroy**](ActorsApi.md#actors_destroy) | **DELETE** /v2/actors/{actor} | +[**actors_get**](ActorsApi.md#actors_get) | **GET** /v2/actors/{actor} | +[**actors_list**](ActorsApi.md#actors_list) | **GET** /v2/actors | +[**actors_upgrade**](ActorsApi.md#actors_upgrade) | **POST** /v2/actors/{actor}/upgrade | +[**actors_upgrade_all**](ActorsApi.md#actors_upgrade_all) | **POST** /v2/actors/upgrade | +>>>>>>> 43e5048bc (fix: api changes) diff --git a/sdks/api/runtime/rust/docs/ActorsCreateActorRequest.md b/sdks/api/runtime/rust/docs/ActorsCreateActorRequest.md index 27d9c9d4f2..5f5686b46b 100644 --- a/sdks/api/runtime/rust/docs/ActorsCreateActorRequest.md +++ b/sdks/api/runtime/rust/docs/ActorsCreateActorRequest.md @@ -10,7 +10,6 @@ Name | Type | Description | Notes **build_tags** | Option<[**serde_json::Value**](.md)> | | [optional] **runtime** | Option<[**crate::models::ActorsCreateActorRuntimeRequest**](ActorsCreateActorRuntimeRequest.md)> | | [optional] **network** | Option<[**crate::models::ActorsCreateActorNetworkRequest**](ActorsCreateActorNetworkRequest.md)> | | [optional] -**resources** | Option<[**crate::models::ActorsResources**](ActorsResources.md)> | | [optional] **lifecycle** | Option<[**crate::models::ActorsLifecycle**](ActorsLifecycle.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/api/runtime/rust/docs/ActorsGetActorMetricsResponse.md b/sdks/api/runtime/rust/docs/ActorsGetActorMetricsResponse.md new file mode 100644 index 0000000000..1fb694e7a6 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ActorsGetActorMetricsResponse.md @@ -0,0 +1,15 @@ +# ActorsGetActorMetricsResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**actor_ids** | 
**Vec** | | +**metric_names** | **Vec** | | +**metric_attributes** | [**Vec<::std::collections::HashMap>**](map.md) | | +**metric_types** | **Vec** | | +**metric_values** | [**Vec>**](array.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ActorsLogsApi.md b/sdks/api/runtime/rust/docs/ActorsLogsApi.md index 72e930db8b..ba2d9273dc 100644 --- a/sdks/api/runtime/rust/docs/ActorsLogsApi.md +++ b/sdks/api/runtime/rust/docs/ActorsLogsApi.md @@ -4,8 +4,12 @@ All URIs are relative to *https://api.rivet.gg* Method | HTTP request | Description ------------- | ------------- | ------------- +<<<<<<< HEAD [**actors_logs_export**](ActorsLogsApi.md#actors_logs_export) | **POST** /actors/logs/export | [**actors_logs_get**](ActorsLogsApi.md#actors_logs_get) | **GET** /actors/logs | +======= +[**actors_logs_get**](ActorsLogsApi.md#actors_logs_get) | **GET** /v2/actors/logs | +>>>>>>> 43e5048bc (fix: api changes) diff --git a/sdks/api/runtime/rust/docs/ActorsMetricsApi.md b/sdks/api/runtime/rust/docs/ActorsMetricsApi.md new file mode 100644 index 0000000000..b9865fb514 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ActorsMetricsApi.md @@ -0,0 +1,44 @@ +# \ActorsMetricsApi + +All URIs are relative to *https://api.rivet.gg* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**actors_metrics_get**](ActorsMetricsApi.md#actors_metrics_get) | **GET** /v2/actors/{actor}/metrics/history | + + + +## actors_metrics_get + +> crate::models::ActorsGetActorMetricsResponse actors_metrics_get(actor, start, end, interval, project, environment) + + +Returns the metrics for a given actor. 
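For reference, the new metrics route can also be exercised without the generated client. The sketch below is a plain `reqwest` call (with the `json` feature and tokio assumed) against `GET /v2/actors/{actor}/metrics/history`, using the base URL and bearer auth described in the SDK README; the actor id, token source, and timestamp values are placeholders, and the time units for `start`/`end`/`interval` are not spelled out here, so treat those as assumptions:

```rust
use reqwest::Client;
use serde_json::Value;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let actor = "00000000-0000-0000-0000-000000000000"; // placeholder actor id
    let token = std::env::var("RIVET_TOKEN")?;          // bearer token, per the BearerAuth scheme

    // GET /v2/actors/{actor}/metrics/history?start=..&end=..&interval=.. (values are placeholders)
    let resp: Value = Client::new()
        .get(format!("https://api.rivet.gg/v2/actors/{actor}/metrics/history"))
        .bearer_auth(token)
        .query(&[
            ("start", "1700000000000"),
            ("end", "1700003600000"),
            ("interval", "60000"),
        ])
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;

    // The response carries metric_names, metric_types, metric_attributes, and metric_values arrays.
    // Printing names/types zipped by index assumes they are parallel, which the spec does not state explicitly.
    if let Some(names) = resp["metric_names"].as_array() {
        for (i, name) in names.iter().enumerate() {
            println!("{} ({})", name, resp["metric_types"][i]);
        }
    }
    Ok(())
}
```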
+ +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**actor** | **String** | The id of the actor to destroy | [required] | +**start** | **i32** | | [required] | +**end** | **i32** | | [required] | +**interval** | **i32** | | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | + +### Return type + +[**crate::models::ActorsGetActorMetricsResponse**](ActorsGetActorMetricsResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/api/runtime/rust/docs/ContainersApi.md b/sdks/api/runtime/rust/docs/ContainersApi.md new file mode 100644 index 0000000000..78ad6e627f --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersApi.md @@ -0,0 +1,213 @@ +# \ContainersApi + +All URIs are relative to *https://api.rivet.gg* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**containers_create**](ContainersApi.md#containers_create) | **POST** /v1/containers | +[**containers_destroy**](ContainersApi.md#containers_destroy) | **DELETE** /v1/containers/{container} | +[**containers_get**](ContainersApi.md#containers_get) | **GET** /v1/containers/{container} | +[**containers_list**](ContainersApi.md#containers_list) | **GET** /v1/containers | +[**containers_upgrade**](ContainersApi.md#containers_upgrade) | **POST** /v1/containers/{container}/upgrade | +[**containers_upgrade_all**](ContainersApi.md#containers_upgrade_all) | **POST** /v1/containers/upgrade | + + + +## containers_create + +> crate::models::ContainersCreateContainerResponse containers_create(containers_create_container_request, project, environment, endpoint_type) + + +Create a new container. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**containers_create_container_request** | [**ContainersCreateContainerRequest**](ContainersCreateContainerRequest.md) | | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | +**endpoint_type** | Option<[**ContainersEndpointType**](.md)> | | | + +### Return type + +[**crate::models::ContainersCreateContainerResponse**](ContainersCreateContainerResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## containers_destroy + +> serde_json::Value containers_destroy(container, project, environment, override_kill_timeout) + + +Destroy a container. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**container** | **String** | The id of the container to destroy | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | +**override_kill_timeout** | Option<**i64**> | The duration to wait for in milliseconds before killing the container. 
This should be used to override the default kill timeout if a faster time is needed, say for ignoring a graceful shutdown. | | + +### Return type + +[**serde_json::Value**](serde_json::Value.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## containers_get + +> crate::models::ContainersGetContainerResponse containers_get(container, project, environment, endpoint_type) + + +Gets a container. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**container** | **String** | The id of the container to destroy | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | +**endpoint_type** | Option<[**ContainersEndpointType**](.md)> | | | + +### Return type + +[**crate::models::ContainersGetContainerResponse**](ContainersGetContainerResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## containers_list + +> crate::models::ContainersListContainersResponse containers_list(project, environment, endpoint_type, tags_json, include_destroyed, cursor) + + +Lists all containers associated with the token used. Can be filtered by tags in the query string. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | +**endpoint_type** | Option<[**ContainersEndpointType**](.md)> | | | +**tags_json** | Option<**String**> | | | +**include_destroyed** | Option<**bool**> | | | +**cursor** | Option<**String**> | | | + +### Return type + +[**crate::models::ContainersListContainersResponse**](ContainersListContainersResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## containers_upgrade + +> serde_json::Value containers_upgrade(container, containers_upgrade_container_request, project, environment) + + +Upgrades a container. 
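The listing route pairs a `tags_json` filter with cursor-based pagination (`ContainersListContainersResponse` returns a `Pagination` object with a `cursor`). A hedged `reqwest` sketch of walking the pages, assuming the last page simply omits the cursor (the termination condition is not stated in the spec) and using an invented tag filter:

```rust
use reqwest::Client;
use serde_json::Value;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let token = std::env::var("RIVET_TOKEN")?; // bearer token, per the BearerAuth scheme
    let client = Client::new();
    let mut cursor: Option<String> = None;

    loop {
        // GET /v1/containers?tags_json=...&cursor=...; the tag filter is an invented example.
        let mut query = vec![("tags_json", r#"{"name":"my-container"}"#.to_string())];
        if let Some(c) = &cursor {
            query.push(("cursor", c.clone()));
        }
        let page: Value = client
            .get("https://api.rivet.gg/v1/containers")
            .bearer_auth(&token)
            .query(&query)
            .send()
            .await?
            .error_for_status()?
            .json()
            .await?;

        if let Some(containers) = page["containers"].as_array() {
            for container in containers {
                println!("{} in {}", container["id"], container["region"]);
            }
        }

        // Assumes the final page omits `pagination.cursor`; adjust if the API signals the end differently.
        match page["pagination"]["cursor"].as_str() {
            Some(next) => cursor = Some(next.to_string()),
            None => break,
        }
    }
    Ok(())
}
```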
+ +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**container** | **String** | The id of the container to upgrade | [required] | +**containers_upgrade_container_request** | [**ContainersUpgradeContainerRequest**](ContainersUpgradeContainerRequest.md) | | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | + +### Return type + +[**serde_json::Value**](serde_json::Value.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## containers_upgrade_all + +> crate::models::ContainersUpgradeAllContainersResponse containers_upgrade_all(containers_upgrade_all_containers_request, project, environment) + + +Upgrades all containers matching the given tags. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**containers_upgrade_all_containers_request** | [**ContainersUpgradeAllContainersRequest**](ContainersUpgradeAllContainersRequest.md) | | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | + +### Return type + +[**crate::models::ContainersUpgradeAllContainersResponse**](ContainersUpgradeAllContainersResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/api/runtime/rust/docs/ContainersContainer.md b/sdks/api/runtime/rust/docs/ContainersContainer.md new file mode 100644 index 0000000000..d6d59a5979 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersContainer.md @@ -0,0 +1,20 @@ +# ContainersContainer + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **String** | Can be a UUID or base36 encoded binary data. 
| +**region** | **String** | | +**tags** | Option<[**serde_json::Value**](.md)> | | +**runtime** | [**crate::models::ContainersRuntime**](ContainersRuntime.md) | | +**network** | [**crate::models::ContainersNetwork**](ContainersNetwork.md) | | +**resources** | [**crate::models::ContainersResources**](ContainersResources.md) | | +**lifecycle** | [**crate::models::ContainersLifecycle**](ContainersLifecycle.md) | | +**created_at** | **String** | RFC3339 timestamp | +**started_at** | Option<**String**> | RFC3339 timestamp | [optional] +**destroyed_at** | Option<**String**> | RFC3339 timestamp | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersCreateContainerNetworkRequest.md b/sdks/api/runtime/rust/docs/ContainersCreateContainerNetworkRequest.md new file mode 100644 index 0000000000..75819c3d43 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersCreateContainerNetworkRequest.md @@ -0,0 +1,13 @@ +# ContainersCreateContainerNetworkRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**mode** | Option<[**crate::models::ContainersNetworkMode**](ContainersNetworkMode.md)> | | [optional] +**ports** | Option<[**::std::collections::HashMap**](ContainersCreateContainerPortRequest.md)> | | [optional] +**wait_ready** | Option<**bool**> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersCreateContainerPortRequest.md b/sdks/api/runtime/rust/docs/ContainersCreateContainerPortRequest.md new file mode 100644 index 0000000000..1b0e5932e5 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersCreateContainerPortRequest.md @@ -0,0 +1,13 @@ +# ContainersCreateContainerPortRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**protocol** | [**crate::models::ContainersPortProtocol**](ContainersPortProtocol.md) | | +**internal_port** | Option<**i32**> | | [optional] +**routing** | Option<[**crate::models::ContainersPortRouting**](ContainersPortRouting.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersCreateContainerRequest.md b/sdks/api/runtime/rust/docs/ContainersCreateContainerRequest.md new file mode 100644 index 0000000000..0eb6be73e2 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersCreateContainerRequest.md @@ -0,0 +1,18 @@ +# ContainersCreateContainerRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**region** | Option<**String**> | | [optional] +**tags** | Option<[**serde_json::Value**](.md)> | | +**build** | Option<[**uuid::Uuid**](uuid::Uuid.md)> | | [optional] +**build_tags** | Option<[**serde_json::Value**](.md)> | | [optional] +**runtime** | Option<[**crate::models::ContainersCreateContainerRuntimeRequest**](ContainersCreateContainerRuntimeRequest.md)> | | [optional] +**network** | Option<[**crate::models::ContainersCreateContainerNetworkRequest**](ContainersCreateContainerNetworkRequest.md)> | | [optional] +**resources** | 
[**crate::models::ContainersResources**](ContainersResources.md) | | +**lifecycle** | Option<[**crate::models::ContainersLifecycle**](ContainersLifecycle.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersCreateContainerResponse.md b/sdks/api/runtime/rust/docs/ContainersCreateContainerResponse.md new file mode 100644 index 0000000000..cb0111c5b1 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersCreateContainerResponse.md @@ -0,0 +1,11 @@ +# ContainersCreateContainerResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**container** | [**crate::models::ContainersContainer**](ContainersContainer.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersCreateContainerRuntimeNetworkRequest.md b/sdks/api/runtime/rust/docs/ContainersCreateContainerRuntimeNetworkRequest.md new file mode 100644 index 0000000000..d724ce6351 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersCreateContainerRuntimeNetworkRequest.md @@ -0,0 +1,11 @@ +# ContainersCreateContainerRuntimeNetworkRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**endpoint_type** | [**crate::models::ContainersEndpointType**](ContainersEndpointType.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersCreateContainerRuntimeRequest.md b/sdks/api/runtime/rust/docs/ContainersCreateContainerRuntimeRequest.md new file mode 100644 index 0000000000..0873cd62be --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersCreateContainerRuntimeRequest.md @@ -0,0 +1,12 @@ +# ContainersCreateContainerRuntimeRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**environment** | Option<**::std::collections::HashMap**> | | [optional] +**network** | Option<[**crate::models::ContainersCreateContainerRuntimeNetworkRequest**](ContainersCreateContainerRuntimeNetworkRequest.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersEndpointType.md b/sdks/api/runtime/rust/docs/ContainersEndpointType.md new file mode 100644 index 0000000000..62e04640ae --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersEndpointType.md @@ -0,0 +1,10 @@ +# ContainersEndpointType + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersGetContainerLogsResponse.md b/sdks/api/runtime/rust/docs/ContainersGetContainerLogsResponse.md new file mode 100644 index 0000000000..ff92071c09 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersGetContainerLogsResponse.md @@ -0,0 +1,17 @@ +# 
ContainersGetContainerLogsResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**container_ids** | **Vec** | List of container IDs in these logs. The order of these correspond to the index in the log entry. | +**lines** | **Vec** | Sorted old to new. | +**timestamps** | **Vec** | Sorted old to new. | +**streams** | **Vec** | Streams the logs came from. 0 = stdout 1 = stderr | +**foreigns** | **Vec** | List of flags denoting if this log is not directly from the container. | +**container_indices** | **Vec** | Index of the container that this log was for. Use this index to look the full ID in `container_ids`. | +**watch** | [**crate::models::WatchResponse**](WatchResponse.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersGetContainerMetricsResponse.md b/sdks/api/runtime/rust/docs/ContainersGetContainerMetricsResponse.md new file mode 100644 index 0000000000..c6c0c87b35 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersGetContainerMetricsResponse.md @@ -0,0 +1,15 @@ +# ContainersGetContainerMetricsResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**container_ids** | **Vec** | | +**metric_names** | **Vec** | | +**metric_attributes** | [**Vec<::std::collections::HashMap>**](map.md) | | +**metric_types** | **Vec** | | +**metric_values** | [**Vec>**](array.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersGetContainerResponse.md b/sdks/api/runtime/rust/docs/ContainersGetContainerResponse.md new file mode 100644 index 0000000000..5cc164ff76 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersGetContainerResponse.md @@ -0,0 +1,11 @@ +# ContainersGetContainerResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**container** | [**crate::models::ContainersContainer**](ContainersContainer.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersLifecycle.md b/sdks/api/runtime/rust/docs/ContainersLifecycle.md new file mode 100644 index 0000000000..6e5dc7cef0 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersLifecycle.md @@ -0,0 +1,12 @@ +# ContainersLifecycle + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**kill_timeout** | Option<**i64**> | The duration to wait for in milliseconds before killing the container. This should be set to a safe default, and can be overridden during a DELETE request if needed. | [optional] +**durable** | Option<**bool**> | If true, the container will try to reschedule itself automatically in the event of a crash or a datacenter failover. The container will not reschedule if it exits successfully. 
| [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersListContainersResponse.md b/sdks/api/runtime/rust/docs/ContainersListContainersResponse.md new file mode 100644 index 0000000000..c1a4119e0d --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersListContainersResponse.md @@ -0,0 +1,12 @@ +# ContainersListContainersResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**containers** | [**Vec**](ContainersContainer.md) | A list of containers for the project associated with the token. | +**pagination** | [**crate::models::Pagination**](Pagination.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersLogsApi.md b/sdks/api/runtime/rust/docs/ContainersLogsApi.md new file mode 100644 index 0000000000..c4c1c3f5cb --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersLogsApi.md @@ -0,0 +1,46 @@ +# \ContainersLogsApi + +All URIs are relative to *https://api.rivet.gg* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**containers_logs_get**](ContainersLogsApi.md#containers_logs_get) | **GET** /v1/containers/logs | + + + +## containers_logs_get + +> crate::models::ContainersGetContainerLogsResponse containers_logs_get(stream, container_ids_json, project, environment, search_text, search_case_sensitive, search_enable_regex, watch_index) + + +Returns the logs for a given container. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**stream** | [**ContainersQueryLogStream**](.md) | | [required] | +**container_ids_json** | **String** | | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | +**search_text** | Option<**String**> | | | +**search_case_sensitive** | Option<**bool**> | | | +**search_enable_regex** | Option<**bool**> | | | +**watch_index** | Option<**String**> | A query parameter denoting the requests watch index. 
| | + +### Return type + +[**crate::models::ContainersGetContainerLogsResponse**](ContainersGetContainerLogsResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/api/runtime/rust/docs/ContainersMetricsApi.md b/sdks/api/runtime/rust/docs/ContainersMetricsApi.md new file mode 100644 index 0000000000..0d08a99321 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersMetricsApi.md @@ -0,0 +1,44 @@ +# \ContainersMetricsApi + +All URIs are relative to *https://api.rivet.gg* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**containers_metrics_get**](ContainersMetricsApi.md#containers_metrics_get) | **GET** /v1/containers/{container}/metrics/history | + + + +## containers_metrics_get + +> crate::models::ContainersGetContainerMetricsResponse containers_metrics_get(container, start, end, interval, project, environment) + + +Returns the metrics for a given container. + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**container** | **String** | The id of the container to get metrics for | [required] | +**start** | **i32** | | [required] | +**end** | **i32** | | [required] | +**interval** | **i32** | | [required] | +**project** | Option<**String**> | | | +**environment** | Option<**String**> | | | + +### Return type + +[**crate::models::ContainersGetContainerMetricsResponse**](ContainersGetContainerMetricsResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/api/runtime/rust/docs/ContainersNetwork.md b/sdks/api/runtime/rust/docs/ContainersNetwork.md new file mode 100644 index 0000000000..41b81ebb4f --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersNetwork.md @@ -0,0 +1,12 @@ +# ContainersNetwork + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**mode** | [**crate::models::ContainersNetworkMode**](ContainersNetworkMode.md) | | +**ports** | [**::std::collections::HashMap**](ContainersPort.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersNetworkMode.md b/sdks/api/runtime/rust/docs/ContainersNetworkMode.md new file mode 100644 index 0000000000..bc4a4e115b --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersNetworkMode.md @@ -0,0 +1,10 @@ +# ContainersNetworkMode + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersPort.md b/sdks/api/runtime/rust/docs/ContainersPort.md new file mode 100644 index 0000000000..46ace3b43e
--- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersPort.md @@ -0,0 +1,17 @@ +# ContainersPort + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**protocol** | [**crate::models::ContainersPortProtocol**](ContainersPortProtocol.md) | | +**internal_port** | Option<**i32**> | | [optional] +**hostname** | Option<**String**> | | [optional] +**port** | Option<**i32**> | | [optional] +**path** | Option<**String**> | | [optional] +**url** | Option<**String**> | Fully formed connection URL including protocol, hostname, port, and path, if applicable. | [optional] +**routing** | [**crate::models::ContainersPortRouting**](ContainersPortRouting.md) | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersPortProtocol.md b/sdks/api/runtime/rust/docs/ContainersPortProtocol.md new file mode 100644 index 0000000000..a69a314e54 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersPortProtocol.md @@ -0,0 +1,10 @@ +# ContainersPortProtocol + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersPortRouting.md b/sdks/api/runtime/rust/docs/ContainersPortRouting.md new file mode 100644 index 0000000000..dd0c620964 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersPortRouting.md @@ -0,0 +1,12 @@ +# ContainersPortRouting + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**guard** | Option<[**serde_json::Value**](.md)> | | [optional] +**host** | Option<[**serde_json::Value**](.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersQueryLogStream.md b/sdks/api/runtime/rust/docs/ContainersQueryLogStream.md new file mode 100644 index 0000000000..84b7004e27 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersQueryLogStream.md @@ -0,0 +1,10 @@ +# ContainersQueryLogStream + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersResources.md b/sdks/api/runtime/rust/docs/ContainersResources.md new file mode 100644 index 0000000000..a32f979d17 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersResources.md @@ -0,0 +1,12 @@ +# ContainersResources + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**cpu** | **i32** | The number of CPU cores in millicores, or 1/1000 of a core. For example, 1/8 of a core would be 125 millicores, and 1 core would be 1000 millicores. 
| +**memory** | **i32** | The amount of memory in megabytes | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersRuntime.md b/sdks/api/runtime/rust/docs/ContainersRuntime.md new file mode 100644 index 0000000000..ea30224b4b --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersRuntime.md @@ -0,0 +1,13 @@ +# ContainersRuntime + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**build** | [**uuid::Uuid**](uuid::Uuid.md) | | +**arguments** | Option<**Vec**> | | [optional] +**environment** | Option<**::std::collections::HashMap**> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersUpgradeAllContainersRequest.md b/sdks/api/runtime/rust/docs/ContainersUpgradeAllContainersRequest.md new file mode 100644 index 0000000000..bd912472ca --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersUpgradeAllContainersRequest.md @@ -0,0 +1,13 @@ +# ContainersUpgradeAllContainersRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**tags** | Option<[**serde_json::Value**](.md)> | | +**build** | Option<[**uuid::Uuid**](uuid::Uuid.md)> | | [optional] +**build_tags** | Option<[**serde_json::Value**](.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersUpgradeAllContainersResponse.md b/sdks/api/runtime/rust/docs/ContainersUpgradeAllContainersResponse.md new file mode 100644 index 0000000000..04eb18d657 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersUpgradeAllContainersResponse.md @@ -0,0 +1,11 @@ +# ContainersUpgradeAllContainersResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**count** | **i64** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/docs/ContainersUpgradeContainerRequest.md b/sdks/api/runtime/rust/docs/ContainersUpgradeContainerRequest.md new file mode 100644 index 0000000000..558addcae0 --- /dev/null +++ b/sdks/api/runtime/rust/docs/ContainersUpgradeContainerRequest.md @@ -0,0 +1,12 @@ +# ContainersUpgradeContainerRequest + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**build** | Option<[**uuid::Uuid**](uuid::Uuid.md)> | | [optional] +**build_tags** | Option<[**serde_json::Value**](.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/api/runtime/rust/src/apis/actors_api.rs b/sdks/api/runtime/rust/src/apis/actors_api.rs index 9580242611..f35907fa79 100644 --- a/sdks/api/runtime/rust/src/apis/actors_api.rs +++ b/sdks/api/runtime/rust/src/apis/actors_api.rs @@ -126,7 +126,7 @@ pub async fn actors_create(configuration: &configuration::Configuration, actors_ let 
local_var_client = &local_var_configuration.client; - let local_var_uri_str = format!("{}/actors", local_var_configuration.base_path); + let local_var_uri_str = format!("{}/v2/actors", local_var_configuration.base_path); let mut local_var_req_builder = local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); if let Some(ref local_var_str) = project { @@ -167,7 +167,7 @@ pub async fn actors_destroy(configuration: &configuration::Configuration, actor: let local_var_client = &local_var_configuration.client; - let local_var_uri_str = format!("{}/actors/{actor}", local_var_configuration.base_path, actor=crate::apis::urlencode(actor)); + let local_var_uri_str = format!("{}/v2/actors/{actor}", local_var_configuration.base_path, actor=crate::apis::urlencode(actor)); let mut local_var_req_builder = local_var_client.request(reqwest::Method::DELETE, local_var_uri_str.as_str()); if let Some(ref local_var_str) = project { @@ -207,7 +207,7 @@ pub async fn actors_get(configuration: &configuration::Configuration, actor: &st let local_var_client = &local_var_configuration.client; - let local_var_uri_str = format!("{}/actors/{actor}", local_var_configuration.base_path, actor=crate::apis::urlencode(actor)); + let local_var_uri_str = format!("{}/v2/actors/{actor}", local_var_configuration.base_path, actor=crate::apis::urlencode(actor)); let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); if let Some(ref local_var_str) = project { @@ -247,7 +247,7 @@ pub async fn actors_list(configuration: &configuration::Configuration, project: let local_var_client = &local_var_configuration.client; - let local_var_uri_str = format!("{}/actors", local_var_configuration.base_path); + let local_var_uri_str = format!("{}/v2/actors", local_var_configuration.base_path); let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); if let Some(ref local_var_str) = project { @@ -339,7 +339,7 @@ pub async fn actors_upgrade(configuration: &configuration::Configuration, actor: let local_var_client = &local_var_configuration.client; - let local_var_uri_str = format!("{}/actors/{actor}/upgrade", local_var_configuration.base_path, actor=crate::apis::urlencode(actor)); + let local_var_uri_str = format!("{}/v2/actors/{actor}/upgrade", local_var_configuration.base_path, actor=crate::apis::urlencode(actor)); let mut local_var_req_builder = local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); if let Some(ref local_var_str) = project { @@ -377,7 +377,7 @@ pub async fn actors_upgrade_all(configuration: &configuration::Configuration, ac let local_var_client = &local_var_configuration.client; - let local_var_uri_str = format!("{}/actors/upgrade", local_var_configuration.base_path); + let local_var_uri_str = format!("{}/v2/actors/upgrade", local_var_configuration.base_path); let mut local_var_req_builder = local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); if let Some(ref local_var_str) = project { diff --git a/sdks/api/runtime/rust/src/apis/actors_logs_api.rs b/sdks/api/runtime/rust/src/apis/actors_logs_api.rs index ca6fdbf730..fb540f3afa 100644 --- a/sdks/api/runtime/rust/src/apis/actors_logs_api.rs +++ b/sdks/api/runtime/rust/src/apis/actors_logs_api.rs @@ -80,7 +80,7 @@ pub async fn actors_logs_get(configuration: &configuration::Configuration, proje let local_var_client = &local_var_configuration.client; - let local_var_uri_str = format!("{}/actors/logs", 
local_var_configuration.base_path); + let local_var_uri_str = format!("{}/v2/actors/logs", local_var_configuration.base_path); let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); if let Some(ref local_var_str) = project { diff --git a/sdks/api/runtime/rust/src/apis/actors_metrics_api.rs b/sdks/api/runtime/rust/src/apis/actors_metrics_api.rs new file mode 100644 index 0000000000..0ada251d03 --- /dev/null +++ b/sdks/api/runtime/rust/src/apis/actors_metrics_api.rs @@ -0,0 +1,71 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; + +use crate::apis::ResponseContent; +use super::{Error, configuration}; + + +/// struct for typed errors of method [`actors_metrics_get`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ActorsMetricsGetError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + + +/// Returns the metrics for a given actor. +pub async fn actors_metrics_get(configuration: &configuration::Configuration, actor: &str, start: i32, end: i32, interval: i32, project: Option<&str>, environment: Option<&str>) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/v2/actors/{actor}/metrics/history", local_var_configuration.base_path, actor=crate::apis::urlencode(actor)); + let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + local_var_req_builder = local_var_req_builder.query(&[("start", &start.to_string())]); + local_var_req_builder = local_var_req_builder.query(&[("end", &end.to_string())]); + local_var_req_builder = local_var_req_builder.query(&[("interval", &interval.to_string())]); + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity }; + Err(Error::ResponseError(local_var_error)) + } +} + diff --git 
a/sdks/api/runtime/rust/src/apis/containers_api.rs b/sdks/api/runtime/rust/src/apis/containers_api.rs new file mode 100644 index 0000000000..86da7cf31b --- /dev/null +++ b/sdks/api/runtime/rust/src/apis/containers_api.rs @@ -0,0 +1,342 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; + +use crate::apis::ResponseContent; +use super::{Error, configuration}; + + +/// struct for typed errors of method [`containers_create`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersCreateError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`containers_destroy`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersDestroyError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`containers_get`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersGetError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`containers_list`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersListError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`containers_upgrade`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersUpgradeError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`containers_upgrade_all`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersUpgradeAllError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + + +/// Create a new container. 
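+///
+/// Illustrative usage sketch (not part of the generated client): the project/environment names,
+/// tag value, resource sizes, and the `ContainersResources::new(cpu, memory)` constructor are
+/// assumptions, and `configuration` is assumed to be built elsewhere in an async context.
+/// ```ignore
+/// // Tags are a free-form JSON object; resources are required (CPU in millicores, memory in MB).
+/// let request = crate::models::ContainersCreateContainerRequest::new(
+///     Some(serde_json::json!({ "name": "example" })),
+///     crate::models::ContainersResources::new(1000, 1024),
+/// );
+/// let response = containers_create(&configuration, request, Some("my-project"), Some("prod"), None).await?;
+/// println!("created container {}", response.container.id);
+/// ```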
+pub async fn containers_create(configuration: &configuration::Configuration, containers_create_container_request: crate::models::ContainersCreateContainerRequest, project: Option<&str>, environment: Option<&str>, endpoint_type: Option) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/v1/containers", local_var_configuration.base_path); + let mut local_var_req_builder = local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = endpoint_type { + local_var_req_builder = local_var_req_builder.query(&[("endpoint_type", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + local_var_req_builder = local_var_req_builder.json(&containers_create_container_request); + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity }; + Err(Error::ResponseError(local_var_error)) + } +} + +/// Destroy a container. 
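+///
+/// Illustrative usage sketch (the container id and project/environment names are placeholders;
+/// passing `None` for `override_kill_timeout` keeps the lifecycle's `kill_timeout` default):
+/// ```ignore
+/// containers_destroy(&configuration, "ctr-123", Some("my-project"), Some("prod"), None).await?;
+/// ```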
+pub async fn containers_destroy(configuration: &configuration::Configuration, container: &str, project: Option<&str>, environment: Option<&str>, override_kill_timeout: Option) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/v1/containers/{container}", local_var_configuration.base_path, container=crate::apis::urlencode(container)); + let mut local_var_req_builder = local_var_client.request(reqwest::Method::DELETE, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = override_kill_timeout { + local_var_req_builder = local_var_req_builder.query(&[("override_kill_timeout", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity }; + Err(Error::ResponseError(local_var_error)) + } +} + +/// Gets a container. 
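+///
+/// Illustrative usage sketch (placeholder id; `None` for `endpoint_type` accepts the server default):
+/// ```ignore
+/// let response = containers_get(&configuration, "ctr-123", Some("my-project"), Some("prod"), None).await?;
+/// println!("container {} created at {}", response.container.id, response.container.created_at);
+/// ```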
+pub async fn containers_get(configuration: &configuration::Configuration, container: &str, project: Option<&str>, environment: Option<&str>, endpoint_type: Option) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/v1/containers/{container}", local_var_configuration.base_path, container=crate::apis::urlencode(container)); + let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = endpoint_type { + local_var_req_builder = local_var_req_builder.query(&[("endpoint_type", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity }; + Err(Error::ResponseError(local_var_error)) + } +} + +/// Lists all containers associated with the token used. Can be filtered by tags in the query string. 
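+///
+/// Illustrative usage sketch (the tag filter value is an assumption; `tags_json` is a
+/// JSON-encoded object and `cursor` comes from the previous page's `pagination`):
+/// ```ignore
+/// let response = containers_list(
+///     &configuration,
+///     Some("my-project"),
+///     Some("prod"),
+///     None,                          // endpoint_type
+///     Some(r#"{"name":"example"}"#), // tags_json
+///     None,                          // include_destroyed
+///     None,                          // cursor (first page)
+/// ).await?;
+/// println!("{} containers returned", response.containers.len());
+/// ```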
+pub async fn containers_list(configuration: &configuration::Configuration, project: Option<&str>, environment: Option<&str>, endpoint_type: Option, tags_json: Option<&str>, include_destroyed: Option, cursor: Option<&str>) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/v1/containers", local_var_configuration.base_path); + let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = endpoint_type { + local_var_req_builder = local_var_req_builder.query(&[("endpoint_type", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = tags_json { + local_var_req_builder = local_var_req_builder.query(&[("tags_json", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = include_destroyed { + local_var_req_builder = local_var_req_builder.query(&[("include_destroyed", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = cursor { + local_var_req_builder = local_var_req_builder.query(&[("cursor", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity }; + Err(Error::ResponseError(local_var_error)) + } +} + +/// Upgrades a container. 
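+///
+/// Illustrative usage sketch (the no-argument `::new()` constructor, the container id, and the
+/// build UUID are assumptions; set `build` or `build_tags` to select the build to upgrade to):
+/// ```ignore
+/// let mut request = crate::models::ContainersUpgradeContainerRequest::new();
+/// request.build = Some(uuid::Uuid::parse_str("00000000-0000-0000-0000-000000000000")?);
+/// containers_upgrade(&configuration, "ctr-123", request, Some("my-project"), Some("prod")).await?;
+/// ```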
+pub async fn containers_upgrade(configuration: &configuration::Configuration, container: &str, containers_upgrade_container_request: crate::models::ContainersUpgradeContainerRequest, project: Option<&str>, environment: Option<&str>) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/v1/containers/{container}/upgrade", local_var_configuration.base_path, container=crate::apis::urlencode(container)); + let mut local_var_req_builder = local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + local_var_req_builder = local_var_req_builder.json(&containers_upgrade_container_request); + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity }; + Err(Error::ResponseError(local_var_error)) + } +} + +/// Upgrades all containers matching the given tags. 
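+///
+/// Illustrative usage sketch (the `::new(tags)` constructor and the tag/build values are
+/// assumptions; containers matching `tags` are selected for the upgrade):
+/// ```ignore
+/// let mut request = crate::models::ContainersUpgradeAllContainersRequest::new(
+///     Some(serde_json::json!({ "name": "example" })),
+/// );
+/// request.build = Some(uuid::Uuid::parse_str("00000000-0000-0000-0000-000000000000")?);
+/// let response = containers_upgrade_all(&configuration, request, Some("my-project"), Some("prod")).await?;
+/// println!("count: {}", response.count);
+/// ```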
+pub async fn containers_upgrade_all(configuration: &configuration::Configuration, containers_upgrade_all_containers_request: crate::models::ContainersUpgradeAllContainersRequest, project: Option<&str>, environment: Option<&str>) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/v1/containers/upgrade", local_var_configuration.base_path); + let mut local_var_req_builder = local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + local_var_req_builder = local_var_req_builder.json(&containers_upgrade_all_containers_request); + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity }; + Err(Error::ResponseError(local_var_error)) + } +} + diff --git a/sdks/api/runtime/rust/src/apis/containers_logs_api.rs b/sdks/api/runtime/rust/src/apis/containers_logs_api.rs new file mode 100644 index 0000000000..161d93ab8f --- /dev/null +++ b/sdks/api/runtime/rust/src/apis/containers_logs_api.rs @@ -0,0 +1,82 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; + +use crate::apis::ResponseContent; +use super::{Error, configuration}; + + +/// struct for typed errors of method [`containers_logs_get`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersLogsGetError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + + +/// Returns the logs for a given container. 
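+///
+/// Illustrative usage sketch (the container id is a placeholder; `container_ids_json` is a
+/// JSON-encoded array of ids, and using the stream enum's `Default` is an assumption):
+/// ```ignore
+/// let response = containers_logs_get(
+///     &configuration,
+///     crate::models::ContainersQueryLogStream::default(),
+///     r#"["ctr-123"]"#,
+///     Some("my-project"),
+///     Some("prod"),
+///     None, // search_text
+///     None, // search_case_sensitive
+///     None, // search_enable_regex
+///     None, // watch_index (pass the previous response's watch index to long-poll)
+/// ).await?;
+/// println!("{} log lines returned", response.lines.len());
+/// ```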
+pub async fn containers_logs_get(configuration: &configuration::Configuration, stream: crate::models::ContainersQueryLogStream, container_ids_json: &str, project: Option<&str>, environment: Option<&str>, search_text: Option<&str>, search_case_sensitive: Option, search_enable_regex: Option, watch_index: Option<&str>) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/v1/containers/logs", local_var_configuration.base_path); + let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + local_var_req_builder = local_var_req_builder.query(&[("stream", &stream.to_string())]); + local_var_req_builder = local_var_req_builder.query(&[("container_ids_json", &container_ids_json.to_string())]); + if let Some(ref local_var_str) = search_text { + local_var_req_builder = local_var_req_builder.query(&[("search_text", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = search_case_sensitive { + local_var_req_builder = local_var_req_builder.query(&[("search_case_sensitive", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = search_enable_regex { + local_var_req_builder = local_var_req_builder.query(&[("search_enable_regex", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = watch_index { + local_var_req_builder = local_var_req_builder.query(&[("watch_index", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity }; + Err(Error::ResponseError(local_var_error)) + } +} + diff --git a/sdks/api/runtime/rust/src/apis/containers_metrics_api.rs b/sdks/api/runtime/rust/src/apis/containers_metrics_api.rs new file mode 100644 index 0000000000..1518f17681 --- /dev/null +++ b/sdks/api/runtime/rust/src/apis/containers_metrics_api.rs @@ -0,0 +1,71 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; + +use crate::apis::ResponseContent; +use super::{Error, configuration}; + + +/// struct for typed errors of method [`containers_metrics_get`] +#[derive(Debug, Clone, 
Serialize, Deserialize)] +#[serde(untagged)] +pub enum ContainersMetricsGetError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + + +/// Returns the metrics for a given container. +pub async fn containers_metrics_get(configuration: &configuration::Configuration, container: &str, start: i32, end: i32, interval: i32, project: Option<&str>, environment: Option<&str>) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/v1/containers/{container}/metrics/history", local_var_configuration.base_path, container=crate::apis::urlencode(container)); + let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = project { + local_var_req_builder = local_var_req_builder.query(&[("project", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = environment { + local_var_req_builder = local_var_req_builder.query(&[("environment", &local_var_str.to_string())]); + } + local_var_req_builder = local_var_req_builder.query(&[("start", &start.to_string())]); + local_var_req_builder = local_var_req_builder.query(&[("end", &end.to_string())]); + local_var_req_builder = local_var_req_builder.query(&[("interval", &interval.to_string())]); + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity }; + Err(Error::ResponseError(local_var_error)) + } +} + diff --git a/sdks/api/runtime/rust/src/apis/mod.rs b/sdks/api/runtime/rust/src/apis/mod.rs index b977825b38..c9b06af798 100644 --- a/sdks/api/runtime/rust/src/apis/mod.rs +++ b/sdks/api/runtime/rust/src/apis/mod.rs @@ -92,7 +92,11 @@ pub fn parse_deep_object(prefix: &str, value: &serde_json::Value) -> Vec<(String pub mod actors_api; pub mod actors_logs_api; +pub mod actors_metrics_api; pub mod builds_api; +pub mod containers_api; +pub mod containers_logs_api; +pub mod containers_metrics_api; pub mod regions_api; pub mod routes_api; diff --git a/sdks/api/runtime/rust/src/apis/mod.rs.orig b/sdks/api/runtime/rust/src/apis/mod.rs.orig index b977825b38..c9b06af798 100644 --- a/sdks/api/runtime/rust/src/apis/mod.rs.orig +++ b/sdks/api/runtime/rust/src/apis/mod.rs.orig @@ -92,7 +92,11 @@ pub fn parse_deep_object(prefix: &str, value: &serde_json::Value) -> Vec<(String pub mod actors_api; pub mod actors_logs_api; +pub mod actors_metrics_api; pub mod 
builds_api; +pub mod containers_api; +pub mod containers_logs_api; +pub mod containers_metrics_api; pub mod regions_api; pub mod routes_api; diff --git a/sdks/api/runtime/rust/src/models/actors_actor.rs b/sdks/api/runtime/rust/src/models/actors_actor.rs index 2e16a52360..425c5c846b 100644 --- a/sdks/api/runtime/rust/src/models/actors_actor.rs +++ b/sdks/api/runtime/rust/src/models/actors_actor.rs @@ -24,8 +24,6 @@ pub struct ActorsActor { pub runtime: Box, #[serde(rename = "network")] pub network: Box, - #[serde(rename = "resources", skip_serializing_if = "Option::is_none")] - pub resources: Option>, #[serde(rename = "lifecycle")] pub lifecycle: Box, /// RFC3339 timestamp @@ -47,7 +45,6 @@ impl ActorsActor { tags, runtime: Box::new(runtime), network: Box::new(network), - resources: None, lifecycle: Box::new(lifecycle), created_at, started_at: None, diff --git a/sdks/api/runtime/rust/src/models/actors_create_actor_request.rs b/sdks/api/runtime/rust/src/models/actors_create_actor_request.rs index 94b4d56609..5256ec681c 100644 --- a/sdks/api/runtime/rust/src/models/actors_create_actor_request.rs +++ b/sdks/api/runtime/rust/src/models/actors_create_actor_request.rs @@ -25,8 +25,6 @@ pub struct ActorsCreateActorRequest { pub runtime: Option>, #[serde(rename = "network", skip_serializing_if = "Option::is_none")] pub network: Option>, - #[serde(rename = "resources", skip_serializing_if = "Option::is_none")] - pub resources: Option>, #[serde(rename = "lifecycle", skip_serializing_if = "Option::is_none")] pub lifecycle: Option>, } @@ -40,7 +38,6 @@ impl ActorsCreateActorRequest { build_tags: None, runtime: None, network: None, - resources: None, lifecycle: None, } } diff --git a/sdks/api/runtime/rust/src/models/actors_get_actor_metrics_response.rs b/sdks/api/runtime/rust/src/models/actors_get_actor_metrics_response.rs new file mode 100644 index 0000000000..4178642fde --- /dev/null +++ b/sdks/api/runtime/rust/src/models/actors_get_actor_metrics_response.rs @@ -0,0 +1,40 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ActorsGetActorMetricsResponse { + #[serde(rename = "actor_ids")] + pub actor_ids: Vec, + #[serde(rename = "metric_names")] + pub metric_names: Vec, + #[serde(rename = "metric_attributes")] + pub metric_attributes: Vec<::std::collections::HashMap>, + #[serde(rename = "metric_types")] + pub metric_types: Vec, + #[serde(rename = "metric_values")] + pub metric_values: Vec>, +} + +impl ActorsGetActorMetricsResponse { + pub fn new(actor_ids: Vec, metric_names: Vec, metric_attributes: Vec<::std::collections::HashMap>, metric_types: Vec, metric_values: Vec>) -> ActorsGetActorMetricsResponse { + ActorsGetActorMetricsResponse { + actor_ids, + metric_names, + metric_attributes, + metric_types, + metric_values, + } + } +} + + diff --git a/sdks/api/runtime/rust/src/models/containers_container.rs b/sdks/api/runtime/rust/src/models/containers_container.rs new file mode 100644 index 0000000000..8bc53f90d7 --- /dev/null +++ b/sdks/api/runtime/rust/src/models/containers_container.rs @@ -0,0 +1,59 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: 
https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersContainer { + /// Can be a UUID or base36 encoded binary data. + #[serde(rename = "id")] + pub id: String, + #[serde(rename = "region")] + pub region: String, + #[serde(rename = "tags", deserialize_with = "Option::deserialize")] + pub tags: Option, + #[serde(rename = "runtime")] + pub runtime: Box, + #[serde(rename = "network")] + pub network: Box, + #[serde(rename = "resources")] + pub resources: Box, + #[serde(rename = "lifecycle")] + pub lifecycle: Box, + /// RFC3339 timestamp + #[serde(rename = "created_at")] + pub created_at: String, + /// RFC3339 timestamp + #[serde(rename = "started_at", skip_serializing_if = "Option::is_none")] + pub started_at: Option, + /// RFC3339 timestamp + #[serde(rename = "destroyed_at", skip_serializing_if = "Option::is_none")] + pub destroyed_at: Option, +} + +impl ContainersContainer { + pub fn new(id: String, region: String, tags: Option, runtime: crate::models::ContainersRuntime, network: crate::models::ContainersNetwork, resources: crate::models::ContainersResources, lifecycle: crate::models::ContainersLifecycle, created_at: String) -> ContainersContainer { + ContainersContainer { + id, + region, + tags, + runtime: Box::new(runtime), + network: Box::new(network), + resources: Box::new(resources), + lifecycle: Box::new(lifecycle), + created_at, + started_at: None, + destroyed_at: None, + } + } +} + + diff --git a/sdks/api/runtime/rust/src/models/containers_create_container_network_request.rs b/sdks/api/runtime/rust/src/models/containers_create_container_network_request.rs new file mode 100644 index 0000000000..ac0e9726c3 --- /dev/null +++ b/sdks/api/runtime/rust/src/models/containers_create_container_network_request.rs @@ -0,0 +1,34 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersCreateContainerNetworkRequest { + #[serde(rename = "mode", skip_serializing_if = "Option::is_none")] + pub mode: Option, + #[serde(rename = "ports", skip_serializing_if = "Option::is_none")] + pub ports: Option<::std::collections::HashMap>, + #[serde(rename = "wait_ready", skip_serializing_if = "Option::is_none")] + pub wait_ready: Option, +} + +impl ContainersCreateContainerNetworkRequest { + pub fn new() -> ContainersCreateContainerNetworkRequest { + ContainersCreateContainerNetworkRequest { + mode: None, + ports: None, + wait_ready: None, + } + } +} + + diff --git a/sdks/api/runtime/rust/src/models/containers_create_container_port_request.rs b/sdks/api/runtime/rust/src/models/containers_create_container_port_request.rs new file mode 100644 index 0000000000..3741e52d6a --- /dev/null +++ b/sdks/api/runtime/rust/src/models/containers_create_container_port_request.rs @@ -0,0 +1,34 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersCreateContainerPortRequest { + #[serde(rename = "protocol")] + pub protocol: crate::models::ContainersPortProtocol, + #[serde(rename = 
"internal_port", skip_serializing_if = "Option::is_none")] + pub internal_port: Option, + #[serde(rename = "routing", skip_serializing_if = "Option::is_none")] + pub routing: Option>, +} + +impl ContainersCreateContainerPortRequest { + pub fn new(protocol: crate::models::ContainersPortProtocol) -> ContainersCreateContainerPortRequest { + ContainersCreateContainerPortRequest { + protocol, + internal_port: None, + routing: None, + } + } +} + + diff --git a/sdks/api/runtime/rust/src/models/containers_create_container_request.rs b/sdks/api/runtime/rust/src/models/containers_create_container_request.rs new file mode 100644 index 0000000000..ae6763b81e --- /dev/null +++ b/sdks/api/runtime/rust/src/models/containers_create_container_request.rs @@ -0,0 +1,49 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersCreateContainerRequest { + #[serde(rename = "region", skip_serializing_if = "Option::is_none")] + pub region: Option, + #[serde(rename = "tags", deserialize_with = "Option::deserialize")] + pub tags: Option, + #[serde(rename = "build", skip_serializing_if = "Option::is_none")] + pub build: Option, + #[serde(rename = "build_tags", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none")] + pub build_tags: Option>, + #[serde(rename = "runtime", skip_serializing_if = "Option::is_none")] + pub runtime: Option>, + #[serde(rename = "network", skip_serializing_if = "Option::is_none")] + pub network: Option>, + #[serde(rename = "resources")] + pub resources: Box, + #[serde(rename = "lifecycle", skip_serializing_if = "Option::is_none")] + pub lifecycle: Option>, +} + +impl ContainersCreateContainerRequest { + pub fn new(tags: Option, resources: crate::models::ContainersResources) -> ContainersCreateContainerRequest { + ContainersCreateContainerRequest { + region: None, + tags, + build: None, + build_tags: None, + runtime: None, + network: None, + resources: Box::new(resources), + lifecycle: None, + } + } +} + + diff --git a/sdks/api/runtime/rust/src/models/containers_create_container_response.rs b/sdks/api/runtime/rust/src/models/containers_create_container_response.rs new file mode 100644 index 0000000000..b2e66e06e3 --- /dev/null +++ b/sdks/api/runtime/rust/src/models/containers_create_container_response.rs @@ -0,0 +1,28 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersCreateContainerResponse { + #[serde(rename = "container")] + pub container: Box, +} + +impl ContainersCreateContainerResponse { + pub fn new(container: crate::models::ContainersContainer) -> ContainersCreateContainerResponse { + ContainersCreateContainerResponse { + container: Box::new(container), + } + } +} + + diff --git a/sdks/api/runtime/rust/src/models/containers_create_container_runtime_network_request.rs b/sdks/api/runtime/rust/src/models/containers_create_container_runtime_network_request.rs new file mode 100644 index 0000000000..8e61dce98a --- /dev/null +++ 
b/sdks/api/runtime/rust/src/models/containers_create_container_runtime_network_request.rs @@ -0,0 +1,28 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersCreateContainerRuntimeNetworkRequest { + #[serde(rename = "endpoint_type")] + pub endpoint_type: crate::models::ContainersEndpointType, +} + +impl ContainersCreateContainerRuntimeNetworkRequest { + pub fn new(endpoint_type: crate::models::ContainersEndpointType) -> ContainersCreateContainerRuntimeNetworkRequest { + ContainersCreateContainerRuntimeNetworkRequest { + endpoint_type, + } + } +} + + diff --git a/sdks/api/runtime/rust/src/models/containers_create_container_runtime_request.rs b/sdks/api/runtime/rust/src/models/containers_create_container_runtime_request.rs new file mode 100644 index 0000000000..8915fe4b94 --- /dev/null +++ b/sdks/api/runtime/rust/src/models/containers_create_container_runtime_request.rs @@ -0,0 +1,31 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersCreateContainerRuntimeRequest { + #[serde(rename = "environment", skip_serializing_if = "Option::is_none")] + pub environment: Option<::std::collections::HashMap>, + #[serde(rename = "network", skip_serializing_if = "Option::is_none")] + pub network: Option>, +} + +impl ContainersCreateContainerRuntimeRequest { + pub fn new() -> ContainersCreateContainerRuntimeRequest { + ContainersCreateContainerRuntimeRequest { + environment: None, + network: None, + } + } +} + + diff --git a/sdks/api/runtime/rust/src/models/containers_endpoint_type.rs b/sdks/api/runtime/rust/src/models/containers_endpoint_type.rs new file mode 100644 index 0000000000..60e19113e6 --- /dev/null +++ b/sdks/api/runtime/rust/src/models/containers_endpoint_type.rs @@ -0,0 +1,39 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum ContainersEndpointType { + #[serde(rename = "hostname")] + Hostname, + #[serde(rename = "path")] + Path, + +} + +impl ToString for ContainersEndpointType { + fn to_string(&self) -> String { + match self { + Self::Hostname => String::from("hostname"), + Self::Path => String::from("path"), + } + } +} + +impl Default for ContainersEndpointType { + fn default() -> ContainersEndpointType { + Self::Hostname + } +} + + + + diff --git a/sdks/api/runtime/rust/src/models/containers_get_container_logs_response.rs b/sdks/api/runtime/rust/src/models/containers_get_container_logs_response.rs new file mode 100644 index 0000000000..72e05ae8b6 --- /dev/null +++ b/sdks/api/runtime/rust/src/models/containers_get_container_logs_response.rs @@ -0,0 +1,52 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI 
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+
+
+#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
+pub struct ContainersGetContainerLogsResponse {
+    /// List of container IDs in these logs. The order of these correspond to the index in the log entry.
+    #[serde(rename = "container_ids")]
+    pub container_ids: Vec<String>,
+    /// Sorted old to new.
+    #[serde(rename = "lines")]
+    pub lines: Vec<String>,
+    /// Sorted old to new.
+    #[serde(rename = "timestamps")]
+    pub timestamps: Vec<String>,
+    /// Streams the logs came from. 0 = stdout 1 = stderr
+    #[serde(rename = "streams")]
+    pub streams: Vec<i32>,
+    /// List of flags denoting if this log is not directly from the container.
+    #[serde(rename = "foreigns")]
+    pub foreigns: Vec<bool>,
+    /// Index of the container that this log was for. Use this index to look up the full ID in `container_ids`.
+    #[serde(rename = "container_indices")]
+    pub container_indices: Vec<i32>,
+    #[serde(rename = "watch")]
+    pub watch: Box<crate::models::WatchResponse>,
+}
+
+impl ContainersGetContainerLogsResponse {
+    pub fn new(container_ids: Vec<String>, lines: Vec<String>, timestamps: Vec<String>, streams: Vec<i32>, foreigns: Vec<bool>, container_indices: Vec<i32>, watch: crate::models::WatchResponse) -> ContainersGetContainerLogsResponse {
+        ContainersGetContainerLogsResponse {
+            container_ids,
+            lines,
+            timestamps,
+            streams,
+            foreigns,
+            container_indices,
+            watch: Box::new(watch),
+        }
+    }
+}
+
+
diff --git a/sdks/api/runtime/rust/src/models/containers_get_container_metrics_response.rs b/sdks/api/runtime/rust/src/models/containers_get_container_metrics_response.rs
new file mode 100644
index 0000000000..971ed8aef2
--- /dev/null
+++ b/sdks/api/runtime/rust/src/models/containers_get_container_metrics_response.rs
@@ -0,0 +1,40 @@
+/*
+ * Rivet API
+ *
+ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+
+
+#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
+pub struct ContainersGetContainerMetricsResponse {
+    #[serde(rename = "container_ids")]
+    pub container_ids: Vec<String>,
+    #[serde(rename = "metric_names")]
+    pub metric_names: Vec<String>,
+    #[serde(rename = "metric_attributes")]
+    pub metric_attributes: Vec<::std::collections::HashMap<String, String>>,
+    #[serde(rename = "metric_types")]
+    pub metric_types: Vec<String>,
+    #[serde(rename = "metric_values")]
+    pub metric_values: Vec<Vec<f64>>,
+}
+
+impl ContainersGetContainerMetricsResponse {
+    pub fn new(container_ids: Vec<String>, metric_names: Vec<String>, metric_attributes: Vec<::std::collections::HashMap<String, String>>, metric_types: Vec<String>, metric_values: Vec<Vec<f64>>) -> ContainersGetContainerMetricsResponse {
+        ContainersGetContainerMetricsResponse {
+            container_ids,
+            metric_names,
+            metric_attributes,
+            metric_types,
+            metric_values,
+        }
+    }
+}
+
+
diff --git a/sdks/api/runtime/rust/src/models/containers_get_container_response.rs b/sdks/api/runtime/rust/src/models/containers_get_container_response.rs
new file mode 100644
index 0000000000..fedbf40950
--- /dev/null
+++ b/sdks/api/runtime/rust/src/models/containers_get_container_response.rs
@@ -0,0 +1,28 @@
+/*
+ * Rivet API
+ *
+ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+
+
+#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
+pub struct ContainersGetContainerResponse {
+    #[serde(rename = "container")]
+    pub container: Box<crate::models::ContainersContainer>,
+}
+
+impl ContainersGetContainerResponse {
+    pub fn new(container: crate::models::ContainersContainer) -> ContainersGetContainerResponse {
+        ContainersGetContainerResponse {
+            container: Box::new(container),
+        }
+    }
+}
+
+
diff --git a/sdks/api/runtime/rust/src/models/containers_lifecycle.rs b/sdks/api/runtime/rust/src/models/containers_lifecycle.rs
new file mode 100644
index 0000000000..decbcb830b
--- /dev/null
+++ b/sdks/api/runtime/rust/src/models/containers_lifecycle.rs
@@ -0,0 +1,33 @@
+/*
+ * Rivet API
+ *
+ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+
+
+#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
+pub struct ContainersLifecycle {
+    /// The duration to wait for in milliseconds before killing the container. This should be set to a safe default, and can be overridden during a DELETE request if needed.
+    #[serde(rename = "kill_timeout", skip_serializing_if = "Option::is_none")]
+    pub kill_timeout: Option<i64>,
+    /// If true, the container will try to reschedule itself automatically in the event of a crash or a datacenter failover. The container will not reschedule if it exits successfully.
+    #[serde(rename = "durable", skip_serializing_if = "Option::is_none")]
+    pub durable: Option<bool>,
+}
+
+impl ContainersLifecycle {
+    pub fn new() -> ContainersLifecycle {
+        ContainersLifecycle {
+            kill_timeout: None,
+            durable: None,
+        }
+    }
+}
+
+
diff --git a/sdks/api/runtime/rust/src/models/containers_list_containers_response.rs b/sdks/api/runtime/rust/src/models/containers_list_containers_response.rs
new file mode 100644
index 0000000000..07659413f0
--- /dev/null
+++ b/sdks/api/runtime/rust/src/models/containers_list_containers_response.rs
@@ -0,0 +1,32 @@
+/*
+ * Rivet API
+ *
+ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+
+
+#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
+pub struct ContainersListContainersResponse {
+    /// A list of containers for the project associated with the token.
+    #[serde(rename = "containers")]
+    pub containers: Vec<crate::models::ContainersContainer>,
+    #[serde(rename = "pagination")]
+    pub pagination: Box<crate::models::Pagination>,
+}
+
+impl ContainersListContainersResponse {
+    pub fn new(containers: Vec<crate::models::ContainersContainer>, pagination: crate::models::Pagination) -> ContainersListContainersResponse {
+        ContainersListContainersResponse {
+            containers,
+            pagination: Box::new(pagination),
+        }
+    }
+}
+
+
diff --git a/sdks/api/runtime/rust/src/models/containers_network.rs b/sdks/api/runtime/rust/src/models/containers_network.rs
new file mode 100644
index 0000000000..09e41996f3
--- /dev/null
+++ b/sdks/api/runtime/rust/src/models/containers_network.rs
@@ -0,0 +1,31 @@
+/*
+ * Rivet API
+ *
+ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+
+
+#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
+pub struct ContainersNetwork {
+    #[serde(rename = "mode")]
+    pub mode: crate::models::ContainersNetworkMode,
+    #[serde(rename = "ports")]
+    pub ports: ::std::collections::HashMap<String, crate::models::ContainersPort>,
+}
+
+impl ContainersNetwork {
+    pub fn new(mode: crate::models::ContainersNetworkMode, ports: ::std::collections::HashMap<String, crate::models::ContainersPort>) -> ContainersNetwork {
+        ContainersNetwork {
+            mode,
+            ports,
+        }
+    }
+}
+
+
diff --git a/sdks/api/runtime/rust/src/models/containers_network_mode.rs b/sdks/api/runtime/rust/src/models/containers_network_mode.rs
new file mode 100644
index 0000000000..a2d7589c1f
--- /dev/null
+++ b/sdks/api/runtime/rust/src/models/containers_network_mode.rs
@@ -0,0 +1,39 @@
+/*
+ * Rivet API
+ *
+ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+
+///
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub enum ContainersNetworkMode {
+    #[serde(rename = "bridge")]
+    Bridge,
+    #[serde(rename = "host")]
+    Host,
+
+}
+
+impl ToString for ContainersNetworkMode {
+    fn to_string(&self) -> String {
+        match self {
+            Self::Bridge => String::from("bridge"),
+            Self::Host => String::from("host"),
+        }
+    }
+}
+
+impl Default for ContainersNetworkMode {
+    fn default() -> ContainersNetworkMode {
+        Self::Bridge
+    }
+}
+
+
+
diff --git a/sdks/api/runtime/rust/src/models/containers_port.rs b/sdks/api/runtime/rust/src/models/containers_port.rs
new file mode 100644
index 0000000000..70d72c6831
--- /dev/null
+++ b/sdks/api/runtime/rust/src/models/containers_port.rs
@@ -0,0 +1,47 @@
+/*
+ * Rivet API
+ *
+ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+
+
+#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
+pub struct ContainersPort {
+    #[serde(rename = "protocol")]
+    pub protocol: crate::models::ContainersPortProtocol,
+    #[serde(rename = "internal_port", skip_serializing_if = "Option::is_none")]
+    pub internal_port: Option<i32>,
+    #[serde(rename = "hostname", skip_serializing_if = "Option::is_none")]
+    pub hostname: Option<String>,
+    #[serde(rename = "port", skip_serializing_if = "Option::is_none")]
+    pub port: Option<i32>,
+    #[serde(rename = "path", skip_serializing_if = "Option::is_none")]
+    pub path: Option<String>,
+    /// Fully formed connection URL including protocol, hostname, port, and path, if applicable.
+    #[serde(rename = "url", skip_serializing_if = "Option::is_none")]
+    pub url: Option<String>,
+    #[serde(rename = "routing")]
+    pub routing: Box<crate::models::ContainersPortRouting>,
+}
+
+impl ContainersPort {
+    pub fn new(protocol: crate::models::ContainersPortProtocol, routing: crate::models::ContainersPortRouting) -> ContainersPort {
+        ContainersPort {
+            protocol,
+            internal_port: None,
+            hostname: None,
+            port: None,
+            path: None,
+            url: None,
+            routing: Box::new(routing),
+        }
+    }
+}
+
+
diff --git a/sdks/api/runtime/rust/src/models/containers_port_protocol.rs b/sdks/api/runtime/rust/src/models/containers_port_protocol.rs
new file mode 100644
index 0000000000..e54bd29740
--- /dev/null
+++ b/sdks/api/runtime/rust/src/models/containers_port_protocol.rs
@@ -0,0 +1,48 @@
+/*
+ * Rivet API
+ *
+ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+
+///
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub enum ContainersPortProtocol {
+    #[serde(rename = "http")]
+    Http,
+    #[serde(rename = "https")]
+    Https,
+    #[serde(rename = "tcp")]
+    Tcp,
+    #[serde(rename = "tcp_tls")]
+    TcpTls,
+    #[serde(rename = "udp")]
+    Udp,
+
+}
+
+impl ToString for ContainersPortProtocol {
+    fn to_string(&self) -> String {
+        match self {
+            Self::Http => String::from("http"),
+            Self::Https => String::from("https"),
+            Self::Tcp => String::from("tcp"),
+            Self::TcpTls => String::from("tcp_tls"),
+            Self::Udp => String::from("udp"),
+        }
+    }
+}
+
+impl Default for ContainersPortProtocol {
+    fn default() -> ContainersPortProtocol {
+        Self::Http
+    }
+}
+
+
+
diff --git a/sdks/api/runtime/rust/src/models/containers_port_routing.rs b/sdks/api/runtime/rust/src/models/containers_port_routing.rs
new file mode 100644
index 0000000000..3098c15996
--- /dev/null
+++ b/sdks/api/runtime/rust/src/models/containers_port_routing.rs
@@ -0,0 +1,31 @@
+/*
+ * Rivet API
+ *
+ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+
+
+#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
+pub struct ContainersPortRouting {
+    #[serde(rename = "guard", skip_serializing_if = "Option::is_none")]
+    pub guard: Option<serde_json::Value>,
+    #[serde(rename = "host", skip_serializing_if = "Option::is_none")]
+    pub host: Option<serde_json::Value>,
+}
+
+impl ContainersPortRouting {
+    pub fn new() -> ContainersPortRouting {
+        ContainersPortRouting {
+            guard: None,
+            host: None,
+        }
+    }
+}
+
+
diff --git a/sdks/api/runtime/rust/src/models/containers_query_log_stream.rs b/sdks/api/runtime/rust/src/models/containers_query_log_stream.rs
new file mode 100644
index 0000000000..9b9ec99114
--- /dev/null
+++ b/sdks/api/runtime/rust/src/models/containers_query_log_stream.rs
@@ -0,0 +1,42 @@
+/*
+ * Rivet API
+ *
+ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+
+///
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub enum ContainersQueryLogStream {
+    #[serde(rename = "std_out")]
+    StdOut,
+    #[serde(rename = "std_err")]
+    StdErr,
+    #[serde(rename = "all")]
+    All,
+
+}
+
+impl ToString for ContainersQueryLogStream {
String { + match self { + Self::StdOut => String::from("std_out"), + Self::StdErr => String::from("std_err"), + Self::All => String::from("all"), + } + } +} + +impl Default for ContainersQueryLogStream { + fn default() -> ContainersQueryLogStream { + Self::StdOut + } +} + + + + diff --git a/sdks/api/runtime/rust/src/models/actors_resources.rs b/sdks/api/runtime/rust/src/models/containers_resources.rs similarity index 81% rename from sdks/api/runtime/rust/src/models/actors_resources.rs rename to sdks/api/runtime/rust/src/models/containers_resources.rs index 743c6394c5..9e85ee3bd1 100644 --- a/sdks/api/runtime/rust/src/models/actors_resources.rs +++ b/sdks/api/runtime/rust/src/models/containers_resources.rs @@ -12,7 +12,7 @@ #[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] -pub struct ActorsResources { +pub struct ContainersResources { /// The number of CPU cores in millicores, or 1/1000 of a core. For example, 1/8 of a core would be 125 millicores, and 1 core would be 1000 millicores. #[serde(rename = "cpu")] pub cpu: i32, @@ -21,9 +21,9 @@ pub struct ActorsResources { pub memory: i32, } -impl ActorsResources { - pub fn new(cpu: i32, memory: i32) -> ActorsResources { - ActorsResources { +impl ContainersResources { + pub fn new(cpu: i32, memory: i32) -> ContainersResources { + ContainersResources { cpu, memory, } diff --git a/sdks/api/runtime/rust/src/models/containers_runtime.rs b/sdks/api/runtime/rust/src/models/containers_runtime.rs new file mode 100644 index 0000000000..68f2bf7e0f --- /dev/null +++ b/sdks/api/runtime/rust/src/models/containers_runtime.rs @@ -0,0 +1,34 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersRuntime { + #[serde(rename = "build")] + pub build: uuid::Uuid, + #[serde(rename = "arguments", skip_serializing_if = "Option::is_none")] + pub arguments: Option>, + #[serde(rename = "environment", skip_serializing_if = "Option::is_none")] + pub environment: Option<::std::collections::HashMap>, +} + +impl ContainersRuntime { + pub fn new(build: uuid::Uuid) -> ContainersRuntime { + ContainersRuntime { + build, + arguments: None, + environment: None, + } + } +} + + diff --git a/sdks/api/runtime/rust/src/models/containers_upgrade_all_containers_request.rs b/sdks/api/runtime/rust/src/models/containers_upgrade_all_containers_request.rs new file mode 100644 index 0000000000..3ff0a31aab --- /dev/null +++ b/sdks/api/runtime/rust/src/models/containers_upgrade_all_containers_request.rs @@ -0,0 +1,34 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ContainersUpgradeAllContainersRequest { + #[serde(rename = "tags", deserialize_with = "Option::deserialize")] + pub tags: Option, + #[serde(rename = "build", skip_serializing_if = "Option::is_none")] + pub build: Option, + #[serde(rename = "build_tags", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none")] + pub build_tags: Option>, +} + +impl ContainersUpgradeAllContainersRequest { + pub fn new(tags: Option) -> 
+        ContainersUpgradeAllContainersRequest {
+            tags,
+            build: None,
+            build_tags: None,
+        }
+    }
+}
+
+
diff --git a/sdks/api/runtime/rust/src/models/containers_upgrade_all_containers_response.rs b/sdks/api/runtime/rust/src/models/containers_upgrade_all_containers_response.rs
new file mode 100644
index 0000000000..c7a7bab522
--- /dev/null
+++ b/sdks/api/runtime/rust/src/models/containers_upgrade_all_containers_response.rs
@@ -0,0 +1,28 @@
+/*
+ * Rivet API
+ *
+ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+
+
+#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
+pub struct ContainersUpgradeAllContainersResponse {
+    #[serde(rename = "count")]
+    pub count: i64,
+}
+
+impl ContainersUpgradeAllContainersResponse {
+    pub fn new(count: i64) -> ContainersUpgradeAllContainersResponse {
+        ContainersUpgradeAllContainersResponse {
+            count,
+        }
+    }
+}
+
+
diff --git a/sdks/api/runtime/rust/src/models/containers_upgrade_container_request.rs b/sdks/api/runtime/rust/src/models/containers_upgrade_container_request.rs
new file mode 100644
index 0000000000..8aaa190049
--- /dev/null
+++ b/sdks/api/runtime/rust/src/models/containers_upgrade_container_request.rs
@@ -0,0 +1,31 @@
+/*
+ * Rivet API
+ *
+ * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ * Generated by: https://openapi-generator.tech
+ */
+
+
+
+#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
+pub struct ContainersUpgradeContainerRequest {
+    #[serde(rename = "build", skip_serializing_if = "Option::is_none")]
+    pub build: Option<uuid::Uuid>,
+    #[serde(rename = "build_tags", default, with = "::serde_with::rust::double_option", skip_serializing_if = "Option::is_none")]
+    pub build_tags: Option<Option<serde_json::Value>>,
+}
+
+impl ContainersUpgradeContainerRequest {
+    pub fn new() -> ContainersUpgradeContainerRequest {
+        ContainersUpgradeContainerRequest {
+            build: None,
+            build_tags: None,
+        }
+    }
+}
+
+
diff --git a/sdks/api/runtime/rust/src/models/mod.rs b/sdks/api/runtime/rust/src/models/mod.rs
index ae2e0fd768..1f888f66ea 100644
--- a/sdks/api/runtime/rust/src/models/mod.rs
+++ b/sdks/api/runtime/rust/src/models/mod.rs
@@ -18,6 +18,8 @@ pub mod actors_export_actor_logs_response;
 pub use self::actors_export_actor_logs_response::ActorsExportActorLogsResponse;
 pub mod actors_get_actor_logs_response;
 pub use self::actors_get_actor_logs_response::ActorsGetActorLogsResponse;
+pub mod actors_get_actor_metrics_response;
+pub use self::actors_get_actor_metrics_response::ActorsGetActorMetricsResponse;
 pub mod actors_get_actor_response;
 pub use self::actors_get_actor_response::ActorsGetActorResponse;
 pub mod actors_get_actor_usage_response;
@@ -38,10 +40,15 @@ pub mod actors_port_protocol;
 pub use self::actors_port_protocol::ActorsPortProtocol;
 pub mod actors_port_routing;
 pub use self::actors_port_routing::ActorsPortRouting;
 pub mod actors_query_actors_response;
 pub use self::actors_query_actors_response::ActorsQueryActorsResponse;
-pub mod actors_resources;
-pub use self::actors_resources::ActorsResources;
+pub mod actors_query_log_stream;
+pub use self::actors_query_log_stream::ActorsQueryLogStream;
 pub mod actors_runtime;
 pub use self::actors_runtime::ActorsRuntime;
pub mod actors_upgrade_actor_request; @@ -72,6 +79,54 @@ pub mod builds_prepare_build_response; pub use self::builds_prepare_build_response::BuildsPrepareBuildResponse; pub mod builds_resources; pub use self::builds_resources::BuildsResources; +pub mod containers_container; +pub use self::containers_container::ContainersContainer; +pub mod containers_create_container_network_request; +pub use self::containers_create_container_network_request::ContainersCreateContainerNetworkRequest; +pub mod containers_create_container_port_request; +pub use self::containers_create_container_port_request::ContainersCreateContainerPortRequest; +pub mod containers_create_container_request; +pub use self::containers_create_container_request::ContainersCreateContainerRequest; +pub mod containers_create_container_response; +pub use self::containers_create_container_response::ContainersCreateContainerResponse; +pub mod containers_create_container_runtime_network_request; +pub use self::containers_create_container_runtime_network_request::ContainersCreateContainerRuntimeNetworkRequest; +pub mod containers_create_container_runtime_request; +pub use self::containers_create_container_runtime_request::ContainersCreateContainerRuntimeRequest; +pub mod containers_endpoint_type; +pub use self::containers_endpoint_type::ContainersEndpointType; +pub mod containers_get_container_logs_response; +pub use self::containers_get_container_logs_response::ContainersGetContainerLogsResponse; +pub mod containers_get_container_metrics_response; +pub use self::containers_get_container_metrics_response::ContainersGetContainerMetricsResponse; +pub mod containers_get_container_response; +pub use self::containers_get_container_response::ContainersGetContainerResponse; +pub mod containers_lifecycle; +pub use self::containers_lifecycle::ContainersLifecycle; +pub mod containers_list_containers_response; +pub use self::containers_list_containers_response::ContainersListContainersResponse; +pub mod containers_network; +pub use self::containers_network::ContainersNetwork; +pub mod containers_network_mode; +pub use self::containers_network_mode::ContainersNetworkMode; +pub mod containers_port; +pub use self::containers_port::ContainersPort; +pub mod containers_port_protocol; +pub use self::containers_port_protocol::ContainersPortProtocol; +pub mod containers_port_routing; +pub use self::containers_port_routing::ContainersPortRouting; +pub mod containers_query_log_stream; +pub use self::containers_query_log_stream::ContainersQueryLogStream; +pub mod containers_resources; +pub use self::containers_resources::ContainersResources; +pub mod containers_runtime; +pub use self::containers_runtime::ContainersRuntime; +pub mod containers_upgrade_all_containers_request; +pub use self::containers_upgrade_all_containers_request::ContainersUpgradeAllContainersRequest; +pub mod containers_upgrade_all_containers_response; +pub use self::containers_upgrade_all_containers_response::ContainersUpgradeAllContainersResponse; +pub mod containers_upgrade_container_request; +pub use self::containers_upgrade_container_request::ContainersUpgradeContainerRequest; pub mod error_body; pub use self::error_body::ErrorBody; pub mod pagination; diff --git a/sdks/api/runtime/typescript/src/Client.ts b/sdks/api/runtime/typescript/src/Client.ts index b9aa6916ec..2492cfa619 100644 --- a/sdks/api/runtime/typescript/src/Client.ts +++ b/sdks/api/runtime/typescript/src/Client.ts @@ -6,6 +6,7 @@ import * as environments from "./environments"; import * as core from "./core"; import { 
Actors } from "./api/resources/actors/client/Client"; import { Builds } from "./api/resources/builds/client/Client"; +import { Containers } from "./api/resources/containers/client/Client"; import { Regions } from "./api/resources/regions/client/Client"; import { Routes } from "./api/resources/routes/client/Client"; @@ -37,6 +38,7 @@ export declare namespace RivetClient { export class RivetClient { protected _actors: Actors | undefined; protected _builds: Builds | undefined; + protected _containers: Containers | undefined; protected _regions: Regions | undefined; protected _routes: Routes | undefined; @@ -50,6 +52,10 @@ export class RivetClient { return (this._builds ??= new Builds(this._options)); } + public get containers(): Containers { + return (this._containers ??= new Containers(this._options)); + } + public get regions(): Regions { return (this._regions ??= new Regions(this._options)); } diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/client/Client.ts b/sdks/api/runtime/typescript/src/api/resources/actors/client/Client.ts index 9d29de0eff..16af41d930 100644 --- a/sdks/api/runtime/typescript/src/api/resources/actors/client/Client.ts +++ b/sdks/api/runtime/typescript/src/api/resources/actors/client/Client.ts @@ -9,6 +9,7 @@ import * as serializers from "../../../../serialization/index"; import urlJoin from "url-join"; import * as errors from "../../../../errors/index"; import { Logs } from "../resources/logs/client/Client"; +import { Metrics } from "../resources/metrics/client/Client"; export declare namespace Actors { export interface Options { @@ -37,6 +38,7 @@ export declare namespace Actors { export class Actors { protected _logs: Logs | undefined; + protected _metrics: Metrics | undefined; constructor(protected readonly _options: Actors.Options = {}) {} @@ -44,6 +46,10 @@ export class Actors { return (this._logs ??= new Logs(this._options)); } + public get metrics(): Metrics { + return (this._metrics ??= new Metrics(this._options)); + } + /** * Gets a actor. * @@ -91,7 +97,7 @@ export class Actors { (await core.Supplier.get(this._options.baseUrl)) ?? (await core.Supplier.get(this._options.environment)) ?? environments.RivetEnvironment.Production, - `/actors/${encodeURIComponent(serializers.Id.jsonOrThrow(actor))}`, + `/v2/actors/${encodeURIComponent(serializers.Id.jsonOrThrow(actor))}`, ), method: "GET", headers: { @@ -196,7 +202,7 @@ export class Actors { body: _response.error.rawBody, }); case "timeout": - throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /actors/{actor}."); + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /v2/actors/{actor}."); case "unknown": throw new errors.RivetError({ message: _response.error.errorMessage, @@ -264,7 +270,7 @@ export class Actors { (await core.Supplier.get(this._options.baseUrl)) ?? (await core.Supplier.get(this._options.environment)) ?? 
environments.RivetEnvironment.Production, - "/actors", + "/v2/actors", ), method: "GET", headers: { @@ -369,7 +375,7 @@ export class Actors { body: _response.error.rawBody, }); case "timeout": - throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /actors."); + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /v2/actors."); case "unknown": throw new errors.RivetError({ message: _response.error.errorMessage, @@ -415,10 +421,6 @@ export class Actors { * ports: {}, * waitReady: true * }, - * resources: { - * cpu: 1, - * memory: 1 - * }, * lifecycle: { * killTimeout: 1000000, * durable: true @@ -451,7 +453,7 @@ export class Actors { (await core.Supplier.get(this._options.baseUrl)) ?? (await core.Supplier.get(this._options.environment)) ?? environments.RivetEnvironment.Production, - "/actors", + "/v2/actors", ), method: "POST", headers: { @@ -557,7 +559,7 @@ export class Actors { body: _response.error.rawBody, }); case "timeout": - throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /actors."); + throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /v2/actors."); case "unknown": throw new errors.RivetError({ message: _response.error.errorMessage, @@ -610,7 +612,7 @@ export class Actors { (await core.Supplier.get(this._options.baseUrl)) ?? (await core.Supplier.get(this._options.environment)) ?? environments.RivetEnvironment.Production, - `/actors/${encodeURIComponent(serializers.Id.jsonOrThrow(actor))}`, + `/v2/actors/${encodeURIComponent(serializers.Id.jsonOrThrow(actor))}`, ), method: "DELETE", headers: { @@ -715,7 +717,7 @@ export class Actors { body: _response.error.rawBody, }); case "timeout": - throw new errors.RivetTimeoutError("Timeout exceeded when calling DELETE /actors/{actor}."); + throw new errors.RivetTimeoutError("Timeout exceeded when calling DELETE /v2/actors/{actor}."); case "unknown": throw new errors.RivetError({ message: _response.error.errorMessage, @@ -769,7 +771,7 @@ export class Actors { (await core.Supplier.get(this._options.baseUrl)) ?? (await core.Supplier.get(this._options.environment)) ?? environments.RivetEnvironment.Production, - `/actors/${encodeURIComponent(serializers.Id.jsonOrThrow(actor))}/upgrade`, + `/v2/actors/${encodeURIComponent(serializers.Id.jsonOrThrow(actor))}/upgrade`, ), method: "POST", headers: { @@ -875,7 +877,7 @@ export class Actors { body: _response.error.rawBody, }); case "timeout": - throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /actors/{actor}/upgrade."); + throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /v2/actors/{actor}/upgrade."); case "unknown": throw new errors.RivetError({ message: _response.error.errorMessage, @@ -930,7 +932,7 @@ export class Actors { (await core.Supplier.get(this._options.baseUrl)) ?? (await core.Supplier.get(this._options.environment)) ?? 
environments.RivetEnvironment.Production, - "/actors/upgrade", + "/v2/actors/upgrade", ), method: "POST", headers: { @@ -1036,7 +1038,7 @@ export class Actors { body: _response.error.rawBody, }); case "timeout": - throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /actors/upgrade."); + throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /v2/actors/upgrade."); case "unknown": throw new errors.RivetError({ message: _response.error.errorMessage, diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/client/requests/CreateActorRequestQuery.ts b/sdks/api/runtime/typescript/src/api/resources/actors/client/requests/CreateActorRequestQuery.ts index 9ffc6e6076..90685a87a4 100644 --- a/sdks/api/runtime/typescript/src/api/resources/actors/client/requests/CreateActorRequestQuery.ts +++ b/sdks/api/runtime/typescript/src/api/resources/actors/client/requests/CreateActorRequestQuery.ts @@ -30,10 +30,6 @@ import * as Rivet from "../../../../index"; * ports: {}, * waitReady: true * }, - * resources: { - * cpu: 1, - * memory: 1 - * }, * lifecycle: { * killTimeout: 1000000, * durable: true diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/resources/common/types/Actor.ts b/sdks/api/runtime/typescript/src/api/resources/actors/resources/common/types/Actor.ts index 1aeb34bd9f..268707a801 100644 --- a/sdks/api/runtime/typescript/src/api/resources/actors/resources/common/types/Actor.ts +++ b/sdks/api/runtime/typescript/src/api/resources/actors/resources/common/types/Actor.ts @@ -10,7 +10,6 @@ export interface Actor { tags?: unknown; runtime: Rivet.actors.Runtime; network: Rivet.actors.Network; - resources?: Rivet.actors.Resources; lifecycle: Rivet.actors.Lifecycle; createdAt: Rivet.Timestamp; startedAt?: Rivet.Timestamp; diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/resources/common/types/index.ts b/sdks/api/runtime/typescript/src/api/resources/actors/resources/common/types/index.ts index e8ecca191e..ced439f191 100644 --- a/sdks/api/runtime/typescript/src/api/resources/actors/resources/common/types/index.ts +++ b/sdks/api/runtime/typescript/src/api/resources/actors/resources/common/types/index.ts @@ -1,7 +1,6 @@ export * from "./Actor"; export * from "./Runtime"; export * from "./Lifecycle"; -export * from "./Resources"; export * from "./Network"; export * from "./NetworkMode"; export * from "./Port"; diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/resources/index.ts b/sdks/api/runtime/typescript/src/api/resources/actors/resources/index.ts index bb93a8b0c8..8b57b1caf5 100644 --- a/sdks/api/runtime/typescript/src/api/resources/actors/resources/index.ts +++ b/sdks/api/runtime/typescript/src/api/resources/actors/resources/index.ts @@ -2,4 +2,7 @@ export * as common from "./common"; export * from "./common/types"; export * as logs from "./logs"; export * from "./logs/types"; +export * as metrics from "./metrics"; +export * from "./metrics/types"; export * from "./logs/client/requests"; +export * from "./metrics/client/requests"; diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/resources/logs/client/Client.ts b/sdks/api/runtime/typescript/src/api/resources/actors/resources/logs/client/Client.ts index 7f40190724..b99aeb5adc 100644 --- a/sdks/api/runtime/typescript/src/api/resources/actors/resources/logs/client/Client.ts +++ b/sdks/api/runtime/typescript/src/api/resources/actors/resources/logs/client/Client.ts @@ -85,7 +85,7 @@ export class Logs { (await core.Supplier.get(this._options.baseUrl)) ?? 
(await core.Supplier.get(this._options.environment)) ?? environments.RivetEnvironment.Production, - "/actors/logs", + "/v2/actors/logs", ), method: "GET", headers: { @@ -190,7 +190,7 @@ export class Logs { body: _response.error.rawBody, }); case "timeout": - throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /actors/logs."); + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /v2/actors/logs."); case "unknown": throw new errors.RivetError({ message: _response.error.errorMessage, diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/resources/logs/types/GetActorLogsResponse.ts b/sdks/api/runtime/typescript/src/api/resources/actors/resources/logs/types/GetActorLogsResponse.ts index 8d6370cdfe..00781f554c 100644 --- a/sdks/api/runtime/typescript/src/api/resources/actors/resources/logs/types/GetActorLogsResponse.ts +++ b/sdks/api/runtime/typescript/src/api/resources/actors/resources/logs/types/GetActorLogsResponse.ts @@ -6,7 +6,7 @@ import * as Rivet from "../../../../../index"; export interface GetActorLogsResponse { /** List of actor IDs in these logs. The order of these correspond to the index in the log entry. */ - actorIds: Rivet.Id[]; + actorIds: string[]; /** Sorted old to new. */ lines: string[]; /** Sorted old to new. */ diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/Client.ts b/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/Client.ts new file mode 100644 index 0000000000..b8d7eb13c7 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/Client.ts @@ -0,0 +1,209 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as environments from "../../../../../../environments"; +import * as core from "../../../../../../core"; +import * as Rivet from "../../../../../index"; +import * as serializers from "../../../../../../serialization/index"; +import urlJoin from "url-join"; +import * as errors from "../../../../../../errors/index"; + +export declare namespace Metrics { + export interface Options { + environment?: core.Supplier; + /** Specify a custom URL to connect the client to. */ + baseUrl?: core.Supplier; + token?: core.Supplier; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + fetcher?: core.FetchFunction; + } + + export interface RequestOptions { + /** The maximum time to wait for a response in seconds. */ + timeoutInSeconds?: number; + /** The number of times to retry the request. Defaults to 2. */ + maxRetries?: number; + /** A hook to abort the request. */ + abortSignal?: AbortSignal; + /** Additional headers to include in the request. */ + headers?: Record; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + } +} + +export class Metrics { + constructor(protected readonly _options: Metrics.Options = {}) {} + + /** + * Returns the metrics for a given actor. + * + * @param {Rivet.Id} actor - The id of the actor to destroy + * @param {Rivet.actors.GetActorMetricsRequestQuery} request + * @param {Metrics.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.actors.metrics.get("string", { + * project: "string", + * environment: "string", + * start: 1, + * end: 1, + * interval: 1 + * }) + */ + public async get( + actor: Rivet.Id, + request: Rivet.actors.GetActorMetricsRequestQuery, + requestOptions?: Metrics.RequestOptions, + ): Promise { + const { project, environment, start, end, interval } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + _queryParams["start"] = start.toString(); + _queryParams["end"] = end.toString(); + _queryParams["interval"] = interval.toString(); + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + `/v2/actors/${encodeURIComponent(serializers.Id.jsonOrThrow(actor))}/metrics/history`, + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.actors.GetActorMetricsResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + 
serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError( + "Timeout exceeded when calling GET /v2/actors/{actor}/metrics/history.", + ); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + protected async _getAuthorizationHeader(): Promise { + const bearer = await core.Supplier.get(this._options.token); + if (bearer != null) { + return `Bearer ${bearer}`; + } + + return undefined; + } +} diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/index.ts b/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/index.ts new file mode 100644 index 0000000000..415726b7fe --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/index.ts @@ -0,0 +1 @@ +export * from "./requests"; diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/requests/GetActorMetricsRequestQuery.ts b/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/requests/GetActorMetricsRequestQuery.ts new file mode 100644 index 0000000000..060467b645 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/requests/GetActorMetricsRequestQuery.ts @@ -0,0 +1,21 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +/** + * @example + * { + * project: "string", + * environment: "string", + * start: 1, + * end: 1, + * interval: 1 + * } + */ +export interface GetActorMetricsRequestQuery { + project?: string; + environment?: string; + start: number; + end: number; + interval: number; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/requests/index.ts b/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/requests/index.ts new file mode 100644 index 0000000000..7a8cf1debe --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/client/requests/index.ts @@ -0,0 +1 @@ +export { type GetActorMetricsRequestQuery } from "./GetActorMetricsRequestQuery"; diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/index.ts b/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/index.ts new file mode 100644 index 0000000000..c9240f83b4 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from "./client"; diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/types/GetActorMetricsResponse.ts b/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/types/GetActorMetricsResponse.ts new file mode 100644 index 0000000000..35c68a3ffb --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/types/GetActorMetricsResponse.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface GetActorMetricsResponse { + actorIds: string[]; + metricNames: string[]; + metricAttributes: Record[]; + metricTypes: string[]; + metricValues: number[][]; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/types/index.ts b/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/types/index.ts new file mode 100644 index 0000000000..c5cf235d4d --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/actors/resources/metrics/types/index.ts @@ -0,0 +1 @@ +export * from "./GetActorMetricsResponse"; diff --git a/sdks/api/runtime/typescript/src/api/resources/actors/types/CreateActorRequest.ts b/sdks/api/runtime/typescript/src/api/resources/actors/types/CreateActorRequest.ts index 121fcf08bd..dc7b38fc28 100644 --- a/sdks/api/runtime/typescript/src/api/resources/actors/types/CreateActorRequest.ts +++ b/sdks/api/runtime/typescript/src/api/resources/actors/types/CreateActorRequest.ts @@ -11,6 +11,5 @@ export interface CreateActorRequest { buildTags?: unknown; runtime?: Rivet.actors.CreateActorRuntimeRequest; network?: Rivet.actors.CreateActorNetworkRequest; - resources?: Rivet.actors.Resources; lifecycle?: Rivet.actors.Lifecycle; } diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/client/Client.ts b/sdks/api/runtime/typescript/src/api/resources/containers/client/Client.ts new file mode 100644 index 0000000000..883a6bb63c --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/client/Client.ts @@ -0,0 +1,1067 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as environments from "../../../../environments"; +import * as core from "../../../../core"; +import * as Rivet from "../../../index"; +import * as serializers from "../../../../serialization/index"; +import urlJoin from "url-join"; +import * as errors from "../../../../errors/index"; +import { Logs } from "../resources/logs/client/Client"; +import { Metrics } from "../resources/metrics/client/Client"; + +export declare namespace Containers { + export interface Options { + environment?: core.Supplier; + /** Specify a custom URL to connect the client to. */ + baseUrl?: core.Supplier; + token?: core.Supplier; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + fetcher?: core.FetchFunction; + } + + export interface RequestOptions { + /** The maximum time to wait for a response in seconds. */ + timeoutInSeconds?: number; + /** The number of times to retry the request. Defaults to 2. */ + maxRetries?: number; + /** A hook to abort the request. */ + abortSignal?: AbortSignal; + /** Additional headers to include in the request. */ + headers?: Record; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + } +} + +export class Containers { + protected _logs: Logs | undefined; + protected _metrics: Metrics | undefined; + + constructor(protected readonly _options: Containers.Options = {}) {} + + public get logs(): Logs { + return (this._logs ??= new Logs(this._options)); + } + + public get metrics(): Metrics { + return (this._metrics ??= new Metrics(this._options)); + } + + /** + * Gets a container. + * + * @param {Rivet.Id} container - The id of the container to destroy + * @param {Rivet.containers.ListContainersRequestQuery} request + * @param {Containers.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.get("string", { + * project: "string", + * environment: "string", + * endpointType: "hostname" + * }) + */ + public async get( + container: Rivet.Id, + request: Rivet.containers.ListContainersRequestQuery = {}, + requestOptions?: Containers.RequestOptions, + ): Promise { + const { project, environment, endpointType } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + if (endpointType != null) { + _queryParams["endpoint_type"] = serializers.containers.EndpointType.jsonOrThrow(endpointType, { + unrecognizedObjectKeys: "strip", + }); + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + `/v1/containers/${encodeURIComponent(serializers.Id.jsonOrThrow(container))}`, + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? 
"25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.GetContainerResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /v1/containers/{container}."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Lists all containers associated with the token used. Can be filtered by tags in the query string. + * + * @param {Rivet.containers.GetContainersRequestQuery} request + * @param {Containers.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.list({ + * project: "string", + * environment: "string", + * endpointType: "hostname", + * tagsJson: "string", + * includeDestroyed: true, + * cursor: "string" + * }) + */ + public async list( + request: Rivet.containers.GetContainersRequestQuery = {}, + requestOptions?: Containers.RequestOptions, + ): Promise { + const { project, environment, endpointType, tagsJson, includeDestroyed, cursor } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + if (endpointType != null) { + _queryParams["endpoint_type"] = serializers.containers.EndpointType.jsonOrThrow(endpointType, { + unrecognizedObjectKeys: "strip", + }); + } + + if (tagsJson != null) { + _queryParams["tags_json"] = tagsJson; + } + + if (includeDestroyed != null) { + _queryParams["include_destroyed"] = includeDestroyed.toString(); + } + + if (cursor != null) { + _queryParams["cursor"] = cursor; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + "/v1/containers", + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.ListContainersResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /v1/containers."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Create a new container. + * + * @param {Rivet.containers.CreateContainerRequestQuery} request + * @param {Containers.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.create({ + * project: "string", + * environment: "string", + * endpointType: "hostname", + * body: { + * region: "string", + * tags: { + * "key": "value" + * }, + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * }, + * runtime: { + * environment: {}, + * network: { + * endpointType: "hostname" + * } + * }, + * network: { + * mode: "bridge", + * ports: {}, + * waitReady: true + * }, + * resources: { + * cpu: 1, + * memory: 1 + * }, + * lifecycle: { + * killTimeout: 1000000, + * durable: true + * } + * } + * }) + */ + public async create( + request: Rivet.containers.CreateContainerRequestQuery, + requestOptions?: Containers.RequestOptions, + ): Promise { + const { project, environment, endpointType, body: _body } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + if (endpointType != null) { + _queryParams["endpoint_type"] = serializers.containers.EndpointType.jsonOrThrow(endpointType, { + unrecognizedObjectKeys: "strip", + }); + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + "/v1/containers", + ), + method: "POST", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + body: serializers.containers.CreateContainerRequest.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip" }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.CreateContainerResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /v1/containers."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Destroy a container. + * + * @param {Rivet.Id} container - The id of the container to destroy + * @param {Rivet.containers.DestroyContainerRequestQuery} request + * @param {Containers.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.destroy("string", { + * project: "string", + * environment: "string", + * overrideKillTimeout: 1000000 + * }) + */ + public async destroy( + container: Rivet.Id, + request: Rivet.containers.DestroyContainerRequestQuery = {}, + requestOptions?: Containers.RequestOptions, + ): Promise { + const { project, environment, overrideKillTimeout } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + if (overrideKillTimeout != null) { + _queryParams["override_kill_timeout"] = overrideKillTimeout.toString(); + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + `/v1/containers/${encodeURIComponent(serializers.Id.jsonOrThrow(container))}`, + ), + method: "DELETE", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.DestroyContainerResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + 
serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling DELETE /v1/containers/{container}."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Upgrades a container. + * + * @param {Rivet.Id} container - The id of the container to upgrade + * @param {Rivet.containers.UpgradeContainerRequestQuery} request + * @param {Containers.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.upgrade("string", { + * project: "string", + * environment: "string", + * body: { + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * } + * } + * }) + */ + public async upgrade( + container: Rivet.Id, + request: Rivet.containers.UpgradeContainerRequestQuery, + requestOptions?: Containers.RequestOptions, + ): Promise { + const { project, environment, body: _body } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + `/v1/containers/${encodeURIComponent(serializers.Id.jsonOrThrow(container))}/upgrade`, + ), + method: "POST", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + body: serializers.containers.UpgradeContainerRequest.jsonOrThrow(_body, { + unrecognizedObjectKeys: "strip", + }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.UpgradeContainerResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError( + "Timeout exceeded when calling POST /v1/containers/{container}/upgrade.", + ); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Upgrades all containers matching the given tags. + * + * @param {Rivet.containers.UpgradeAllContainersRequestQuery} request + * @param {Containers.RequestOptions} requestOptions - Request-specific configuration. 
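A short sketch of the single-container `upgrade` call defined above, reusing the `client` instance from the earlier sketch; the build tags shown are placeholders.

```ts
// Sketch only: `client` is an already-constructed SDK client as in the create sketch.
// Points one container at a new build selected by build tags.
const containerId = "00000000-0000-0000-0000-000000000000"; // placeholder id
await client.containers.upgrade(containerId, {
    project: "my-project",
    environment: "prod",
    body: {
        // Either `build` (a specific build id) or `buildTags` may be supplied.
        buildTags: { name: "game-server", current: "true" },
    },
});
```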
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.upgradeAll({ + * project: "string", + * environment: "string", + * body: { + * tags: { + * "key": "value" + * }, + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * } + * } + * }) + */ + public async upgradeAll( + request: Rivet.containers.UpgradeAllContainersRequestQuery, + requestOptions?: Containers.RequestOptions, + ): Promise { + const { project, environment, body: _body } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + "/v1/containers/upgrade", + ), + method: "POST", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + body: serializers.containers.UpgradeAllContainersRequest.jsonOrThrow(_body, { + unrecognizedObjectKeys: "strip", + }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.UpgradeAllContainersResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling POST /v1/containers/upgrade."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + protected async _getAuthorizationHeader(): Promise { + const bearer = await core.Supplier.get(this._options.token); + if (bearer != null) { + return `Bearer ${bearer}`; + } + + return undefined; + } +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/client/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/client/index.ts new file mode 100644 index 0000000000..415726b7fe --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/client/index.ts @@ -0,0 +1 @@ +export * from "./requests"; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/CreateContainerRequestQuery.ts 
b/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/CreateContainerRequestQuery.ts new file mode 100644 index 0000000000..4cc48c6f03 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/CreateContainerRequestQuery.ts @@ -0,0 +1,49 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * endpointType: "hostname", + * body: { + * region: "string", + * tags: { + * "key": "value" + * }, + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * }, + * runtime: { + * environment: {}, + * network: { + * endpointType: "hostname" + * } + * }, + * network: { + * mode: "bridge", + * ports: {}, + * waitReady: true + * }, + * resources: { + * cpu: 1, + * memory: 1 + * }, + * lifecycle: { + * killTimeout: 1000000, + * durable: true + * } + * } + * } + */ +export interface CreateContainerRequestQuery { + project?: string; + environment?: string; + endpointType?: Rivet.containers.EndpointType; + body: Rivet.containers.CreateContainerRequest; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/DestroyContainerRequestQuery.ts b/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/DestroyContainerRequestQuery.ts new file mode 100644 index 0000000000..bde4d00874 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/DestroyContainerRequestQuery.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * @example + * { + * project: "string", + * environment: "string", + * overrideKillTimeout: 1000000 + * } + */ +export interface DestroyContainerRequestQuery { + project?: string; + environment?: string; + /** + * The duration to wait for in milliseconds before killing the container. This should be used to override the default kill timeout if a faster time is needed, say for ignoring a graceful shutdown. + */ + overrideKillTimeout?: number; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/GetContainersRequestQuery.ts b/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/GetContainersRequestQuery.ts new file mode 100644 index 0000000000..3455b96aab --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/GetContainersRequestQuery.ts @@ -0,0 +1,25 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * endpointType: "hostname", + * tagsJson: "string", + * includeDestroyed: true, + * cursor: "string" + * } + */ +export interface GetContainersRequestQuery { + project?: string; + environment?: string; + endpointType?: Rivet.containers.EndpointType; + tagsJson?: string; + includeDestroyed?: boolean; + cursor?: string; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/ListContainersRequestQuery.ts b/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/ListContainersRequestQuery.ts new file mode 100644 index 0000000000..ee279d09c3 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/ListContainersRequestQuery.ts @@ -0,0 +1,19 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * endpointType: "hostname" + * } + */ +export interface ListContainersRequestQuery { + project?: string; + environment?: string; + endpointType?: Rivet.containers.EndpointType; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/UpgradeAllContainersRequestQuery.ts b/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/UpgradeAllContainersRequestQuery.ts new file mode 100644 index 0000000000..97b0fe6caa --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/UpgradeAllContainersRequestQuery.ts @@ -0,0 +1,27 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * body: { + * tags: { + * "key": "value" + * }, + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * } + * } + * } + */ +export interface UpgradeAllContainersRequestQuery { + project?: string; + environment?: string; + body: Rivet.containers.UpgradeAllContainersRequest; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/UpgradeContainerRequestQuery.ts b/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/UpgradeContainerRequestQuery.ts new file mode 100644 index 0000000000..dd88b848d7 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/UpgradeContainerRequestQuery.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * body: { + * build: "d5e9c84f-c2b2-4bf4-b4b0-7ffd7a9ffc32", + * buildTags: { + * "key": "value" + * } + * } + * } + */ +export interface UpgradeContainerRequestQuery { + project?: string; + environment?: string; + body: Rivet.containers.UpgradeContainerRequest; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/index.ts new file mode 100644 index 0000000000..6fdd1f8d4e --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/client/requests/index.ts @@ -0,0 +1,6 @@ +export { type ListContainersRequestQuery } from "./ListContainersRequestQuery"; +export { type GetContainersRequestQuery } from "./GetContainersRequestQuery"; +export { type CreateContainerRequestQuery } from "./CreateContainerRequestQuery"; +export { type DestroyContainerRequestQuery } from "./DestroyContainerRequestQuery"; +export { type UpgradeContainerRequestQuery } from "./UpgradeContainerRequestQuery"; +export { type UpgradeAllContainersRequestQuery } from "./UpgradeAllContainersRequestQuery"; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/index.ts new file mode 100644 index 0000000000..a931b36375 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/index.ts @@ -0,0 +1,3 @@ +export * from "./types"; +export * from "./resources"; +export * from "./client"; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/index.ts 
new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Container.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Container.ts new file mode 100644 index 0000000000..3b8f94a422 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Container.ts @@ -0,0 +1,18 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface Container { + id: Rivet.Id; + region: string; + tags?: unknown; + runtime: Rivet.containers.Runtime; + network: Rivet.containers.Network; + resources: Rivet.containers.Resources; + lifecycle: Rivet.containers.Lifecycle; + createdAt: Rivet.Timestamp; + startedAt?: Rivet.Timestamp; + destroyedAt?: Rivet.Timestamp; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/EndpointType.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/EndpointType.ts new file mode 100644 index 0000000000..3385b64fb7 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/EndpointType.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export type EndpointType = "hostname" | "path"; +export const EndpointType = { + Hostname: "hostname", + Path: "path", +} as const; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/GuardRouting.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/GuardRouting.ts new file mode 100644 index 0000000000..039f163247 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/GuardRouting.ts @@ -0,0 +1,5 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface GuardRouting {} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/HostRouting.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/HostRouting.ts new file mode 100644 index 0000000000..cdd2164517 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/HostRouting.ts @@ -0,0 +1,5 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface HostRouting {} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Lifecycle.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Lifecycle.ts new file mode 100644 index 0000000000..e45d39b181 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Lifecycle.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface Lifecycle { + /** The duration to wait for in milliseconds before killing the container. This should be set to a safe default, and can be overridden during a DELETE request if needed. */ + killTimeout?: number; + /** If true, the container will try to reschedule itself automatically in the event of a crash or a datacenter failover. The container will not reschedule if it exits successfully. 
*/ + durable?: boolean; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Network.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Network.ts new file mode 100644 index 0000000000..7bb456e91d --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Network.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface Network { + mode: Rivet.containers.NetworkMode; + ports: Record; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/NetworkMode.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/NetworkMode.ts new file mode 100644 index 0000000000..14aee186e8 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/NetworkMode.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export type NetworkMode = "bridge" | "host"; +export const NetworkMode = { + Bridge: "bridge", + Host: "host", +} as const; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Port.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Port.ts new file mode 100644 index 0000000000..0fa9c5e224 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Port.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface Port { + protocol: Rivet.containers.PortProtocol; + internalPort?: number; + hostname?: string; + port?: number; + path?: string; + /** Fully formed connection URL including protocol, hostname, port, and path, if applicable. */ + url?: string; + routing: Rivet.containers.PortRouting; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/PortProtocol.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/PortProtocol.ts new file mode 100644 index 0000000000..0ec9364df2 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/PortProtocol.ts @@ -0,0 +1,12 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export type PortProtocol = "http" | "https" | "tcp" | "tcp_tls" | "udp"; +export const PortProtocol = { + Http: "http", + Https: "https", + Tcp: "tcp", + TcpTls: "tcp_tls", + Udp: "udp", +} as const; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/PortRouting.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/PortRouting.ts new file mode 100644 index 0000000000..6ec92a6d0a --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/PortRouting.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
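A small sketch tying together the `Container`, `Network`, and `Port` common types defined above: the ports map on a returned container is keyed by port name, and `url` is only populated once routing has assigned an address. The import path is an assumption.

```ts
import { Rivet } from "@rivet-gg/api"; // assumed entry point for the generated namespace

// Reads the routed address for a named port off a Container returned by the API.
function connectionUrl(container: Rivet.containers.Container, portName: string): string | undefined {
    const port: Rivet.containers.Port | undefined = container.network.ports[portName];
    // `url` is the fully formed connection URL (protocol, hostname, port, path) when applicable.
    return port?.url;
}
```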
+ */ + +import * as Rivet from "../../../../../index"; + +export interface PortRouting { + guard?: Rivet.containers.GuardRouting; + host?: Rivet.containers.HostRouting; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Resources.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Resources.ts new file mode 100644 index 0000000000..8255dc6705 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Resources.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface Resources { + /** + * The number of CPU cores in millicores, or 1/1000 of a core. For example, + * 1/8 of a core would be 125 millicores, and 1 core would be 1000 + * millicores. + */ + cpu: number; + /** The amount of memory in megabytes */ + memory: number; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Runtime.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Runtime.ts new file mode 100644 index 0000000000..a918ef8387 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/Runtime.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface Runtime { + build: string; + arguments?: string[]; + environment?: Record; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/index.ts new file mode 100644 index 0000000000..634dcaff02 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/common/types/index.ts @@ -0,0 +1,12 @@ +export * from "./Container"; +export * from "./Runtime"; +export * from "./Lifecycle"; +export * from "./Resources"; +export * from "./Network"; +export * from "./NetworkMode"; +export * from "./Port"; +export * from "./PortProtocol"; +export * from "./PortRouting"; +export * from "./GuardRouting"; +export * from "./HostRouting"; +export * from "./EndpointType"; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/index.ts new file mode 100644 index 0000000000..8b57b1caf5 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/index.ts @@ -0,0 +1,8 @@ +export * as common from "./common"; +export * from "./common/types"; +export * as logs from "./logs"; +export * from "./logs/types"; +export * as metrics from "./metrics"; +export * from "./metrics/types"; +export * from "./logs/client/requests"; +export * from "./metrics/client/requests"; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/Client.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/Client.ts new file mode 100644 index 0000000000..d8ff58ebf2 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/Client.ts @@ -0,0 +1,234 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as environments from "../../../../../../environments"; +import * as core from "../../../../../../core"; +import * as Rivet from "../../../../../index"; +import * as serializers from "../../../../../../serialization/index"; +import urlJoin from "url-join"; +import * as errors from "../../../../../../errors/index"; + +export declare namespace Logs { + export interface Options { + environment?: core.Supplier; + /** Specify a custom URL to connect the client to. */ + baseUrl?: core.Supplier; + token?: core.Supplier; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + fetcher?: core.FetchFunction; + } + + export interface RequestOptions { + /** The maximum time to wait for a response in seconds. */ + timeoutInSeconds?: number; + /** The number of times to retry the request. Defaults to 2. */ + maxRetries?: number; + /** A hook to abort the request. */ + abortSignal?: AbortSignal; + /** Additional headers to include in the request. */ + headers?: Record; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + } +} + +export class Logs { + constructor(protected readonly _options: Logs.Options = {}) {} + + /** + * Returns the logs for a given container. + * + * @param {Rivet.containers.GetContainerLogsRequestQuery} request + * @param {Logs.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.logs.get({ + * project: "string", + * environment: "string", + * stream: "std_out", + * containerIdsJson: "string", + * searchText: "string", + * searchCaseSensitive: true, + * searchEnableRegex: true, + * watchIndex: "string" + * }) + */ + public async get( + request: Rivet.containers.GetContainerLogsRequestQuery, + requestOptions?: Logs.RequestOptions, + ): Promise { + const { + project, + environment, + stream, + containerIdsJson, + searchText, + searchCaseSensitive, + searchEnableRegex, + watchIndex, + } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + _queryParams["stream"] = serializers.containers.QueryLogStream.jsonOrThrow(stream, { + unrecognizedObjectKeys: "strip", + }); + _queryParams["container_ids_json"] = containerIdsJson; + if (searchText != null) { + _queryParams["search_text"] = searchText; + } + + if (searchCaseSensitive != null) { + _queryParams["search_case_sensitive"] = searchCaseSensitive.toString(); + } + + if (searchEnableRegex != null) { + _queryParams["search_enable_regex"] = searchEnableRegex.toString(); + } + + if (watchIndex != null) { + _queryParams["watch_index"] = watchIndex; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + "/v1/containers/logs", + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? 
"25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.GetContainerLogsResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError("Timeout exceeded when calling GET /v1/containers/logs."); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + protected async _getAuthorizationHeader(): Promise { + const bearer = await core.Supplier.get(this._options.token); + if (bearer != null) { + return `Bearer ${bearer}`; + } + + return undefined; + } +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/index.ts new file mode 100644 index 0000000000..415726b7fe --- /dev/null 
+++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/index.ts @@ -0,0 +1 @@ +export * from "./requests"; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/requests/GetContainerLogsRequestQuery.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/requests/GetContainerLogsRequestQuery.ts new file mode 100644 index 0000000000..261f94c592 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/requests/GetContainerLogsRequestQuery.ts @@ -0,0 +1,32 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../../index"; + +/** + * @example + * { + * project: "string", + * environment: "string", + * stream: "std_out", + * containerIdsJson: "string", + * searchText: "string", + * searchCaseSensitive: true, + * searchEnableRegex: true, + * watchIndex: "string" + * } + */ +export interface GetContainerLogsRequestQuery { + project?: string; + environment?: string; + stream: Rivet.containers.QueryLogStream; + containerIdsJson: string; + searchText?: string; + searchCaseSensitive?: boolean; + searchEnableRegex?: boolean; + /** + * A query parameter denoting the requests watch index. + */ + watchIndex?: string; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/requests/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/requests/index.ts new file mode 100644 index 0000000000..fab7efe5ec --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/client/requests/index.ts @@ -0,0 +1 @@ +export { type GetContainerLogsRequestQuery } from "./GetContainerLogsRequestQuery"; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/index.ts new file mode 100644 index 0000000000..c9240f83b4 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from "./client"; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/types/GetContainerLogsResponse.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/types/GetContainerLogsResponse.ts new file mode 100644 index 0000000000..f64773d382 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/types/GetContainerLogsResponse.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../index"; + +export interface GetContainerLogsResponse { + /** List of container IDs in these logs. The order of these correspond to the index in the log entry. */ + containerIds: Rivet.Id[]; + /** Sorted old to new. */ + lines: string[]; + /** Sorted old to new. */ + timestamps: Rivet.Timestamp[]; + /** + * Streams the logs came from. + * + * 0 = stdout + * 1 = stderr + */ + streams: number[]; + /** List of flags denoting if this log is not directly from the container. */ + foreigns: boolean[]; + /** Index of the container that this log was for. Use this index to look the full ID in `container_ids`. 
*/ + containerIndices: number[]; + watch: Rivet.WatchResponse; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/types/QueryLogStream.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/types/QueryLogStream.ts new file mode 100644 index 0000000000..556646c57e --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/types/QueryLogStream.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export type QueryLogStream = "std_out" | "std_err" | "all"; +export const QueryLogStream = { + StdOut: "std_out", + StdErr: "std_err", + All: "all", +} as const; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/types/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/types/index.ts new file mode 100644 index 0000000000..0b4e4957cf --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/logs/types/index.ts @@ -0,0 +1,2 @@ +export * from "./GetContainerLogsResponse"; +export * from "./QueryLogStream"; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/Client.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/Client.ts new file mode 100644 index 0000000000..9c02a77644 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/Client.ts @@ -0,0 +1,209 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as environments from "../../../../../../environments"; +import * as core from "../../../../../../core"; +import * as Rivet from "../../../../../index"; +import * as serializers from "../../../../../../serialization/index"; +import urlJoin from "url-join"; +import * as errors from "../../../../../../errors/index"; + +export declare namespace Metrics { + export interface Options { + environment?: core.Supplier; + /** Specify a custom URL to connect the client to. */ + baseUrl?: core.Supplier; + token?: core.Supplier; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + fetcher?: core.FetchFunction; + } + + export interface RequestOptions { + /** The maximum time to wait for a response in seconds. */ + timeoutInSeconds?: number; + /** The number of times to retry the request. Defaults to 2. */ + maxRetries?: number; + /** A hook to abort the request. */ + abortSignal?: AbortSignal; + /** Additional headers to include in the request. */ + headers?: Record; + /** Override the X-API-Version header */ + xApiVersion?: "25.5.0"; + } +} + +export class Metrics { + constructor(protected readonly _options: Metrics.Options = {}) {} + + /** + * Returns the metrics for a given container. + * + * @param {Rivet.Id} container - The id of the container to destroy + * @param {Rivet.containers.GetContainerMetricsRequestQuery} request + * @param {Metrics.RequestOptions} requestOptions - Request-specific configuration. 
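Because `GetContainerLogsResponse` is laid out as parallel arrays, a sketch like the following shows how `containerIndices` resolves each line back to its container id; field names come directly from the type above.

```ts
// Sketch: groups log lines per container. `containerIndices[i]` indexes into `containerIds`.
function groupByContainer(res: Rivet.containers.GetContainerLogsResponse): Map<string, string[]> {
    const grouped = new Map<string, string[]>();
    res.lines.forEach((line, i) => {
        const containerId = res.containerIds[res.containerIndices[i]];
        const at = new Date(res.timestamps[i]).toISOString();
        const bucket = grouped.get(containerId) ?? [];
        bucket.push(`${at} ${line}`);
        grouped.set(containerId, bucket);
    });
    return grouped;
}
```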
+ * + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + * + * @example + * await client.containers.metrics.get("string", { + * project: "string", + * environment: "string", + * start: 1, + * end: 1, + * interval: 1 + * }) + */ + public async get( + container: Rivet.Id, + request: Rivet.containers.GetContainerMetricsRequestQuery, + requestOptions?: Metrics.RequestOptions, + ): Promise { + const { project, environment, start, end, interval } = request; + const _queryParams: Record = {}; + if (project != null) { + _queryParams["project"] = project; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + _queryParams["start"] = start.toString(); + _queryParams["end"] = end.toString(); + _queryParams["interval"] = interval.toString(); + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.RivetEnvironment.Production, + `/v1/containers/${encodeURIComponent(serializers.Id.jsonOrThrow(container))}/metrics/history`, + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + "X-API-Version": requestOptions?.xApiVersion ?? this._options?.xApiVersion ?? "25.5.0", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.containers.GetContainerMetricsResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 429: + throw new Rivet.RateLimitError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 403: + throw new Rivet.ForbiddenError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 408: + throw new Rivet.UnauthorizedError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 404: + throw new Rivet.NotFoundError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + case 400: + throw new Rivet.BadRequestError( + serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError( + "Timeout exceeded when calling GET /v1/containers/{container}/metrics/history.", + ); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + protected async _getAuthorizationHeader(): Promise { + const bearer = await core.Supplier.get(this._options.token); + if (bearer != null) { + return `Bearer ${bearer}`; + } + + return undefined; + } +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/index.ts new file mode 100644 index 0000000000..415726b7fe --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/index.ts @@ -0,0 +1 @@ +export * from "./requests"; diff --git 
a/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/requests/GetContainerMetricsRequestQuery.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/requests/GetContainerMetricsRequestQuery.ts new file mode 100644 index 0000000000..4bd8cd4208 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/requests/GetContainerMetricsRequestQuery.ts @@ -0,0 +1,21 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * @example + * { + * project: "string", + * environment: "string", + * start: 1, + * end: 1, + * interval: 1 + * } + */ +export interface GetContainerMetricsRequestQuery { + project?: string; + environment?: string; + start: number; + end: number; + interval: number; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/requests/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/requests/index.ts new file mode 100644 index 0000000000..276cf89b5a --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/client/requests/index.ts @@ -0,0 +1 @@ +export { type GetContainerMetricsRequestQuery } from "./GetContainerMetricsRequestQuery"; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/index.ts new file mode 100644 index 0000000000..c9240f83b4 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from "./client"; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts new file mode 100644 index 0000000000..a1cb89d5c0 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface GetContainerMetricsResponse { + containerIds: string[]; + metricNames: string[]; + metricAttributes: Record[]; + metricTypes: string[]; + metricValues: number[][]; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/types/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/types/index.ts new file mode 100644 index 0000000000..835db2ac20 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/resources/metrics/types/index.ts @@ -0,0 +1 @@ +export * from "./GetContainerMetricsResponse"; diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerNetworkRequest.ts b/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerNetworkRequest.ts new file mode 100644 index 0000000000..928c65a843 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerNetworkRequest.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
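A sketch of the metrics sub-client defined above. The units of `start`, `end`, and `interval` are an assumption (milliseconds) and should be checked against the API definition; pairing by index follows the parallel-array layout of `GetContainerMetricsResponse`.

```ts
// Sketch: queries a one-hour window of metrics and pairs each metric name with its series.
async function fetchMetrics(client: RivetClient, containerId: string) {
    const end = Date.now();
    const res = await client.containers.metrics.get(containerId, {
        project: "my-project",
        environment: "prod",
        start: end - 60 * 60 * 1000, // assumed milliseconds
        end,
        interval: 15_000,
    });
    return res.metricNames.map((name, i) => ({
        name,
        type: res.metricTypes[i],
        attributes: res.metricAttributes[i],
        values: res.metricValues[i],
    }));
}
```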
+ */ + +import * as Rivet from "../../../index"; + +export interface CreateContainerNetworkRequest { + mode?: Rivet.containers.NetworkMode; + ports?: Record; + waitReady?: boolean; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerPortRequest.ts b/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerPortRequest.ts new file mode 100644 index 0000000000..5be8ff1e37 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerPortRequest.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../index"; + +export interface CreateContainerPortRequest { + protocol: Rivet.containers.PortProtocol; + internalPort?: number; + routing?: Rivet.containers.PortRouting; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerRequest.ts b/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerRequest.ts new file mode 100644 index 0000000000..eacb98877b --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerRequest.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../index"; + +export interface CreateContainerRequest { + region?: string; + tags?: unknown; + build?: string; + buildTags?: unknown; + runtime?: Rivet.containers.CreateContainerRuntimeRequest; + network?: Rivet.containers.CreateContainerNetworkRequest; + resources: Rivet.containers.Resources; + lifecycle?: Rivet.containers.Lifecycle; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerResponse.ts b/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerResponse.ts new file mode 100644 index 0000000000..86b274113c --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerResponse.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../index"; + +export interface CreateContainerResponse { + /** The container that was created */ + container: Rivet.containers.Container; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts b/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts new file mode 100644 index 0000000000..5d9748a261 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../index"; + +export interface CreateContainerRuntimeNetworkRequest { + endpointType: Rivet.containers.EndpointType; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerRuntimeRequest.ts b/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerRuntimeRequest.ts new file mode 100644 index 0000000000..a8589fcb6e --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/types/CreateContainerRuntimeRequest.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../index"; + +export interface CreateContainerRuntimeRequest { + environment?: Record; + network?: Rivet.containers.CreateContainerRuntimeNetworkRequest; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/types/DestroyContainerResponse.ts b/sdks/api/runtime/typescript/src/api/resources/containers/types/DestroyContainerResponse.ts new file mode 100644 index 0000000000..2d37f14af9 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/types/DestroyContainerResponse.ts @@ -0,0 +1,5 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface DestroyContainerResponse {} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/types/GetContainerResponse.ts b/sdks/api/runtime/typescript/src/api/resources/containers/types/GetContainerResponse.ts new file mode 100644 index 0000000000..feaf5dc7e2 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/types/GetContainerResponse.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../index"; + +export interface GetContainerResponse { + container: Rivet.containers.Container; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/types/ListContainersResponse.ts b/sdks/api/runtime/typescript/src/api/resources/containers/types/ListContainersResponse.ts new file mode 100644 index 0000000000..c9d67e9383 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/types/ListContainersResponse.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../index"; + +export interface ListContainersResponse { + /** A list of containers for the project associated with the token. */ + containers: Rivet.containers.Container[]; + pagination: Rivet.Pagination; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeAllContainersRequest.ts b/sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeAllContainersRequest.ts new file mode 100644 index 0000000000..3be10284be --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeAllContainersRequest.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface UpgradeAllContainersRequest { + tags?: unknown; + build?: string; + buildTags?: unknown; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeAllContainersResponse.ts b/sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeAllContainersResponse.ts new file mode 100644 index 0000000000..d9c8a485b0 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeAllContainersResponse.ts @@ -0,0 +1,7 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface UpgradeAllContainersResponse { + count: number; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeContainerRequest.ts b/sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeContainerRequest.ts new file mode 100644 index 0000000000..26d34a1bc8 --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeContainerRequest.ts @@ -0,0 +1,8 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +export interface UpgradeContainerRequest { + build?: string; + buildTags?: unknown; +} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeContainerResponse.ts b/sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeContainerResponse.ts new file mode 100644 index 0000000000..6a6966dc4a --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/types/UpgradeContainerResponse.ts @@ -0,0 +1,5 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface UpgradeContainerResponse {} diff --git a/sdks/api/runtime/typescript/src/api/resources/containers/types/index.ts b/sdks/api/runtime/typescript/src/api/resources/containers/types/index.ts new file mode 100644 index 0000000000..436ea216ac --- /dev/null +++ b/sdks/api/runtime/typescript/src/api/resources/containers/types/index.ts @@ -0,0 +1,13 @@ +export * from "./GetContainerResponse"; +export * from "./CreateContainerRequest"; +export * from "./CreateContainerRuntimeRequest"; +export * from "./CreateContainerRuntimeNetworkRequest"; +export * from "./CreateContainerNetworkRequest"; +export * from "./CreateContainerPortRequest"; +export * from "./CreateContainerResponse"; +export * from "./DestroyContainerResponse"; +export * from "./UpgradeContainerRequest"; +export * from "./UpgradeContainerResponse"; +export * from "./UpgradeAllContainersRequest"; +export * from "./UpgradeAllContainersResponse"; +export * from "./ListContainersResponse"; diff --git a/sdks/api/runtime/typescript/src/api/resources/index.ts b/sdks/api/runtime/typescript/src/api/resources/index.ts index d63ff467d7..9abca03905 100644 --- a/sdks/api/runtime/typescript/src/api/resources/index.ts +++ b/sdks/api/runtime/typescript/src/api/resources/index.ts @@ -1,5 +1,6 @@ export * as actors from "./actors"; export * as builds from "./builds"; +export * as containers from "./containers"; export * as regions from "./regions"; export * as routes from "./routes"; export * as common from "./common"; diff --git a/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/common/types/Actor.ts b/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/common/types/Actor.ts index aad5b7be1c..0759888555 100644 --- a/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/common/types/Actor.ts +++ b/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/common/types/Actor.ts @@ -8,7 +8,6 @@ import * as core from "../../../../../../core"; import { Id } from "../../../../common/types/Id"; import { Runtime } from "./Runtime"; import { Network } from "./Network"; -import { Resources } from "./Resources"; import { Lifecycle } from "./Lifecycle"; import { Timestamp } from "../../../../common/types/Timestamp"; @@ -19,7 +18,6 @@ export const Actor: core.serialization.ObjectSchema>>>>>> 43e5048bc (fix: api changes) diff --git a/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/logs/types/GetActorLogsResponse.ts b/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/logs/types/GetActorLogsResponse.ts index 5e04d4b778..9cdb06996e 100644 --- a/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/logs/types/GetActorLogsResponse.ts +++ b/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/logs/types/GetActorLogsResponse.ts @@ -5,7 +5,6 @@ import * as serializers from "../../../../../index"; import * as Rivet from "../../../../../../api/index"; import * 
as core from "../../../../../../core"; -import { Id } from "../../../../common/types/Id"; import { Timestamp } from "../../../../common/types/Timestamp"; import { WatchResponse } from "../../../../common/types/WatchResponse"; @@ -13,7 +12,7 @@ export const GetActorLogsResponse: core.serialization.ObjectSchema< serializers.actors.GetActorLogsResponse.Raw, Rivet.actors.GetActorLogsResponse > = core.serialization.object({ - actorIds: core.serialization.property("actor_ids", core.serialization.list(Id)), + actorIds: core.serialization.property("actor_ids", core.serialization.list(core.serialization.string())), lines: core.serialization.list(core.serialization.string()), timestamps: core.serialization.list(Timestamp), streams: core.serialization.list(core.serialization.number()), @@ -24,7 +23,7 @@ export const GetActorLogsResponse: core.serialization.ObjectSchema< export declare namespace GetActorLogsResponse { export interface Raw { - actor_ids: Id.Raw[]; + actor_ids: string[]; lines: string[]; timestamps: Timestamp.Raw[]; streams: number[]; diff --git a/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/metrics/index.ts b/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/metrics/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/metrics/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/metrics/types/GetActorMetricsResponse.ts b/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/metrics/types/GetActorMetricsResponse.ts new file mode 100644 index 0000000000..ed4893ed16 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/metrics/types/GetActorMetricsResponse.ts @@ -0,0 +1,34 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const GetActorMetricsResponse: core.serialization.ObjectSchema< + serializers.actors.GetActorMetricsResponse.Raw, + Rivet.actors.GetActorMetricsResponse +> = core.serialization.object({ + actorIds: core.serialization.property("actor_ids", core.serialization.list(core.serialization.string())), + metricNames: core.serialization.property("metric_names", core.serialization.list(core.serialization.string())), + metricAttributes: core.serialization.property( + "metric_attributes", + core.serialization.list(core.serialization.record(core.serialization.string(), core.serialization.string())), + ), + metricTypes: core.serialization.property("metric_types", core.serialization.list(core.serialization.string())), + metricValues: core.serialization.property( + "metric_values", + core.serialization.list(core.serialization.list(core.serialization.number())), + ), +}); + +export declare namespace GetActorMetricsResponse { + export interface Raw { + actor_ids: string[]; + metric_names: string[]; + metric_attributes: Record[]; + metric_types: string[]; + metric_values: number[][]; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/metrics/types/index.ts b/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/metrics/types/index.ts new file mode 100644 index 0000000000..c5cf235d4d --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/metrics/types/index.ts @@ -0,0 +1 @@ +export * from "./GetActorMetricsResponse"; diff --git a/sdks/api/runtime/typescript/src/serialization/resources/actors/types/CreateActorRequest.ts b/sdks/api/runtime/typescript/src/serialization/resources/actors/types/CreateActorRequest.ts index 927681ed0d..564b80275e 100644 --- a/sdks/api/runtime/typescript/src/serialization/resources/actors/types/CreateActorRequest.ts +++ b/sdks/api/runtime/typescript/src/serialization/resources/actors/types/CreateActorRequest.ts @@ -7,7 +7,6 @@ import * as Rivet from "../../../../api/index"; import * as core from "../../../../core"; import { CreateActorRuntimeRequest } from "./CreateActorRuntimeRequest"; import { CreateActorNetworkRequest } from "./CreateActorNetworkRequest"; -import { Resources } from "../resources/common/types/Resources"; import { Lifecycle } from "../resources/common/types/Lifecycle"; export const CreateActorRequest: core.serialization.ObjectSchema< @@ -20,7 +19,6 @@ export const CreateActorRequest: core.serialization.ObjectSchema< buildTags: core.serialization.property("build_tags", core.serialization.unknown().optional()), runtime: CreateActorRuntimeRequest.optional(), network: CreateActorNetworkRequest.optional(), - resources: Resources.optional(), lifecycle: Lifecycle.optional(), }); @@ -32,7 +30,6 @@ export declare namespace CreateActorRequest { build_tags?: unknown | null; runtime?: CreateActorRuntimeRequest.Raw | null; network?: CreateActorNetworkRequest.Raw | null; - resources?: Resources.Raw | null; lifecycle?: Lifecycle.Raw | null; } } diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/index.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/index.ts new file mode 100644 index 0000000000..3ce0a3e38e --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from 
"./resources"; diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/index.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Container.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Container.ts new file mode 100644 index 0000000000..003549295d --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Container.ts @@ -0,0 +1,44 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { Id } from "../../../../common/types/Id"; +import { Runtime } from "./Runtime"; +import { Network } from "./Network"; +import { Resources } from "./Resources"; +import { Lifecycle } from "./Lifecycle"; +import { Timestamp } from "../../../../common/types/Timestamp"; + +export const Container: core.serialization.ObjectSchema< + serializers.containers.Container.Raw, + Rivet.containers.Container +> = core.serialization.object({ + id: Id, + region: core.serialization.string(), + tags: core.serialization.unknown(), + runtime: Runtime, + network: Network, + resources: Resources, + lifecycle: Lifecycle, + createdAt: core.serialization.property("created_at", Timestamp), + startedAt: core.serialization.property("started_at", Timestamp.optional()), + destroyedAt: core.serialization.property("destroyed_at", Timestamp.optional()), +}); + +export declare namespace Container { + export interface Raw { + id: Id.Raw; + region: string; + tags?: unknown; + runtime: Runtime.Raw; + network: Network.Raw; + resources: Resources.Raw; + lifecycle: Lifecycle.Raw; + created_at: Timestamp.Raw; + started_at?: Timestamp.Raw | null; + destroyed_at?: Timestamp.Raw | null; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/EndpointType.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/EndpointType.ts new file mode 100644 index 0000000000..2897fe943a --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/EndpointType.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const EndpointType: core.serialization.Schema< + serializers.containers.EndpointType.Raw, + Rivet.containers.EndpointType +> = core.serialization.enum_(["hostname", "path"]); + +export declare namespace EndpointType { + export type Raw = "hostname" | "path"; +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/GuardRouting.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/GuardRouting.ts new file mode 100644 index 0000000000..19859fe146 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/GuardRouting.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const GuardRouting: core.serialization.ObjectSchema< + serializers.containers.GuardRouting.Raw, + Rivet.containers.GuardRouting +> = core.serialization.object({}); + +export declare namespace GuardRouting { + export interface Raw {} +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/HostRouting.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/HostRouting.ts new file mode 100644 index 0000000000..7af0c934be --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/HostRouting.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const HostRouting: core.serialization.ObjectSchema< + serializers.containers.HostRouting.Raw, + Rivet.containers.HostRouting +> = core.serialization.object({}); + +export declare namespace HostRouting { + export interface Raw {} +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Lifecycle.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Lifecycle.ts new file mode 100644 index 0000000000..dfce2b7327 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Lifecycle.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const Lifecycle: core.serialization.ObjectSchema< + serializers.containers.Lifecycle.Raw, + Rivet.containers.Lifecycle +> = core.serialization.object({ + killTimeout: core.serialization.property("kill_timeout", core.serialization.number().optional()), + durable: core.serialization.boolean().optional(), +}); + +export declare namespace Lifecycle { + export interface Raw { + kill_timeout?: number | null; + durable?: boolean | null; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Network.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Network.ts new file mode 100644 index 0000000000..8e4600735c --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Network.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { NetworkMode } from "./NetworkMode"; +import { Port } from "./Port"; + +export const Network: core.serialization.ObjectSchema = + core.serialization.object({ + mode: NetworkMode, + ports: core.serialization.record(core.serialization.string(), Port), + }); + +export declare namespace Network { + export interface Raw { + mode: NetworkMode.Raw; + ports: Record; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/NetworkMode.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/NetworkMode.ts new file mode 100644 index 0000000000..69d5581298 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/NetworkMode.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const NetworkMode: core.serialization.Schema< + serializers.containers.NetworkMode.Raw, + Rivet.containers.NetworkMode +> = core.serialization.enum_(["bridge", "host"]); + +export declare namespace NetworkMode { + export type Raw = "bridge" | "host"; +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Port.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Port.ts new file mode 100644 index 0000000000..3c9cb6d654 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Port.ts @@ -0,0 +1,32 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { PortProtocol } from "./PortProtocol"; +import { PortRouting } from "./PortRouting"; + +export const Port: core.serialization.ObjectSchema = + core.serialization.object({ + protocol: PortProtocol, + internalPort: core.serialization.property("internal_port", core.serialization.number().optional()), + hostname: core.serialization.string().optional(), + port: core.serialization.number().optional(), + path: core.serialization.string().optional(), + url: core.serialization.string().optional(), + routing: PortRouting, + }); + +export declare namespace Port { + export interface Raw { + protocol: PortProtocol.Raw; + internal_port?: number | null; + hostname?: string | null; + port?: number | null; + path?: string | null; + url?: string | null; + routing: PortRouting.Raw; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/PortProtocol.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/PortProtocol.ts new file mode 100644 index 0000000000..c45d0c9be2 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/PortProtocol.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const PortProtocol: core.serialization.Schema< + serializers.containers.PortProtocol.Raw, + Rivet.containers.PortProtocol +> = core.serialization.enum_(["http", "https", "tcp", "tcp_tls", "udp"]); + +export declare namespace PortProtocol { + export type Raw = "http" | "https" | "tcp" | "tcp_tls" | "udp"; +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/PortRouting.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/PortRouting.ts new file mode 100644 index 0000000000..e84aace739 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/PortRouting.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { GuardRouting } from "./GuardRouting"; +import { HostRouting } from "./HostRouting"; + +export const PortRouting: core.serialization.ObjectSchema< + serializers.containers.PortRouting.Raw, + Rivet.containers.PortRouting +> = core.serialization.object({ + guard: GuardRouting.optional(), + host: HostRouting.optional(), +}); + +export declare namespace PortRouting { + export interface Raw { + guard?: GuardRouting.Raw | null; + host?: HostRouting.Raw | null; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/common/types/Resources.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Resources.ts similarity index 58% rename from sdks/api/runtime/typescript/src/serialization/resources/actors/resources/common/types/Resources.ts rename to sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Resources.ts index 02d2b0188f..f2cdd4f846 100644 --- a/sdks/api/runtime/typescript/src/serialization/resources/actors/resources/common/types/Resources.ts +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Resources.ts @@ -6,11 +6,13 @@ import * as serializers from "../../../../../index"; import * as Rivet from "../../../../../../api/index"; import * as core from "../../../../../../core"; -export const Resources: core.serialization.ObjectSchema = - core.serialization.object({ - cpu: core.serialization.number(), - memory: core.serialization.number(), - }); +export const Resources: core.serialization.ObjectSchema< + serializers.containers.Resources.Raw, + Rivet.containers.Resources +> = core.serialization.object({ + cpu: core.serialization.number(), + memory: core.serialization.number(), +}); export declare namespace Resources { export interface Raw { diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Runtime.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Runtime.ts new file mode 100644 index 0000000000..2966bdc0d1 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/Runtime.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const Runtime: core.serialization.ObjectSchema = + core.serialization.object({ + build: core.serialization.string(), + arguments: core.serialization.list(core.serialization.string()).optional(), + environment: core.serialization.record(core.serialization.string(), core.serialization.string()).optional(), + }); + +export declare namespace Runtime { + export interface Raw { + build: string; + arguments?: string[] | null; + environment?: Record | null; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/index.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/index.ts new file mode 100644 index 0000000000..634dcaff02 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/common/types/index.ts @@ -0,0 +1,12 @@ +export * from "./Container"; +export * from "./Runtime"; +export * from "./Lifecycle"; +export * from "./Resources"; +export * from "./Network"; +export * from "./NetworkMode"; +export * from "./Port"; +export * from "./PortProtocol"; +export * from "./PortRouting"; +export * from "./GuardRouting"; +export * from "./HostRouting"; +export * from "./EndpointType"; diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/index.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/index.ts new file mode 100644 index 0000000000..3bb7903aa2 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/index.ts @@ -0,0 +1,6 @@ +export * as common from "./common"; +export * from "./common/types"; +export * as logs from "./logs"; +export * from "./logs/types"; +export * as metrics from "./metrics"; +export * from "./metrics/types"; diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/index.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/types/GetContainerLogsResponse.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/types/GetContainerLogsResponse.ts new file mode 100644 index 0000000000..53d8a03a79 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/types/GetContainerLogsResponse.ts @@ -0,0 +1,38 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; +import { Id } from "../../../../common/types/Id"; +import { Timestamp } from "../../../../common/types/Timestamp"; +import { WatchResponse } from "../../../../common/types/WatchResponse"; + +export const GetContainerLogsResponse: core.serialization.ObjectSchema< + serializers.containers.GetContainerLogsResponse.Raw, + Rivet.containers.GetContainerLogsResponse +> = core.serialization.object({ + containerIds: core.serialization.property("container_ids", core.serialization.list(Id)), + lines: core.serialization.list(core.serialization.string()), + timestamps: core.serialization.list(Timestamp), + streams: core.serialization.list(core.serialization.number()), + foreigns: core.serialization.list(core.serialization.boolean()), + containerIndices: core.serialization.property( + "container_indices", + core.serialization.list(core.serialization.number()), + ), + watch: WatchResponse, +}); + +export declare namespace GetContainerLogsResponse { + export interface Raw { + container_ids: Id.Raw[]; + lines: string[]; + timestamps: Timestamp.Raw[]; + streams: number[]; + foreigns: boolean[]; + container_indices: number[]; + watch: WatchResponse.Raw; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/types/QueryLogStream.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/types/QueryLogStream.ts new file mode 100644 index 0000000000..c76ce49c84 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/types/QueryLogStream.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const QueryLogStream: core.serialization.Schema< + serializers.containers.QueryLogStream.Raw, + Rivet.containers.QueryLogStream +> = core.serialization.enum_(["std_out", "std_err", "all"]); + +export declare namespace QueryLogStream { + export type Raw = "std_out" | "std_err" | "all"; +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/types/index.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/types/index.ts new file mode 100644 index 0000000000..0b4e4957cf --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/logs/types/index.ts @@ -0,0 +1,2 @@ +export * from "./GetContainerLogsResponse"; +export * from "./QueryLogStream"; diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/metrics/index.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/metrics/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/metrics/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts new file mode 100644 index 0000000000..66f87c2ecc --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/metrics/types/GetContainerMetricsResponse.ts @@ -0,0 +1,34 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../../index"; +import * as Rivet from "../../../../../../api/index"; +import * as core from "../../../../../../core"; + +export const GetContainerMetricsResponse: core.serialization.ObjectSchema< + serializers.containers.GetContainerMetricsResponse.Raw, + Rivet.containers.GetContainerMetricsResponse +> = core.serialization.object({ + containerIds: core.serialization.property("container_ids", core.serialization.list(core.serialization.string())), + metricNames: core.serialization.property("metric_names", core.serialization.list(core.serialization.string())), + metricAttributes: core.serialization.property( + "metric_attributes", + core.serialization.list(core.serialization.record(core.serialization.string(), core.serialization.string())), + ), + metricTypes: core.serialization.property("metric_types", core.serialization.list(core.serialization.string())), + metricValues: core.serialization.property( + "metric_values", + core.serialization.list(core.serialization.list(core.serialization.number())), + ), +}); + +export declare namespace GetContainerMetricsResponse { + export interface Raw { + container_ids: string[]; + metric_names: string[]; + metric_attributes: Record[]; + metric_types: string[]; + metric_values: number[][]; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/metrics/types/index.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/metrics/types/index.ts new file mode 100644 index 0000000000..835db2ac20 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/resources/metrics/types/index.ts @@ -0,0 +1 @@ +export * from "./GetContainerMetricsResponse"; diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerNetworkRequest.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerNetworkRequest.ts new file mode 100644 index 0000000000..9da2cd4cc8 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerNetworkRequest.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { NetworkMode } from "../resources/common/types/NetworkMode"; +import { CreateContainerPortRequest } from "./CreateContainerPortRequest"; + +export const CreateContainerNetworkRequest: core.serialization.ObjectSchema< + serializers.containers.CreateContainerNetworkRequest.Raw, + Rivet.containers.CreateContainerNetworkRequest +> = core.serialization.object({ + mode: NetworkMode.optional(), + ports: core.serialization.record(core.serialization.string(), CreateContainerPortRequest).optional(), + waitReady: core.serialization.property("wait_ready", core.serialization.boolean().optional()), +}); + +export declare namespace CreateContainerNetworkRequest { + export interface Raw { + mode?: NetworkMode.Raw | null; + ports?: Record | null; + wait_ready?: boolean | null; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerPortRequest.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerPortRequest.ts new file mode 100644 index 0000000000..9016ed4e9f --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerPortRequest.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { PortProtocol } from "../resources/common/types/PortProtocol"; +import { PortRouting } from "../resources/common/types/PortRouting"; + +export const CreateContainerPortRequest: core.serialization.ObjectSchema< + serializers.containers.CreateContainerPortRequest.Raw, + Rivet.containers.CreateContainerPortRequest +> = core.serialization.object({ + protocol: PortProtocol, + internalPort: core.serialization.property("internal_port", core.serialization.number().optional()), + routing: PortRouting.optional(), +}); + +export declare namespace CreateContainerPortRequest { + export interface Raw { + protocol: PortProtocol.Raw; + internal_port?: number | null; + routing?: PortRouting.Raw | null; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerRequest.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerRequest.ts new file mode 100644 index 0000000000..d929186141 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerRequest.ts @@ -0,0 +1,38 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { CreateContainerRuntimeRequest } from "./CreateContainerRuntimeRequest"; +import { CreateContainerNetworkRequest } from "./CreateContainerNetworkRequest"; +import { Resources } from "../resources/common/types/Resources"; +import { Lifecycle } from "../resources/common/types/Lifecycle"; + +export const CreateContainerRequest: core.serialization.ObjectSchema< + serializers.containers.CreateContainerRequest.Raw, + Rivet.containers.CreateContainerRequest +> = core.serialization.object({ + region: core.serialization.string().optional(), + tags: core.serialization.unknown(), + build: core.serialization.string().optional(), + buildTags: core.serialization.property("build_tags", core.serialization.unknown().optional()), + runtime: CreateContainerRuntimeRequest.optional(), + network: CreateContainerNetworkRequest.optional(), + resources: Resources, + lifecycle: Lifecycle.optional(), +}); + +export declare namespace CreateContainerRequest { + export interface Raw { + region?: string | null; + tags?: unknown; + build?: string | null; + build_tags?: unknown | null; + runtime?: CreateContainerRuntimeRequest.Raw | null; + network?: CreateContainerNetworkRequest.Raw | null; + resources: Resources.Raw; + lifecycle?: Lifecycle.Raw | null; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerResponse.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerResponse.ts new file mode 100644 index 0000000000..91e5c48b1f --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerResponse.ts @@ -0,0 +1,21 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { Container } from "../resources/common/types/Container"; + +export const CreateContainerResponse: core.serialization.ObjectSchema< + serializers.containers.CreateContainerResponse.Raw, + Rivet.containers.CreateContainerResponse +> = core.serialization.object({ + container: Container, +}); + +export declare namespace CreateContainerResponse { + export interface Raw { + container: Container.Raw; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts new file mode 100644 index 0000000000..adc30398c8 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeNetworkRequest.ts @@ -0,0 +1,21 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { EndpointType } from "../resources/common/types/EndpointType"; + +export const CreateContainerRuntimeNetworkRequest: core.serialization.ObjectSchema< + serializers.containers.CreateContainerRuntimeNetworkRequest.Raw, + Rivet.containers.CreateContainerRuntimeNetworkRequest +> = core.serialization.object({ + endpointType: core.serialization.property("endpoint_type", EndpointType), +}); + +export declare namespace CreateContainerRuntimeNetworkRequest { + export interface Raw { + endpoint_type: EndpointType.Raw; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeRequest.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeRequest.ts new file mode 100644 index 0000000000..b4cd6b0c0e --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/CreateContainerRuntimeRequest.ts @@ -0,0 +1,23 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { CreateContainerRuntimeNetworkRequest } from "./CreateContainerRuntimeNetworkRequest"; + +export const CreateContainerRuntimeRequest: core.serialization.ObjectSchema< + serializers.containers.CreateContainerRuntimeRequest.Raw, + Rivet.containers.CreateContainerRuntimeRequest +> = core.serialization.object({ + environment: core.serialization.record(core.serialization.string(), core.serialization.string()).optional(), + network: CreateContainerRuntimeNetworkRequest.optional(), +}); + +export declare namespace CreateContainerRuntimeRequest { + export interface Raw { + environment?: Record | null; + network?: CreateContainerRuntimeNetworkRequest.Raw | null; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/types/DestroyContainerResponse.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/DestroyContainerResponse.ts new file mode 100644 index 0000000000..d658a62fdf --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/DestroyContainerResponse.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; + +export const DestroyContainerResponse: core.serialization.ObjectSchema< + serializers.containers.DestroyContainerResponse.Raw, + Rivet.containers.DestroyContainerResponse +> = core.serialization.object({}); + +export declare namespace DestroyContainerResponse { + export interface Raw {} +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/types/GetContainerResponse.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/GetContainerResponse.ts new file mode 100644 index 0000000000..e2c540bd61 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/GetContainerResponse.ts @@ -0,0 +1,21 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { Container } from "../resources/common/types/Container"; + +export const GetContainerResponse: core.serialization.ObjectSchema< + serializers.containers.GetContainerResponse.Raw, + Rivet.containers.GetContainerResponse +> = core.serialization.object({ + container: Container, +}); + +export declare namespace GetContainerResponse { + export interface Raw { + container: Container.Raw; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/types/ListContainersResponse.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/ListContainersResponse.ts new file mode 100644 index 0000000000..b2b6744862 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/ListContainersResponse.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; +import { Container } from "../resources/common/types/Container"; +import { Pagination } from "../../common/types/Pagination"; + +export const ListContainersResponse: core.serialization.ObjectSchema< + serializers.containers.ListContainersResponse.Raw, + Rivet.containers.ListContainersResponse +> = core.serialization.object({ + containers: core.serialization.list(Container), + pagination: Pagination, +}); + +export declare namespace ListContainersResponse { + export interface Raw { + containers: Container.Raw[]; + pagination: Pagination.Raw; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeAllContainersRequest.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeAllContainersRequest.ts new file mode 100644 index 0000000000..438ebaa375 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeAllContainersRequest.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; + +export const UpgradeAllContainersRequest: core.serialization.ObjectSchema< + serializers.containers.UpgradeAllContainersRequest.Raw, + Rivet.containers.UpgradeAllContainersRequest +> = core.serialization.object({ + tags: core.serialization.unknown(), + build: core.serialization.string().optional(), + buildTags: core.serialization.property("build_tags", core.serialization.unknown().optional()), +}); + +export declare namespace UpgradeAllContainersRequest { + export interface Raw { + tags?: unknown; + build?: string | null; + build_tags?: unknown | null; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeAllContainersResponse.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeAllContainersResponse.ts new file mode 100644 index 0000000000..2b283e1f7e --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeAllContainersResponse.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; + +export const UpgradeAllContainersResponse: core.serialization.ObjectSchema< + serializers.containers.UpgradeAllContainersResponse.Raw, + Rivet.containers.UpgradeAllContainersResponse +> = core.serialization.object({ + count: core.serialization.number(), +}); + +export declare namespace UpgradeAllContainersResponse { + export interface Raw { + count: number; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeContainerRequest.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeContainerRequest.ts new file mode 100644 index 0000000000..c13050f711 --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeContainerRequest.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; + +export const UpgradeContainerRequest: core.serialization.ObjectSchema< + serializers.containers.UpgradeContainerRequest.Raw, + Rivet.containers.UpgradeContainerRequest +> = core.serialization.object({ + build: core.serialization.string().optional(), + buildTags: core.serialization.property("build_tags", core.serialization.unknown().optional()), +}); + +export declare namespace UpgradeContainerRequest { + export interface Raw { + build?: string | null; + build_tags?: unknown | null; + } +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeContainerResponse.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeContainerResponse.ts new file mode 100644 index 0000000000..e88f337dce --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/UpgradeContainerResponse.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Rivet from "../../../../api/index"; +import * as core from "../../../../core"; + +export const UpgradeContainerResponse: core.serialization.ObjectSchema< + serializers.containers.UpgradeContainerResponse.Raw, + Rivet.containers.UpgradeContainerResponse +> = core.serialization.object({}); + +export declare namespace UpgradeContainerResponse { + export interface Raw {} +} diff --git a/sdks/api/runtime/typescript/src/serialization/resources/containers/types/index.ts b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/index.ts new file mode 100644 index 0000000000..436ea216ac --- /dev/null +++ b/sdks/api/runtime/typescript/src/serialization/resources/containers/types/index.ts @@ -0,0 +1,13 @@ +export * from "./GetContainerResponse"; +export * from "./CreateContainerRequest"; +export * from "./CreateContainerRuntimeRequest"; +export * from "./CreateContainerRuntimeNetworkRequest"; +export * from "./CreateContainerNetworkRequest"; +export * from "./CreateContainerPortRequest"; +export * from "./CreateContainerResponse"; +export * from "./DestroyContainerResponse"; +export * from "./UpgradeContainerRequest"; +export * from "./UpgradeContainerResponse"; +export * from "./UpgradeAllContainersRequest"; +export * from "./UpgradeAllContainersResponse"; +export * from "./ListContainersResponse"; diff --git a/sdks/api/runtime/typescript/src/serialization/resources/index.ts b/sdks/api/runtime/typescript/src/serialization/resources/index.ts index be45100f79..379286d117 100644 --- a/sdks/api/runtime/typescript/src/serialization/resources/index.ts +++ b/sdks/api/runtime/typescript/src/serialization/resources/index.ts @@ -1,5 +1,6 @@ export * as actors from "./actors"; export * as builds from "./builds"; +export * as containers from "./containers"; export * as regions from "./regions"; export * as routes from "./routes"; export * as common from "./common"; diff --git a/site/src/content/docs/api/actors/v1/create.mdx b/site/src/content/docs/api/actors/v1/create.mdx new file mode 100644 index 0000000000..676b726357 --- /dev/null +++ b/site/src/content/docs/api/actors/v1/create.mdx @@ -0,0 +1,41 @@ +{/* This file is auto-generated by `generateApi.js`. + * + * Do not edit this file directly. + */} + +import { JsonSchemaPreview, PropertyLabel } from '@/components/JsonSchemaPreview'; +import API_SCHEMA from './../../spec.json'; + +# actors.v1.create + +## Description +Create a new actor. + +## Code Examples + + + +```bash {{ "title": "cURL" }} +# Write the request body to body.json before running +curl -X POST -d '@body.json' 'https://api.rivet.gg/actors' + + +``` + +```ts +// Create Rivet client +import { RivetClient } from '@rivet-gg/api'; +const RIVET = new RivetClient({ token: '[YOUR TOKEN HERE]' }); + +// Make request +await RIVET.actors.v1.create({ + // Add your request body here +}); +``` + + + +## Schema + + + diff --git a/site/src/content/docs/api/actors/v1/destroy.mdx b/site/src/content/docs/api/actors/v1/destroy.mdx new file mode 100644 index 0000000000..b42f7a071f --- /dev/null +++ b/site/src/content/docs/api/actors/v1/destroy.mdx @@ -0,0 +1,37 @@ +{/* This file is auto-generated by `generateApi.js`. + * + * Do not edit this file directly. + */} + +import { JsonSchemaPreview, PropertyLabel } from '@/components/JsonSchemaPreview'; +import API_SCHEMA from './../../spec.json'; + +# actors.v1.destroy + +## Description +Destroy a actor. 
+ +## Code Examples + + + +```bash {{ "title": "cURL" }} +curl -X DELETE 'https://api.rivet.gg/actors/{actor}' +``` + +```ts +// Create Rivet client +import { RivetClient } from '@rivet-gg/api'; +const RIVET = new RivetClient({ token: '[YOUR TOKEN HERE]' }); + +// Make request +await RIVET.actors.v1.destroy({ + // Add your request body here +}); +``` + + + +## Schema + + diff --git a/site/src/content/docs/api/actors/v1/get.mdx b/site/src/content/docs/api/actors/v1/get.mdx new file mode 100644 index 0000000000..39b97ee784 --- /dev/null +++ b/site/src/content/docs/api/actors/v1/get.mdx @@ -0,0 +1,37 @@ +{/* This file is auto-generated by `generateApi.js`. + * + * Do not edit this file directly. + */} + +import { JsonSchemaPreview, PropertyLabel } from '@/components/JsonSchemaPreview'; +import API_SCHEMA from './../../spec.json'; + +# actors.v1.get + +## Description +Gets a actor. + +## Code Examples + + + +```bash {{ "title": "cURL" }} +curl -X GET 'https://api.rivet.gg/actors/{actor}' +``` + +```ts +// Create Rivet client +import { RivetClient } from '@rivet-gg/api'; +const RIVET = new RivetClient({ token: '[YOUR TOKEN HERE]' }); + +// Make request +await RIVET.actors.v1.get({ + // Add your request body here +}); +``` + + + +## Schema + + diff --git a/site/src/content/docs/api/actors/v1/list.mdx b/site/src/content/docs/api/actors/v1/list.mdx new file mode 100644 index 0000000000..c593b19b3b --- /dev/null +++ b/site/src/content/docs/api/actors/v1/list.mdx @@ -0,0 +1,37 @@ +{/* This file is auto-generated by `generateApi.js`. + * + * Do not edit this file directly. + */} + +import { JsonSchemaPreview, PropertyLabel } from '@/components/JsonSchemaPreview'; +import API_SCHEMA from './../../spec.json'; + +# actors.v1.list + +## Description +Lists all actors associated with the token used. Can be filtered by tags in the query string. + +## Code Examples + + + +```bash {{ "title": "cURL" }} +curl -X GET 'https://api.rivet.gg/actors' +``` + +```ts +// Create Rivet client +import { RivetClient } from '@rivet-gg/api'; +const RIVET = new RivetClient({ token: '[YOUR TOKEN HERE]' }); + +// Make request +await RIVET.actors.v1.list({ + // Add your request body here +}); +``` + + + +## Schema + + diff --git a/site/src/content/docs/api/actors/v1/logs/get.mdx b/site/src/content/docs/api/actors/v1/logs/get.mdx new file mode 100644 index 0000000000..fd5538cb97 --- /dev/null +++ b/site/src/content/docs/api/actors/v1/logs/get.mdx @@ -0,0 +1,37 @@ +{/* This file is auto-generated by `generateApi.js`. + * + * Do not edit this file directly. + */} + +import { JsonSchemaPreview, PropertyLabel } from '@/components/JsonSchemaPreview'; +import API_SCHEMA from './../../../spec.json'; + +# actors.v1.logs.get + +## Description +Returns the logs for a given actor. + +## Code Examples + + + +```bash {{ "title": "cURL" }} +curl -X GET 'https://api.rivet.gg/actors/logs' +``` + +```ts +// Create Rivet client +import { RivetClient } from '@rivet-gg/api'; +const RIVET = new RivetClient({ token: '[YOUR TOKEN HERE]' }); + +// Make request +await RIVET.actors.v1.logs.get({ + // Add your request body here +}); +``` + + + +## Schema + + diff --git a/site/src/content/docs/api/actors/v1/metrics/get.mdx b/site/src/content/docs/api/actors/v1/metrics/get.mdx new file mode 100644 index 0000000000..934b2cb1b7 --- /dev/null +++ b/site/src/content/docs/api/actors/v1/metrics/get.mdx @@ -0,0 +1,37 @@ +{/* This file is auto-generated by `generateApi.js`. + * + * Do not edit this file directly. 
+ */} + +import { JsonSchemaPreview, PropertyLabel } from '@/components/JsonSchemaPreview'; +import API_SCHEMA from './../../../spec.json'; + +# actors.v1.metrics.get + +## Description +Returns the metrics for a given actor. + +## Code Examples + + + +```bash {{ "title": "cURL" }} +curl -X GET 'https://api.rivet.gg/actors/{actor}/metrics/history' +``` + +```ts +// Create Rivet client +import { RivetClient } from '@rivet-gg/api'; +const RIVET = new RivetClient({ token: '[YOUR TOKEN HERE]' }); + +// Make request +await RIVET.actors.v1.metrics.get({ + // Add your request body here +}); +``` + + + +## Schema + + diff --git a/site/src/content/docs/api/actors/v1/upgrade-all.mdx b/site/src/content/docs/api/actors/v1/upgrade-all.mdx new file mode 100644 index 0000000000..09037e40c1 --- /dev/null +++ b/site/src/content/docs/api/actors/v1/upgrade-all.mdx @@ -0,0 +1,41 @@ +{/* This file is auto-generated by `generateApi.js`. + * + * Do not edit this file directly. + */} + +import { JsonSchemaPreview, PropertyLabel } from '@/components/JsonSchemaPreview'; +import API_SCHEMA from './../../spec.json'; + +# actors.v1.upgradeAll + +## Description +Upgrades all actors matching the given tags. + +## Code Examples + + + +```bash {{ "title": "cURL" }} +# Write the request body to body.json before running +curl -X POST -d '@body.json' 'https://api.rivet.gg/actors/upgrade' + + +``` + +```ts +// Create Rivet client +import { RivetClient } from '@rivet-gg/api'; +const RIVET = new RivetClient({ token: '[YOUR TOKEN HERE]' }); + +// Make request +await RIVET.actors.v1.upgradeAll({ + // Add your request body here +}); +``` + + + +## Schema + + + diff --git a/site/src/content/docs/api/actors/v1/upgrade.mdx b/site/src/content/docs/api/actors/v1/upgrade.mdx new file mode 100644 index 0000000000..be212199cd --- /dev/null +++ b/site/src/content/docs/api/actors/v1/upgrade.mdx @@ -0,0 +1,41 @@ +{/* This file is auto-generated by `generateApi.js`. + * + * Do not edit this file directly. + */} + +import { JsonSchemaPreview, PropertyLabel } from '@/components/JsonSchemaPreview'; +import API_SCHEMA from './../../spec.json'; + +# actors.v1.upgrade + +## Description +Upgrades a actor. + +## Code Examples + + + +```bash {{ "title": "cURL" }} +# Write the request body to body.json before running +curl -X POST -d '@body.json' 'https://api.rivet.gg/actors/{actor}/upgrade' + + +``` + +```ts +// Create Rivet client +import { RivetClient } from '@rivet-gg/api'; +const RIVET = new RivetClient({ token: '[YOUR TOKEN HERE]' }); + +// Make request +await RIVET.actors.v1.upgrade({ + // Add your request body here +}); +``` + + + +## Schema + + + diff --git a/site/src/content/docs/cloud/api/actors/create.mdx b/site/src/content/docs/cloud/api/actors/create.mdx index defcc81447..8632ea47a0 100644 --- a/site/src/content/docs/cloud/api/actors/create.mdx +++ b/site/src/content/docs/cloud/api/actors/create.mdx @@ -13,11 +13,11 @@ Create a new actor. ## Code Examples - + ```bash {{ "title": "cURL" }} # Write the request body to body.json before running -curl -X POST -d '@body.json' 'https://api.rivet.gg/actors' +curl -X POST -d '@body.json' 'https://api.rivet.gg/v2/actors' ``` diff --git a/site/src/content/docs/cloud/api/actors/destroy.mdx b/site/src/content/docs/cloud/api/actors/destroy.mdx index b9b360519d..10bbe353b5 100644 --- a/site/src/content/docs/cloud/api/actors/destroy.mdx +++ b/site/src/content/docs/cloud/api/actors/destroy.mdx @@ -13,10 +13,10 @@ Destroy a actor. 
 ## Code Examples
 
-
+
 
 ```bash {{ "title": "cURL" }}
-curl -X DELETE 'https://api.rivet.gg/actors/{actor}'
+curl -X DELETE 'https://api.rivet.gg/v2/actors/{actor}'
 ```
 
 ```ts
diff --git a/site/src/content/docs/cloud/api/actors/get.mdx b/site/src/content/docs/cloud/api/actors/get.mdx
index fdcc86641b..6af48d4243 100644
--- a/site/src/content/docs/cloud/api/actors/get.mdx
+++ b/site/src/content/docs/cloud/api/actors/get.mdx
@@ -13,10 +13,10 @@ Gets a actor.
 ## Code Examples
 
-
+
 
 ```bash {{ "title": "cURL" }}
-curl -X GET 'https://api.rivet.gg/actors/{actor}'
+curl -X GET 'https://api.rivet.gg/v2/actors/{actor}'
 ```
 
 ```ts
diff --git a/site/src/content/docs/cloud/api/actors/list.mdx b/site/src/content/docs/cloud/api/actors/list.mdx
index 66ae7e875a..cf0938308e 100644
--- a/site/src/content/docs/cloud/api/actors/list.mdx
+++ b/site/src/content/docs/cloud/api/actors/list.mdx
@@ -13,10 +13,10 @@ Lists all actors associated with the token used. Can be filtered by tags in the
 ## Code Examples
 
-
+
 
 ```bash {{ "title": "cURL" }}
-curl -X GET 'https://api.rivet.gg/actors'
+curl -X GET 'https://api.rivet.gg/v2/actors'
 ```
 
 ```ts
diff --git a/site/src/content/docs/cloud/api/actors/logs/get.mdx b/site/src/content/docs/cloud/api/actors/logs/get.mdx
index 388338875d..7de0f675a8 100644
--- a/site/src/content/docs/cloud/api/actors/logs/get.mdx
+++ b/site/src/content/docs/cloud/api/actors/logs/get.mdx
@@ -13,10 +13,10 @@ Returns the logs for a given actor.
 ## Code Examples
 
-
+
 
 ```bash {{ "title": "cURL" }}
-curl -X GET 'https://api.rivet.gg/actors/logs'
+curl -X GET 'https://api.rivet.gg/v2/actors/logs'
 ```
 
 ```ts
diff --git a/site/src/content/docs/cloud/api/actors/metrics/get.mdx b/site/src/content/docs/cloud/api/actors/metrics/get.mdx
index 0721f415f2..6106a85d28 100644
--- a/site/src/content/docs/cloud/api/actors/metrics/get.mdx
+++ b/site/src/content/docs/cloud/api/actors/metrics/get.mdx
@@ -13,10 +13,10 @@ Returns the metrics for a given actor.
 ## Code Examples
 
-
+
 
 ```bash {{ "title": "cURL" }}
-curl -X GET 'https://api.rivet.gg/actors/{actor}/metrics/history'
+curl -X GET 'https://api.rivet.gg/v2/actors/{actor}/metrics/history'
 ```
 
 ```ts
@@ -33,5 +33,5 @@ await RIVET.actors.metrics.get({
 
 ## Schema
 
-
+
diff --git a/site/src/content/docs/cloud/api/actors/upgrade-all.mdx b/site/src/content/docs/cloud/api/actors/upgrade-all.mdx
index 0600baf110..9934be2bec 100644
--- a/site/src/content/docs/cloud/api/actors/upgrade-all.mdx
+++ b/site/src/content/docs/cloud/api/actors/upgrade-all.mdx
@@ -13,11 +13,11 @@ Upgrades all actors matching the given tags.
 ## Code Examples
 
-
+
 
 ```bash {{ "title": "cURL" }}
 # Write the request body to body.json before running
-curl -X POST -d '@body.json' 'https://api.rivet.gg/actors/upgrade'
+curl -X POST -d '@body.json' 'https://api.rivet.gg/v2/actors/upgrade'
 ```
 
diff --git a/site/src/content/docs/cloud/api/actors/upgrade.mdx b/site/src/content/docs/cloud/api/actors/upgrade.mdx
index 48fcb2dc30..3da306f5bc 100644
--- a/site/src/content/docs/cloud/api/actors/upgrade.mdx
+++ b/site/src/content/docs/cloud/api/actors/upgrade.mdx
@@ -13,11 +13,11 @@ Upgrades a actor.
 ## Code Examples
 
-
+
 
 ```bash {{ "title": "cURL" }}
 # Write the request body to body.json before running
-curl -X POST -d '@body.json' 'https://api.rivet.gg/actors/{actor}/upgrade'
+curl -X POST -d '@body.json' 'https://api.rivet.gg/v2/actors/{actor}/upgrade'
 ```
 
diff --git a/site/src/content/docs/cloud/api/errors.mdx b/site/src/content/docs/cloud/api/errors.mdx
index 10d371805a..b1e1bbcee8 100644
--- a/site/src/content/docs/cloud/api/errors.mdx
+++ b/site/src/content/docs/cloud/api/errors.mdx
@@ -296,6 +296,65 @@ resolved before attempting to add another hostname.
 
 An error was returned by a cloudflare API and has been re-routed to this error response.
 
+## Container Failed To Create
+
+{`CONTAINER_FAILED_TO_CREATE`}
+
+Container failed to create.
+
+
+## Invalid Container Ids
+
+{`CONTAINER_LOGS_INVALID_CONTAINER_IDS`}
+
+The provided list of container IDs is not in a valid JSON format. Please provide a valid JSON array of UUIDs.
+
+
+## No Container Ids
+
+{`CONTAINER_LOGS_NO_CONTAINER_IDS`}
+
+No container IDs were provided in the request. Please provide at least one valid container ID.
+
+
+## No Valid Container Ids
+
+{`CONTAINER_LOGS_NO_VALID_CONTAINER_IDS`}
+
+None of the provided container IDs are valid for this game/environment. Please provide valid container IDs.
+
+
+## Invalid Interval
+
+{`CONTAINER_METRICS_INVALID_INTERVAL`}
+
+The provided interval must be greater than 0. Please provide a valid interval value in milliseconds.
+
+## Invalid Metrics
+
+{`CONTAINER_METRICS_INVALID_METRICS`}
+
+The provided list of metrics is not in a valid JSON format. Please provide a valid JSON array of metric names.
+
+## No Metrics
+
+{`CONTAINER_METRICS_NO_METRICS`}
+
+No metrics were specified in the request. Please provide at least one metric name to query.
+
+## Unsupported Metrics
+
+{`CONTAINER_METRICS_UNSUPPORTED_METRICS`}
+
+The requested metrics are not supported. Supported metrics include: cpu, memory, memory_limit, network_rx_bytes, network_tx_bytes.
+
+## Container Not Found
+
+{`CONTAINER_NOT_FOUND`}
+
+Container not found for the given ID.
+
+
 ## Environment Not Found
 
 {`ENVIRONMENT_NOT_FOUND`}
 
diff --git a/site/src/generated/apiPages.json b/site/src/generated/apiPages.json
index 21cbbef2a3..1a0b715548 100644
--- a/site/src/generated/apiPages.json
+++ b/site/src/generated/apiPages.json
@@ -4,20 +4,60 @@
       "title": "Actors",
       "pages": [
         {
-          "title": "actors.list",
-          "href": "/docs/cloud/api/actors/list",
+          "title": "actors.v1.list",
+          "href": "/docs/api/actors/v1/list",
           "sortingKey": "/actors get"
         },
         {
-          "title": "actors.create",
-          "href": "/docs/cloud/api/actors/create",
+          "title": "actors.v1.create",
+          "href": "/docs/api/actors/v1/create",
           "sortingKey": "/actors post"
         },
         {
-          "title": "actors.logs.get",
-          "href": "/docs/cloud/api/actors/logs/get",
+          "title": "actors.v1.logs.get",
+          "href": "/docs/api/actors/v1/logs/get",
           "sortingKey": "/actors/logs get"
         },
+        {
+          "title": "actors.v1.upgradeAll",
+          "href": "/docs/api/actors/v1/upgrade-all",
+          "sortingKey": "/actors/upgrade post"
+        },
+        {
+          "title": "actors.v1.destroy",
+          "href": "/docs/api/actors/v1/destroy",
+          "sortingKey": "/actors/{actor} delete"
+        },
+        {
+          "title": "actors.v1.get",
+          "href": "/docs/api/actors/v1/get",
+          "sortingKey": "/actors/{actor} get"
+        },
+        {
+          "title": "actors.v1.metrics.get",
+          "href": "/docs/api/actors/v1/metrics/get",
+          "sortingKey": "/actors/{actor}/metrics/history get"
+        },
+        {
+          "title": "actors.v1.upgrade",
+          "href": "/docs/api/actors/v1/upgrade",
+          "sortingKey": "/actors/{actor}/upgrade post"
+        },
+        {
+          "title": "actors.list",
+          "href": "/docs/api/actors/list",
+          "sortingKey": "/v2/actors get"
+        },
+        {
+          "title": "actors.create",
+          "href": "/docs/api/actors/create",
+          "sortingKey": "/v2/actors post"
+        },
+        {
+          "title": "actors.logs.get",
+          "href": "/docs/api/actors/logs/get",
+          "sortingKey": "/v2/actors/logs get"
+        },
         {
           "title": "actors.logs.export",
           "href": "/docs/cloud/api/actors/logs/export",
@@ -30,8 +70,8 @@
         },
         {
           "title": "actors.upgradeAll",
-          "href": "/docs/cloud/api/actors/upgrade-all",
-          "sortingKey": "/actors/upgrade post"
+          "href": "/docs/api/actors/upgrade-all",
+          "sortingKey": "/v2/actors/upgrade post"
         },
         {
           "title": "actors.usage",
@@ -40,23 +80,23 @@
         },
         {
           "title": "actors.destroy",
-          "href": "/docs/cloud/api/actors/destroy",
-          "sortingKey": "/actors/{actor} delete"
+          "href": "/docs/api/actors/destroy",
+          "sortingKey": "/v2/actors/{actor} delete"
         },
         {
           "title": "actors.get",
-          "href": "/docs/cloud/api/actors/get",
-          "sortingKey": "/actors/{actor} get"
+          "href": "/docs/api/actors/get",
+          "sortingKey": "/v2/actors/{actor} get"
         },
         {
           "title": "actors.metrics.get",
-          "href": "/docs/cloud/api/actors/metrics/get",
-          "sortingKey": "/actors/{actor}/metrics/history get"
+          "href": "/docs/api/actors/metrics/get",
+          "sortingKey": "/v2/actors/{actor}/metrics/history get"
         },
         {
           "title": "actors.upgrade",
-          "href": "/docs/cloud/api/actors/upgrade",
-          "sortingKey": "/actors/{actor}/upgrade post"
+          "href": "/docs/api/actors/upgrade",
+          "sortingKey": "/v2/actors/{actor}/upgrade post"
         }
       ]
     },