diff --git a/.codegen.json b/.codegen.json index 65077c1cc..c5c1a82f3 100644 --- a/.codegen.json +++ b/.codegen.json @@ -15,7 +15,6 @@ ], "post_generate": [ "make fmt", - "pytest -m 'not integration' --cov=databricks --cov-report html tests", "pip install .", "python3.12 docs/gen-client-docs.py" ] diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 27c63e442..20842dced 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -608df7153d64c19e2d255144c9935fd4ed45900a \ No newline at end of file +c4784cea599325a13472b1455e7434d639362d8b \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index e68ce1aca..8bee41bad 100755 --- a/.gitattributes +++ b/.gitattributes @@ -9,10 +9,15 @@ databricks/sdk/service/cleanrooms.py linguist-generated=true databricks/sdk/service/compute.py linguist-generated=true databricks/sdk/service/dashboards.py linguist-generated=true databricks/sdk/service/database.py linguist-generated=true +databricks/sdk/service/dataquality.py linguist-generated=true databricks/sdk/service/files.py linguist-generated=true +databricks/sdk/service/httpcallv2.py linguist-generated=true databricks/sdk/service/iam.py linguist-generated=true databricks/sdk/service/iamv2.py linguist-generated=true +databricks/sdk/service/idempotencytesting.py linguist-generated=true databricks/sdk/service/jobs.py linguist-generated=true +databricks/sdk/service/jsonmarshallv2.py linguist-generated=true +databricks/sdk/service/lrotesting.py linguist-generated=true databricks/sdk/service/marketplace.py linguist-generated=true databricks/sdk/service/ml.py linguist-generated=true databricks/sdk/service/oauth2.py linguist-generated=true @@ -27,3 +32,6 @@ databricks/sdk/service/sql.py linguist-generated=true databricks/sdk/service/tags.py linguist-generated=true databricks/sdk/service/vectorsearch.py linguist-generated=true databricks/sdk/service/workspace.py linguist-generated=true +test_http_call.py linguist-generated=true +test_json_marshall.py linguist-generated=true +test_lro_call.py linguist-generated=true diff --git a/.vscode/settings.json b/.vscode/settings.json index c36b4db6c..c2c2020ec 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -4,7 +4,7 @@ ], "python.testing.unittestEnabled": false, "python.testing.pytestEnabled": true, - "python.envFile": "${workspaceFolder}/.databricks/.databricks.env", + "python.envFile": "${workspaceFolder}/.env", "databricks.python.envFile": "${workspaceFolder}/.env", "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------" diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index ee1d5ee71..ca1311a50 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -14,3 +14,243 @@ ### Internal Changes ### API Changes +* Add `databricks.sdk.service.dataquality` package. +* Add [w.data_quality](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/dataquality/data_quality.html) workspace-level service. +* Add `create_update()` and `get_update()` methods for [w.apps](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/apps/apps.html) workspace-level service. +* Add `compute_size` field for `databricks.sdk.service.apps.App`. +* Add `genie_space` field for `databricks.sdk.service.apps.AppResource`. +* Add `skip_validation` field for `databricks.sdk.service.catalog.AccountsCreateStorageCredential`.
+* Add `skip_validation` field for `databricks.sdk.service.catalog.AccountsUpdateStorageCredential`. +* Add `aliases`, `browse_only`, `created_at`, `created_by`, `full_name`, `metastore_id`, `owner`, `updated_at` and `updated_by` fields for `databricks.sdk.service.catalog.CreateRegisteredModelRequest`. +* Add `catalog_name`, `id`, `model_name` and `schema_name` fields for `databricks.sdk.service.catalog.RegisteredModelAlias`. +* Add `aliases`, `catalog_name`, `created_at`, `created_by`, `id`, `metastore_id`, `model_name`, `model_version_dependencies`, `run_id`, `run_workspace_id`, `schema_name`, `source`, `status`, `storage_location`, `updated_at` and `updated_by` fields for `databricks.sdk.service.catalog.UpdateModelVersionRequest`. +* Add `aliases`, `browse_only`, `catalog_name`, `created_at`, `created_by`, `metastore_id`, `name`, `schema_name`, `storage_location`, `updated_at` and `updated_by` fields for `databricks.sdk.service.catalog.UpdateRegisteredModelRequest`. +* Add `key_region` field for `databricks.sdk.service.provisioning.CreateAwsKeyInfo`. +* Add `role_arn` field for `databricks.sdk.service.provisioning.CreateStorageConfigurationRequest`. +* Add `azure_key_info` field for `databricks.sdk.service.provisioning.CustomerManagedKey`. +* [Breaking] Add `customer_facing_private_access_settings` field for `databricks.sdk.service.provisioning.ReplacePrivateAccessSettingsRequest`. +* Add `role_arn` field for `databricks.sdk.service.provisioning.StorageConfiguration`. +* [Breaking] Add `customer_facing_workspace` field for `databricks.sdk.service.provisioning.UpdateWorkspaceRequest`. +* Add `update_mask` field for `databricks.sdk.service.provisioning.UpdateWorkspaceRequest`. +* Add `compute_mode`, `network`, `network_connectivity_config_id` and `storage_mode` fields for `databricks.sdk.service.provisioning.Workspace`. +* Add `enable_serverless_compute` field for `databricks.sdk.service.sql.GetWorkspaceWarehouseConfigResponse`. +* Add `page_size` and `page_token` fields for `databricks.sdk.service.sql.ListWarehousesRequest`. +* Add `next_page_token` field for `databricks.sdk.service.sql.ListWarehousesResponse`. +* Add `enable_serverless_compute` field for `databricks.sdk.service.sql.SetWorkspaceWarehouseConfigRequest`. +* Add `model_version_status_unknown` enum value for `databricks.sdk.service.catalog.ModelVersionInfoStatus`. +* Add `k8s_active_pod_quota_exceeded` and `cloud_account_pod_quota_exceeded` enum values for `databricks.sdk.service.compute.TerminationReasonCode`. +* Add `internal_catalog_asset_creation_ongoing_exception`, `internal_catalog_asset_creation_failed_exception` and `internal_catalog_asset_creation_unsupported_exception` enum values for `databricks.sdk.service.dashboards.MessageErrorType`. 
+* Add `ssh_bootstrap_failure`, `aws_inaccessible_kms_key_failure`, `init_container_not_finished`, `spark_image_download_throttled`, `spark_image_not_found`, `cluster_operation_throttled`, `cluster_operation_timeout`, `serverless_long_running_terminated`, `azure_packed_deployment_partial_failure`, `invalid_worker_image_failure`, `workspace_update`, `invalid_aws_parameter`, `driver_out_of_disk`, `driver_out_of_memory`, `driver_launch_timeout`, `driver_unexpected_failure`, `unexpected_pod_recreation`, `gcp_inaccessible_kms_key_failure`, `gcp_kms_key_permission_denied`, `driver_eviction`, `user_initiated_vm_termination`, `gcp_iam_timeout`, `aws_resource_quota_exceeded`, `cloud_account_setup_failure`, `aws_invalid_key_pair`, `driver_pod_creation_failure`, `maintenance_mode`, `internal_capacity_failure`, `executor_pod_unscheduled`, `storage_download_failure_slow`, `storage_download_failure_throttled`, `dynamic_spark_conf_size_exceeded`, `aws_instance_profile_update_failure`, `instance_pool_not_found`, `instance_pool_max_capacity_reached`, `aws_invalid_kms_key_state`, `gcp_insufficient_capacity`, `gcp_api_rate_quota_exceeded`, `gcp_resource_quota_exceeded`, `gcp_ip_space_exhausted`, `gcp_service_account_access_denied`, `gcp_service_account_not_found`, `gcp_forbidden`, `gcp_not_found`, `resource_usage_blocked`, `data_access_config_changed`, `access_token_failure`, `invalid_instance_placement_protocol`, `budget_policy_resolution_failure`, `in_penalty_box`, `disaster_recovery_replication`, `bootstrap_timeout_due_to_misconfig`, `instance_unreachable_due_to_misconfig`, `storage_download_failure_due_to_misconfig`, `control_plane_request_failure_due_to_misconfig`, `cloud_provider_launch_failure_due_to_misconfig`, `gcp_subnet_not_ready`, `cloud_operation_cancelled`, `cloud_provider_instance_not_launched`, `gcp_trusted_image_projects_violated`, `budget_policy_limit_enforcement_activated`, `eos_spark_image`, `no_matched_k8s`, `lazy_allocation_timeout`, `driver_node_unreachable`, `secret_creation_failure`, `pod_scheduling_failure`, `pod_assignment_failure`, `allocation_timeout`, `allocation_timeout_no_unallocated_clusters`, `allocation_timeout_no_matched_clusters`, `allocation_timeout_no_ready_clusters`, `allocation_timeout_no_warmed_up_clusters`, `allocation_timeout_node_daemon_not_ready`, `allocation_timeout_no_healthy_clusters`, `netvisor_setup_timeout`, `no_matched_k8s_testing_tag`, `cloud_provider_resource_stockout_due_to_misconfig`, `gke_based_cluster_termination`, `allocation_timeout_no_healthy_and_warmed_up_clusters`, `docker_invalid_os_exception`, `docker_container_creation_exception`, `docker_image_too_large_for_instance_exception`, `dns_resolution_error`, `gcp_denied_by_org_policy`, `secret_permission_denied`, `network_check_nic_failure`, `network_check_dns_server_failure`, `network_check_storage_failure`, `network_check_metadata_endpoint_failure`, `network_check_control_plane_failure`, `network_check_multiple_components_failure`, `driver_unhealthy`, `security_agents_failed_initial_verification`, `driver_dns_resolution_failure`, `no_activated_k8s`, `usage_policy_entitlement_denied`, `no_activated_k8s_testing_tag`, `k8s_active_pod_quota_exceeded` and `cloud_account_pod_quota_exceeded` enum values for `databricks.sdk.service.sql.TerminationReasonCode`. 
+* [Breaking] Change `create()` method for [a.account_metastore_assignments](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastore_assignments.html) account-level service to start returning `databricks.sdk.service.catalog.AccountsCreateMetastoreAssignmentResponse` dataclass. +* [Breaking] Change `delete()` method for [a.account_metastore_assignments](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastore_assignments.html) account-level service to start returning `databricks.sdk.service.catalog.AccountsDeleteMetastoreAssignmentResponse` dataclass. +* [Breaking] Change `update()` method for [a.account_metastore_assignments](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastore_assignments.html) account-level service to start returning `databricks.sdk.service.catalog.AccountsUpdateMetastoreAssignmentResponse` dataclass. +* [Breaking] Change `create()` method for [a.account_metastores](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastores.html) account-level service to return `databricks.sdk.service.catalog.AccountsCreateMetastoreResponse` dataclass. +* [Breaking] Change `delete()` method for [a.account_metastores](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastores.html) account-level service to start returning `databricks.sdk.service.catalog.AccountsDeleteMetastoreResponse` dataclass. +* [Breaking] Change `get()` method for [a.account_metastores](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastores.html) account-level service to return `databricks.sdk.service.catalog.AccountsGetMetastoreResponse` dataclass. +* [Breaking] Change `list()` method for [a.account_metastores](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastores.html) account-level service to return `databricks.sdk.service.catalog.AccountsListMetastoresResponse` dataclass. +* [Breaking] Change `update()` method for [a.account_metastores](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastores.html) account-level service to return `databricks.sdk.service.catalog.AccountsUpdateMetastoreResponse` dataclass. +* [Breaking] Change `create()` method for [a.account_storage_credentials](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_storage_credentials.html) account-level service to return `databricks.sdk.service.catalog.AccountsCreateStorageCredentialInfo` dataclass. +* [Breaking] Change `delete()` method for [a.account_storage_credentials](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_storage_credentials.html) account-level service to start returning `databricks.sdk.service.catalog.AccountsDeleteStorageCredentialResponse` dataclass. +* [Breaking] Change `update()` method for [a.account_storage_credentials](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_storage_credentials.html) account-level service to return `databricks.sdk.service.catalog.AccountsUpdateStorageCredentialResponse` dataclass. +* [Breaking] Change `create()` method for [w.registered_models](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/catalog/registered_models.html) workspace-level service with new required argument order. 
+* [Breaking] Change `delete()` method for [a.credentials](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/credentials.html) account-level service to start returning `databricks.sdk.service.provisioning.Credential` dataclass. +* [Breaking] Change `delete()` method for [a.encryption_keys](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/encryption_keys.html) account-level service to start returning `databricks.sdk.service.provisioning.CustomerManagedKey` dataclass. +* [Breaking] Change `create()` method for [a.networks](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/networks.html) account-level service with new required argument order. +* [Breaking] Change `delete()` method for [a.networks](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/networks.html) account-level service to start returning `databricks.sdk.service.provisioning.Network` dataclass. +* [Breaking] Change `create()` and `replace()` methods for [a.private_access](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/private_access.html) account-level service with new required argument order. +* [Breaking] Change `delete()` and `replace()` methods for [a.private_access](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/private_access.html) account-level service to start returning `databricks.sdk.service.provisioning.PrivateAccessSettings` dataclass. +* [Breaking] Change `delete()` method for [a.storage](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/storage.html) account-level service to start returning `databricks.sdk.service.provisioning.StorageConfiguration` dataclass. +* [Breaking] Change `create()` method for [a.vpc_endpoints](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/vpc_endpoints.html) account-level service with new required argument order. +* [Breaking] Change `delete()` method for [a.vpc_endpoints](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/vpc_endpoints.html) account-level service to start returning `databricks.sdk.service.provisioning.VpcEndpoint` dataclass. +* [Breaking] Change `create()` and `update()` methods for [a.workspaces](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/workspaces.html) account-level service with new required argument order. +* [Breaking] Change `delete()` and `update()` methods for [a.workspaces](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/workspaces.html) account-level service to start returning `databricks.sdk.service.provisioning.Workspace` dataclass. +* [Breaking] Change `execute_statement()` method for [w.statement_execution](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/sql/statement_execution.html) workspace-level service. Method path has changed. +* [Breaking] Change `metastore_info` field for `databricks.sdk.service.catalog.AccountsCreateMetastore` to type `databricks.sdk.service.catalog.CreateAccountsMetastore` dataclass. +* [Breaking] Change `credential_info` field for `databricks.sdk.service.catalog.AccountsCreateStorageCredential` to type `databricks.sdk.service.catalog.CreateAccountsStorageCredential` dataclass. +* [Breaking] Change `metastore_info` field for `databricks.sdk.service.catalog.AccountsUpdateMetastore` to type `databricks.sdk.service.catalog.UpdateAccountsMetastore` dataclass.
+* [Breaking] Change `credential_info` field for `databricks.sdk.service.catalog.AccountsUpdateStorageCredential` to type `databricks.sdk.service.catalog.UpdateAccountsStorageCredential` dataclass. +* Change `catalog_name`, `name` and `schema_name` fields for `databricks.sdk.service.catalog.CreateRegisteredModelRequest` to no longer be required. +* Change `network_name` field for `databricks.sdk.service.provisioning.CreateNetworkRequest` to no longer be required. +* Change `private_access_settings_name` and `region` fields for `databricks.sdk.service.provisioning.CreatePrivateAccessSettingsRequest` to no longer be required. +* Change `vpc_endpoint_name` field for `databricks.sdk.service.provisioning.CreateVpcEndpointRequest` to no longer be required. +* Change `workspace_name` field for `databricks.sdk.service.provisioning.CreateWorkspaceRequest` to no longer be required. +* [Breaking] Change `dataplane_relay` and `rest_api` fields for `databricks.sdk.service.provisioning.NetworkVpcEndpoints` to no longer be required. +* Change `dataplane_relay` and `rest_api` fields for `databricks.sdk.service.provisioning.NetworkVpcEndpoints` to no longer be required. +* [Breaking] Change waiter for [WorkspacesAPI.update](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/workspaces.html#databricks.sdk.service.provisioning.WorkspacesAPI.update) method. +* [Breaking] Remove `browse_only` field for `databricks.sdk.service.catalog.ModelVersionInfo`. +* [Breaking] Remove `jar_dependencies` field for `databricks.sdk.service.compute.Environment`. +* [Breaking] Remove `is_no_public_ip_enabled` field for `databricks.sdk.service.provisioning.CreateWorkspaceRequest`. +* [Breaking] Remove `allowed_vpc_endpoint_ids`, `private_access_level`, `private_access_settings_name`, `public_access_enabled` and `region` fields for `databricks.sdk.service.provisioning.ReplacePrivateAccessSettingsRequest`. +* [Breaking] Remove `external_id` field for `databricks.sdk.service.provisioning.StsRole`. +* [Breaking] Remove `aws_region`, `credentials_id`, `custom_tags`, `managed_services_customer_managed_key_id`, `network_connectivity_config_id`, `network_id`, `private_access_settings_id`, `storage_configuration_id` and `storage_customer_managed_key_id` fields for `databricks.sdk.service.provisioning.UpdateWorkspaceRequest`. +* [Breaking] Remove `external_customer_info` and `is_no_public_ip_enabled` fields for `databricks.sdk.service.provisioning.Workspace`. +* [Breaking] Remove `status_unspecified` enum value for `databricks.sdk.service.sql.Status`. +* Add `browse_only` field for `databricks.sdk.service.catalog.ModelVersionInfo`. +* Add `jar_dependencies` field for `databricks.sdk.service.compute.Environment`. +* Add `is_no_public_ip_enabled` field for `databricks.sdk.service.provisioning.CreateWorkspaceRequest`. +* Add `allowed_vpc_endpoint_ids`, `private_access_level` and `public_access_enabled` fields for `databricks.sdk.service.provisioning.ReplacePrivateAccessSettingsRequest`. +* [Breaking] Add `private_access_settings_name` and `region` fields for `databricks.sdk.service.provisioning.ReplacePrivateAccessSettingsRequest`. +* Add `external_id` field for `databricks.sdk.service.provisioning.StsRole`. +* Add `aws_region`, `credentials_id`, `custom_tags`, `managed_services_customer_managed_key_id`, `network_connectivity_config_id`, `network_id`, `private_access_settings_id`, `storage_configuration_id` and `storage_customer_managed_key_id` fields for `databricks.sdk.service.provisioning.UpdateWorkspaceRequest`. 
+* Add `external_customer_info` and `is_no_public_ip_enabled` fields for `databricks.sdk.service.provisioning.Workspace`. +* Add `status_unspecified` enum value for `databricks.sdk.service.sql.Status`. +* [Breaking] Change `create()` method for [a.account_metastore_assignments](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastore_assignments.html) account-level service to no longer return `databricks.sdk.service.catalog.AccountsCreateMetastoreAssignmentResponse` dataclass. +* [Breaking] Change `delete()` method for [a.account_metastore_assignments](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastore_assignments.html) account-level service to no longer return `databricks.sdk.service.catalog.AccountsDeleteMetastoreAssignmentResponse` dataclass. +* [Breaking] Change `update()` method for [a.account_metastore_assignments](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastore_assignments.html) account-level service to no longer return `databricks.sdk.service.catalog.AccountsUpdateMetastoreAssignmentResponse` dataclass. +* [Breaking] Change `create()`, `get()` and `update()` methods for [a.account_metastores](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastores.html) account-level service to return `databricks.sdk.service.catalog.AccountsMetastoreInfo` dataclass. +* [Breaking] Change `delete()` method for [a.account_metastores](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastores.html) account-level service to no longer return `databricks.sdk.service.catalog.AccountsDeleteMetastoreResponse` dataclass. +* [Breaking] Change `list()` method for [a.account_metastores](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastores.html) account-level service to return `databricks.sdk.service.catalog.ListMetastoresResponse` dataclass. +* [Breaking] Change `create()` and `update()` methods for [a.account_storage_credentials](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_storage_credentials.html) account-level service to return `databricks.sdk.service.catalog.AccountsStorageCredentialInfo` dataclass. +* [Breaking] Change `delete()` method for [a.account_storage_credentials](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_storage_credentials.html) account-level service to no longer return `databricks.sdk.service.catalog.AccountsDeleteStorageCredentialResponse` dataclass. +* [Breaking] Change `create()` method for [w.registered_models](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/catalog/registered_models.html) workspace-level service with new required argument order. +* [Breaking] Change `delete()` method for [a.credentials](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/credentials.html) account-level service to no longer return `databricks.sdk.service.provisioning.Credential` dataclass. +* [Breaking] Change `delete()` method for [a.encryption_keys](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/encryption_keys.html) account-level service to no longer return `databricks.sdk.service.provisioning.CustomerManagedKey` dataclass. +* [Breaking] Change `create()` method for [a.networks](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/networks.html) account-level service with new required argument order. 
+* [Breaking] Change `delete()` method for [a.networks](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/networks.html) account-level service to no longer return `databricks.sdk.service.provisioning.Network` dataclass. +* [Breaking] Change `create()` and `replace()` methods for [a.private_access](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/private_access.html) account-level service with new required argument order. +* [Breaking] Change `delete()` and `replace()` methods for [a.private_access](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/private_access.html) account-level service to no longer return `databricks.sdk.service.provisioning.PrivateAccessSettings` dataclass. +* [Breaking] Change `delete()` method for [a.storage](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/storage.html) account-level service to no longer return `databricks.sdk.service.provisioning.StorageConfiguration` dataclass. +* [Breaking] Change `create()` method for [a.vpc_endpoints](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/vpc_endpoints.html) account-level service with new required argument order. +* [Breaking] Change `delete()` method for [a.vpc_endpoints](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/vpc_endpoints.html) account-level service to no longer return `databricks.sdk.service.provisioning.VpcEndpoint` dataclass. +* [Breaking] Change `create()` and `update()` methods for [a.workspaces](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/workspaces.html) account-level service with new required argument order. +* [Breaking] Change `delete()` and `update()` methods for [a.workspaces](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/workspaces.html) account-level service to no longer return `databricks.sdk.service.provisioning.Workspace` dataclass. +* [Breaking] Change `execute_statement()` method for [w.statement_execution](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/sql/statement_execution.html) workspace-level service. Method path has changed. +* [Breaking] Change `metastore_info` field for `databricks.sdk.service.catalog.AccountsCreateMetastore` to type `databricks.sdk.service.catalog.CreateMetastore` dataclass. +* [Breaking] Change `credential_info` field for `databricks.sdk.service.catalog.AccountsCreateStorageCredential` to type `databricks.sdk.service.catalog.CreateStorageCredential` dataclass. +* [Breaking] Change `metastore_info` field for `databricks.sdk.service.catalog.AccountsUpdateMetastore` to type `databricks.sdk.service.catalog.UpdateMetastore` dataclass. +* [Breaking] Change `credential_info` field for `databricks.sdk.service.catalog.AccountsUpdateStorageCredential` to type `databricks.sdk.service.catalog.UpdateStorageCredential` dataclass. +* [Breaking] Change `catalog_name`, `name` and `schema_name` fields for `databricks.sdk.service.catalog.CreateRegisteredModelRequest` to be required. +* [Breaking] Change `name` field for `databricks.sdk.service.database.DatabaseInstanceRole` to no longer be required. +* Change `name` field for `databricks.sdk.service.database.DatabaseInstanceRole` to no longer be required. +* [Breaking] Change `network_name` field for `databricks.sdk.service.provisioning.CreateNetworkRequest` to be required.
+* [Breaking] Change `private_access_settings_name` and `region` fields for `databricks.sdk.service.provisioning.CreatePrivateAccessSettingsRequest` to be required. +* [Breaking] Change `vpc_endpoint_name` field for `databricks.sdk.service.provisioning.CreateVpcEndpointRequest` to be required. +* [Breaking] Change `workspace_name` field for `databricks.sdk.service.provisioning.CreateWorkspaceRequest` to be required. +* Change `dataplane_relay` and `rest_api` fields for `databricks.sdk.service.provisioning.NetworkVpcEndpoints` to be required. +* [Breaking] Change `dataplane_relay` and `rest_api` fields for `databricks.sdk.service.provisioning.NetworkVpcEndpoints` to be required. +* [Breaking] Change waiter for [WorkspacesAPI.update](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/workspaces.html#databricks.sdk.service.provisioning.WorkspacesAPI.update) method. +* [Breaking] Remove `databricks.sdk.service.dataquality` package. +* [Breaking] Remove `create_update()` and `get_update()` methods for [w.apps](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/apps/apps.html) workspace-level service. +* [Breaking] Remove `update_notifications()` method for [w.serving_endpoints](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/serving/serving_endpoints.html) workspace-level service. +* [Breaking] Remove `compute_size` field for `databricks.sdk.service.apps.App`. +* [Breaking] Remove `genie_space` field for `databricks.sdk.service.apps.AppResource`. +* [Breaking] Remove `skip_validation` field for `databricks.sdk.service.catalog.AccountsCreateStorageCredential`. +* [Breaking] Remove `skip_validation` field for `databricks.sdk.service.catalog.AccountsUpdateStorageCredential`. +* [Breaking] Remove `aliases`, `browse_only`, `created_at`, `created_by`, `full_name`, `metastore_id`, `owner`, `updated_at` and `updated_by` fields for `databricks.sdk.service.catalog.CreateRegisteredModelRequest`. +* [Breaking] Remove `catalog_name`, `id`, `model_name` and `schema_name` fields for `databricks.sdk.service.catalog.RegisteredModelAlias`. +* [Breaking] Remove `aliases`, `catalog_name`, `created_at`, `created_by`, `id`, `metastore_id`, `model_name`, `model_version_dependencies`, `run_id`, `run_workspace_id`, `schema_name`, `source`, `status`, `storage_location`, `updated_at` and `updated_by` fields for `databricks.sdk.service.catalog.UpdateModelVersionRequest`. +* [Breaking] Remove `aliases`, `browse_only`, `catalog_name`, `created_at`, `created_by`, `metastore_id`, `name`, `schema_name`, `storage_location`, `updated_at` and `updated_by` fields for `databricks.sdk.service.catalog.UpdateRegisteredModelRequest`. +* [Breaking] Remove `parameters` field for `databricks.sdk.service.dashboards.GenieQueryAttachment`. +* [Breaking] Remove `database_instance_name` field for `databricks.sdk.service.database.CreateDatabaseInstanceRoleRequest`. +* [Breaking] Remove `custom_tags`, `effective_custom_tags`, `effective_usage_policy_id` and `usage_policy_id` fields for `databricks.sdk.service.database.DatabaseInstance`. +* [Breaking] Remove `effective_attributes` and `instance_name` fields for `databricks.sdk.service.database.DatabaseInstanceRole`. +* [Breaking] Remove `key_region` field for `databricks.sdk.service.provisioning.CreateAwsKeyInfo`. +* [Breaking] Remove `role_arn` field for `databricks.sdk.service.provisioning.CreateStorageConfigurationRequest`. +* [Breaking] Remove `azure_key_info` field for `databricks.sdk.service.provisioning.CustomerManagedKey`. 
+* [Breaking] Remove `customer_facing_private_access_settings` field for `databricks.sdk.service.provisioning.ReplacePrivateAccessSettingsRequest`. +* [Breaking] Remove `role_arn` field for `databricks.sdk.service.provisioning.StorageConfiguration`. +* [Breaking] Remove `customer_facing_workspace` and `update_mask` fields for `databricks.sdk.service.provisioning.UpdateWorkspaceRequest`. +* [Breaking] Remove `compute_mode`, `network`, `network_connectivity_config_id` and `storage_mode` fields for `databricks.sdk.service.provisioning.Workspace`. +* [Breaking] Remove `enable_serverless_compute` field for `databricks.sdk.service.sql.GetWorkspaceWarehouseConfigResponse`. +* [Breaking] Remove `page_size` and `page_token` fields for `databricks.sdk.service.sql.ListWarehousesRequest`. +* [Breaking] Remove `next_page_token` field for `databricks.sdk.service.sql.ListWarehousesResponse`. +* [Breaking] Remove `enable_serverless_compute` field for `databricks.sdk.service.sql.SetWorkspaceWarehouseConfigRequest`. +* [Breaking] Remove `model_version_status_unknown` enum value for `databricks.sdk.service.catalog.ModelVersionInfoStatus`. +* [Breaking] Remove `stream_native` enum value for `databricks.sdk.service.catalog.SystemType`. +* [Breaking] Remove `k8s_active_pod_quota_exceeded` and `cloud_account_pod_quota_exceeded` enum values for `databricks.sdk.service.compute.TerminationReasonCode`. +* [Breaking] Remove `exceeded_max_token_length_exception`, `internal_catalog_asset_creation_ongoing_exception`, `internal_catalog_asset_creation_failed_exception` and `internal_catalog_asset_creation_unsupported_exception` enum values for `databricks.sdk.service.dashboards.MessageErrorType`. +* [Breaking] Remove `ssh_bootstrap_failure`, `aws_inaccessible_kms_key_failure`, `init_container_not_finished`, `spark_image_download_throttled`, `spark_image_not_found`, `cluster_operation_throttled`, `cluster_operation_timeout`, `serverless_long_running_terminated`, `azure_packed_deployment_partial_failure`, `invalid_worker_image_failure`, `workspace_update`, `invalid_aws_parameter`, `driver_out_of_disk`, `driver_out_of_memory`, `driver_launch_timeout`, `driver_unexpected_failure`, `unexpected_pod_recreation`, `gcp_inaccessible_kms_key_failure`, `gcp_kms_key_permission_denied`, `driver_eviction`, `user_initiated_vm_termination`, `gcp_iam_timeout`, `aws_resource_quota_exceeded`, `cloud_account_setup_failure`, `aws_invalid_key_pair`, `driver_pod_creation_failure`, `maintenance_mode`, `internal_capacity_failure`, `executor_pod_unscheduled`, `storage_download_failure_slow`, `storage_download_failure_throttled`, `dynamic_spark_conf_size_exceeded`, `aws_instance_profile_update_failure`, `instance_pool_not_found`, `instance_pool_max_capacity_reached`, `aws_invalid_kms_key_state`, `gcp_insufficient_capacity`, `gcp_api_rate_quota_exceeded`, `gcp_resource_quota_exceeded`, `gcp_ip_space_exhausted`, `gcp_service_account_access_denied`, `gcp_service_account_not_found`, `gcp_forbidden`, `gcp_not_found`, `resource_usage_blocked`, `data_access_config_changed`, `access_token_failure`, `invalid_instance_placement_protocol`, `budget_policy_resolution_failure`, `in_penalty_box`, `disaster_recovery_replication`, `bootstrap_timeout_due_to_misconfig`, `instance_unreachable_due_to_misconfig`, `storage_download_failure_due_to_misconfig`, `control_plane_request_failure_due_to_misconfig`, `cloud_provider_launch_failure_due_to_misconfig`, `gcp_subnet_not_ready`, `cloud_operation_cancelled`, `cloud_provider_instance_not_launched`, 
`gcp_trusted_image_projects_violated`, `budget_policy_limit_enforcement_activated`, `eos_spark_image`, `no_matched_k8s`, `lazy_allocation_timeout`, `driver_node_unreachable`, `secret_creation_failure`, `pod_scheduling_failure`, `pod_assignment_failure`, `allocation_timeout`, `allocation_timeout_no_unallocated_clusters`, `allocation_timeout_no_matched_clusters`, `allocation_timeout_no_ready_clusters`, `allocation_timeout_no_warmed_up_clusters`, `allocation_timeout_node_daemon_not_ready`, `allocation_timeout_no_healthy_clusters`, `netvisor_setup_timeout`, `no_matched_k8s_testing_tag`, `cloud_provider_resource_stockout_due_to_misconfig`, `gke_based_cluster_termination`, `allocation_timeout_no_healthy_and_warmed_up_clusters`, `docker_invalid_os_exception`, `docker_container_creation_exception`, `docker_image_too_large_for_instance_exception`, `dns_resolution_error`, `gcp_denied_by_org_policy`, `secret_permission_denied`, `network_check_nic_failure`, `network_check_dns_server_failure`, `network_check_storage_failure`, `network_check_metadata_endpoint_failure`, `network_check_control_plane_failure`, `network_check_multiple_components_failure`, `driver_unhealthy`, `security_agents_failed_initial_verification`, `driver_dns_resolution_failure`, `no_activated_k8s`, `usage_policy_entitlement_denied`, `no_activated_k8s_testing_tag`, `k8s_active_pod_quota_exceeded` and `cloud_account_pod_quota_exceeded` enum values for `databricks.sdk.service.sql.TerminationReasonCode`. +* Add `databricks.sdk.service.dataquality` package. +* Add [w.data_quality](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/dataquality/data_quality.html) workspace-level service. +* Add `create_update()` and `get_update()` methods for [w.apps](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/apps/apps.html) workspace-level service. +* Add `update_notifications()` method for [w.serving_endpoints](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/serving/serving_endpoints.html) workspace-level service. +* Add `compute_size` field for `databricks.sdk.service.apps.App`. +* Add `genie_space` field for `databricks.sdk.service.apps.AppResource`. +* Add `skip_validation` field for `databricks.sdk.service.catalog.AccountsCreateStorageCredential`. +* Add `skip_validation` field for `databricks.sdk.service.catalog.AccountsUpdateStorageCredential`. +* Add `aliases`, `browse_only`, `created_at`, `created_by`, `full_name`, `metastore_id`, `owner`, `updated_at` and `updated_by` fields for `databricks.sdk.service.catalog.CreateRegisteredModelRequest`. +* Add `include_unbound` field for `databricks.sdk.service.catalog.ListCatalogsRequest`. +* Add `include_unbound` field for `databricks.sdk.service.catalog.ListCredentialsRequest`. +* Add `include_unbound` field for `databricks.sdk.service.catalog.ListExternalLocationsRequest`. +* Add `include_unbound` field for `databricks.sdk.service.catalog.ListStorageCredentialsRequest`. +* Add `catalog_name`, `id`, `model_name` and `schema_name` fields for `databricks.sdk.service.catalog.RegisteredModelAlias`. +* Add `aliases`, `catalog_name`, `created_at`, `created_by`, `id`, `metastore_id`, `model_name`, `model_version_dependencies`, `run_id`, `run_workspace_id`, `schema_name`, `source`, `status`, `storage_location`, `updated_at` and `updated_by` fields for `databricks.sdk.service.catalog.UpdateModelVersionRequest`. 
+* Add `aliases`, `browse_only`, `catalog_name`, `created_at`, `created_by`, `metastore_id`, `name`, `schema_name`, `storage_location`, `updated_at` and `updated_by` fields for `databricks.sdk.service.catalog.UpdateRegisteredModelRequest`. +* Add `parameters` field for `databricks.sdk.service.dashboards.GenieQueryAttachment`. +* Add `database_instance_name` field for `databricks.sdk.service.database.CreateDatabaseInstanceRoleRequest`. +* Add `custom_tags`, `effective_custom_tags`, `effective_usage_policy_id` and `usage_policy_id` fields for `databricks.sdk.service.database.DatabaseInstance`. +* Add `effective_attributes` and `instance_name` fields for `databricks.sdk.service.database.DatabaseInstanceRole`. +* Add `key_region` field for `databricks.sdk.service.provisioning.CreateAwsKeyInfo`. +* Add `role_arn` field for `databricks.sdk.service.provisioning.CreateStorageConfigurationRequest`. +* Add `azure_key_info` field for `databricks.sdk.service.provisioning.CustomerManagedKey`. +* [Breaking] Add `customer_facing_private_access_settings` field for `databricks.sdk.service.provisioning.ReplacePrivateAccessSettingsRequest`. +* Add `role_arn` field for `databricks.sdk.service.provisioning.StorageConfiguration`. +* [Breaking] Add `customer_facing_workspace` field for `databricks.sdk.service.provisioning.UpdateWorkspaceRequest`. +* Add `update_mask` field for `databricks.sdk.service.provisioning.UpdateWorkspaceRequest`. +* Add `compute_mode`, `network`, `network_connectivity_config_id` and `storage_mode` fields for `databricks.sdk.service.provisioning.Workspace`. +* Add `enable_serverless_compute` field for `databricks.sdk.service.sql.GetWorkspaceWarehouseConfigResponse`. +* Add `page_size` and `page_token` fields for `databricks.sdk.service.sql.ListWarehousesRequest`. +* Add `next_page_token` field for `databricks.sdk.service.sql.ListWarehousesResponse`. +* Add `enable_serverless_compute` field for `databricks.sdk.service.sql.SetWorkspaceWarehouseConfigRequest`. +* Add `model_version_status_unknown` enum value for `databricks.sdk.service.catalog.ModelVersionInfoStatus`. +* Add `stream_native` enum value for `databricks.sdk.service.catalog.SystemType`. +* Add `k8s_active_pod_quota_exceeded` and `cloud_account_pod_quota_exceeded` enum values for `databricks.sdk.service.compute.TerminationReasonCode`. +* Add `exceeded_max_token_length_exception`, `internal_catalog_asset_creation_ongoing_exception`, `internal_catalog_asset_creation_failed_exception` and `internal_catalog_asset_creation_unsupported_exception` enum values for `databricks.sdk.service.dashboards.MessageErrorType`. +* Add `asset_type_mcp` enum value for `databricks.sdk.service.marketplace.AssetType`. 
+* Add `ssh_bootstrap_failure`, `aws_inaccessible_kms_key_failure`, `init_container_not_finished`, `spark_image_download_throttled`, `spark_image_not_found`, `cluster_operation_throttled`, `cluster_operation_timeout`, `serverless_long_running_terminated`, `azure_packed_deployment_partial_failure`, `invalid_worker_image_failure`, `workspace_update`, `invalid_aws_parameter`, `driver_out_of_disk`, `driver_out_of_memory`, `driver_launch_timeout`, `driver_unexpected_failure`, `unexpected_pod_recreation`, `gcp_inaccessible_kms_key_failure`, `gcp_kms_key_permission_denied`, `driver_eviction`, `user_initiated_vm_termination`, `gcp_iam_timeout`, `aws_resource_quota_exceeded`, `cloud_account_setup_failure`, `aws_invalid_key_pair`, `driver_pod_creation_failure`, `maintenance_mode`, `internal_capacity_failure`, `executor_pod_unscheduled`, `storage_download_failure_slow`, `storage_download_failure_throttled`, `dynamic_spark_conf_size_exceeded`, `aws_instance_profile_update_failure`, `instance_pool_not_found`, `instance_pool_max_capacity_reached`, `aws_invalid_kms_key_state`, `gcp_insufficient_capacity`, `gcp_api_rate_quota_exceeded`, `gcp_resource_quota_exceeded`, `gcp_ip_space_exhausted`, `gcp_service_account_access_denied`, `gcp_service_account_not_found`, `gcp_forbidden`, `gcp_not_found`, `resource_usage_blocked`, `data_access_config_changed`, `access_token_failure`, `invalid_instance_placement_protocol`, `budget_policy_resolution_failure`, `in_penalty_box`, `disaster_recovery_replication`, `bootstrap_timeout_due_to_misconfig`, `instance_unreachable_due_to_misconfig`, `storage_download_failure_due_to_misconfig`, `control_plane_request_failure_due_to_misconfig`, `cloud_provider_launch_failure_due_to_misconfig`, `gcp_subnet_not_ready`, `cloud_operation_cancelled`, `cloud_provider_instance_not_launched`, `gcp_trusted_image_projects_violated`, `budget_policy_limit_enforcement_activated`, `eos_spark_image`, `no_matched_k8s`, `lazy_allocation_timeout`, `driver_node_unreachable`, `secret_creation_failure`, `pod_scheduling_failure`, `pod_assignment_failure`, `allocation_timeout`, `allocation_timeout_no_unallocated_clusters`, `allocation_timeout_no_matched_clusters`, `allocation_timeout_no_ready_clusters`, `allocation_timeout_no_warmed_up_clusters`, `allocation_timeout_node_daemon_not_ready`, `allocation_timeout_no_healthy_clusters`, `netvisor_setup_timeout`, `no_matched_k8s_testing_tag`, `cloud_provider_resource_stockout_due_to_misconfig`, `gke_based_cluster_termination`, `allocation_timeout_no_healthy_and_warmed_up_clusters`, `docker_invalid_os_exception`, `docker_container_creation_exception`, `docker_image_too_large_for_instance_exception`, `dns_resolution_error`, `gcp_denied_by_org_policy`, `secret_permission_denied`, `network_check_nic_failure`, `network_check_dns_server_failure`, `network_check_storage_failure`, `network_check_metadata_endpoint_failure`, `network_check_control_plane_failure`, `network_check_multiple_components_failure`, `driver_unhealthy`, `security_agents_failed_initial_verification`, `driver_dns_resolution_failure`, `no_activated_k8s`, `usage_policy_entitlement_denied`, `no_activated_k8s_testing_tag`, `k8s_active_pod_quota_exceeded` and `cloud_account_pod_quota_exceeded` enum values for `databricks.sdk.service.sql.TerminationReasonCode`. 
+* [Breaking] Change `create()` method for [a.account_metastore_assignments](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastore_assignments.html) account-level service to start returning `databricks.sdk.service.catalog.AccountsCreateMetastoreAssignmentResponse` dataclass. +* [Breaking] Change `delete()` method for [a.account_metastore_assignments](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastore_assignments.html) account-level service to start returning `databricks.sdk.service.catalog.AccountsDeleteMetastoreAssignmentResponse` dataclass. +* [Breaking] Change `update()` method for [a.account_metastore_assignments](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastore_assignments.html) account-level service to start returning `databricks.sdk.service.catalog.AccountsUpdateMetastoreAssignmentResponse` dataclass. +* [Breaking] Change `create()` method for [a.account_metastores](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastores.html) account-level service to return `databricks.sdk.service.catalog.AccountsCreateMetastoreResponse` dataclass. +* [Breaking] Change `delete()` method for [a.account_metastores](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastores.html) account-level service to start returning `databricks.sdk.service.catalog.AccountsDeleteMetastoreResponse` dataclass. +* [Breaking] Change `get()` method for [a.account_metastores](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastores.html) account-level service to return `databricks.sdk.service.catalog.AccountsGetMetastoreResponse` dataclass. +* [Breaking] Change `list()` method for [a.account_metastores](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastores.html) account-level service to return `databricks.sdk.service.catalog.AccountsListMetastoresResponse` dataclass. +* [Breaking] Change `update()` method for [a.account_metastores](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_metastores.html) account-level service to return `databricks.sdk.service.catalog.AccountsUpdateMetastoreResponse` dataclass. +* [Breaking] Change `create()` method for [a.account_storage_credentials](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_storage_credentials.html) account-level service to return `databricks.sdk.service.catalog.AccountsCreateStorageCredentialInfo` dataclass. +* [Breaking] Change `delete()` method for [a.account_storage_credentials](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_storage_credentials.html) account-level service to start returning `databricks.sdk.service.catalog.AccountsDeleteStorageCredentialResponse` dataclass. +* [Breaking] Change `update()` method for [a.account_storage_credentials](https://databricks-sdk-py.readthedocs.io/en/latest/account/catalog/account_storage_credentials.html) account-level service to return `databricks.sdk.service.catalog.AccountsUpdateStorageCredentialResponse` dataclass. +* [Breaking] Change `create()` method for [w.registered_models](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/catalog/registered_models.html) workspace-level service with new required argument order. 
+* [Breaking] Change `delete()` method for [a.credentials](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/credentials.html) account-level service to start returning `databricks.sdk.service.provisioning.Credential` dataclass. +* [Breaking] Change `delete()` method for [a.encryption_keys](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/encryption_keys.html) account-level service to start returning `databricks.sdk.service.provisioning.CustomerManagedKey` dataclass. +* [Breaking] Change `create()` method for [a.networks](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/networks.html) account-level service with new required argument order. +* [Breaking] Change `delete()` method for [a.networks](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/networks.html) account-level service to start returning `databricks.sdk.service.provisioning.Network` dataclass. +* [Breaking] Change `create()` and `replace()` methods for [a.private_access](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/private_access.html) account-level service with new required argument order. +* [Breaking] Change `delete()` and `replace()` methods for [a.private_access](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/private_access.html) account-level service to start returning `databricks.sdk.service.provisioning.PrivateAccessSettings` dataclass. +* [Breaking] Change `delete()` method for [a.storage](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/storage.html) account-level service to start returning `databricks.sdk.service.provisioning.StorageConfiguration` dataclass. +* [Breaking] Change `create()` method for [a.vpc_endpoints](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/vpc_endpoints.html) account-level service with new required argument order. +* [Breaking] Change `delete()` method for [a.vpc_endpoints](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/vpc_endpoints.html) account-level service to start returning `databricks.sdk.service.provisioning.VpcEndpoint` dataclass. +* [Breaking] Change `create()` and `update()` methods for [a.workspaces](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/workspaces.html) account-level service with new required argument order. +* [Breaking] Change `delete()` and `update()` methods for [a.workspaces](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/workspaces.html) account-level service to start returning `databricks.sdk.service.provisioning.Workspace` dataclass. +* [Breaking] Change `execute_statement()` method for [w.statement_execution](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/sql/statement_execution.html) workspace-level service. Method path has changed. +* [Breaking] Change `metastore_info` field for `databricks.sdk.service.catalog.AccountsCreateMetastore` to type `databricks.sdk.service.catalog.CreateAccountsMetastore` dataclass. +* [Breaking] Change `credential_info` field for `databricks.sdk.service.catalog.AccountsCreateStorageCredential` to type `databricks.sdk.service.catalog.CreateAccountsStorageCredential` dataclass. +* [Breaking] Change `metastore_info` field for `databricks.sdk.service.catalog.AccountsUpdateMetastore` to type `databricks.sdk.service.catalog.UpdateAccountsMetastore` dataclass.
+* [Breaking] Change `credential_info` field for `databricks.sdk.service.catalog.AccountsUpdateStorageCredential` to type `databricks.sdk.service.catalog.UpdateAccountsStorageCredential` dataclass. +* Change `catalog_name`, `name` and `schema_name` fields for `databricks.sdk.service.catalog.CreateRegisteredModelRequest` to no longer be required. +* [Breaking] Change `name` field for `databricks.sdk.service.database.DatabaseInstanceRole` to be required. +* Change `name` field for `databricks.sdk.service.database.DatabaseInstanceRole` to be required. +* Change `network_name` field for `databricks.sdk.service.provisioning.CreateNetworkRequest` to no longer be required. +* Change `private_access_settings_name` and `region` fields for `databricks.sdk.service.provisioning.CreatePrivateAccessSettingsRequest` to no longer be required. +* Change `vpc_endpoint_name` field for `databricks.sdk.service.provisioning.CreateVpcEndpointRequest` to no longer be required. +* Change `workspace_name` field for `databricks.sdk.service.provisioning.CreateWorkspaceRequest` to no longer be required. +* Change `dataplane_relay` and `rest_api` fields for `databricks.sdk.service.provisioning.NetworkVpcEndpoints` to no longer be required. +* [Breaking] Change `dataplane_relay` and `rest_api` fields for `databricks.sdk.service.provisioning.NetworkVpcEndpoints` to no longer be required. +* [Breaking] Change waiter for [WorkspacesAPI.update](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/workspaces.html#databricks.sdk.service.provisioning.WorkspacesAPI.update) method. +* [Breaking] Remove `browse_only` field for `databricks.sdk.service.catalog.ModelVersionInfo`. +* [Breaking] Remove `jar_dependencies` field for `databricks.sdk.service.compute.Environment`. +* [Breaking] Remove `is_no_public_ip_enabled` field for `databricks.sdk.service.provisioning.CreateWorkspaceRequest`. +* [Breaking] Remove `allowed_vpc_endpoint_ids`, `private_access_level`, `private_access_settings_name`, `public_access_enabled` and `region` fields for `databricks.sdk.service.provisioning.ReplacePrivateAccessSettingsRequest`. +* [Breaking] Remove `external_id` field for `databricks.sdk.service.provisioning.StsRole`. +* [Breaking] Remove `aws_region`, `credentials_id`, `custom_tags`, `managed_services_customer_managed_key_id`, `network_connectivity_config_id`, `network_id`, `private_access_settings_id`, `storage_configuration_id` and `storage_customer_managed_key_id` fields for `databricks.sdk.service.provisioning.UpdateWorkspaceRequest`. +* [Breaking] Remove `external_customer_info` and `is_no_public_ip_enabled` fields for `databricks.sdk.service.provisioning.Workspace`. +* [Breaking] Remove `status_unspecified` enum value for `databricks.sdk.service.sql.Status`. +* Add `compute_mode` field for `databricks.sdk.service.provisioning.CreateWorkspaceRequest`. +* Add `expected_workspace_status` field for `databricks.sdk.service.provisioning.Workspace`. +* Add `dependency_storage_locations` field for `databricks.sdk.service.sharing.TableInternalAttributes`. +* Add `git_email` field for `databricks.sdk.service.workspace.CreateCredentialsRequest`. +* Add `git_email` field for `databricks.sdk.service.workspace.CreateCredentialsResponse`. +* Add `git_email` field for `databricks.sdk.service.workspace.CredentialInfo`. +* Add `git_email` field for `databricks.sdk.service.workspace.GetCredentialsResponse`. +* Add `git_email` field for `databricks.sdk.service.workspace.UpdateCredentialsRequest`. 
+* Add `germany_tisax` enum value for `databricks.sdk.service.settings.ComplianceStandard`. +* [Breaking] Remove `prediction_probability_column` field for `databricks.sdk.service.dataquality.InferenceLogConfig`. diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index ef42d1c34..8f510ed2d 100755 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -21,6 +21,7 @@ from databricks.sdk.service import compute as pkg_compute from databricks.sdk.service import dashboards as pkg_dashboards from databricks.sdk.service import database as pkg_database +from databricks.sdk.service import dataquality as pkg_dataquality from databricks.sdk.service import files as pkg_files from databricks.sdk.service import iam as pkg_iam from databricks.sdk.service import iamv2 as pkg_iamv2 @@ -79,6 +80,7 @@ from databricks.sdk.service.dashboards import (GenieAPI, LakeviewAPI, LakeviewEmbeddedAPI) from databricks.sdk.service.database import DatabaseAPI +from databricks.sdk.service.dataquality import DataQualityAPI from databricks.sdk.service.files import DbfsAPI, FilesAPI from databricks.sdk.service.iam import (AccessControlAPI, AccountAccessControlAPI, @@ -282,6 +284,7 @@ def __init__( self._current_user = pkg_iam.CurrentUserAPI(self._api_client) self._dashboard_widgets = pkg_sql.DashboardWidgetsAPI(self._api_client) self._dashboards = pkg_sql.DashboardsAPI(self._api_client) + self._data_quality = pkg_dataquality.DataQualityAPI(self._api_client) self._data_sources = pkg_sql.DataSourcesAPI(self._api_client) self._database = pkg_database.DatabaseAPI(self._api_client) self._dbfs = DbfsExt(self._api_client) @@ -540,6 +543,11 @@ def dashboards(self) -> pkg_sql.DashboardsAPI: """In general, there is little need to modify dashboards using the API.""" return self._dashboards + @property + def data_quality(self) -> pkg_dataquality.DataQualityAPI: + """Manage the data quality of Unity Catalog objects (currently support `schema` and `table`).""" + return self._data_quality + @property def data_sources(self) -> pkg_sql.DataSourcesAPI: """This API is provided to assist you in making new query objects.""" diff --git a/databricks/sdk/service/agentbricks.py b/databricks/sdk/service/agentbricks.py index 25175acf0..b2a560e31 100755 --- a/databricks/sdk/service/agentbricks.py +++ b/databricks/sdk/service/agentbricks.py @@ -7,7 +7,7 @@ from enum import Enum from typing import Any, Dict, List, Optional -from ._internal import _enum, _from_dict, _repeated_dict +from databricks.sdk.service._internal import _enum, _from_dict, _repeated_dict _LOG = logging.getLogger("databricks.sdk") diff --git a/databricks/sdk/service/apps.py b/databricks/sdk/service/apps.py index aeedb7146..bbe13675a 100755 --- a/databricks/sdk/service/apps.py +++ b/databricks/sdk/service/apps.py @@ -10,8 +10,10 @@ from enum import Enum from typing import Any, Callable, Dict, Iterator, List, Optional +from databricks.sdk.service._internal import (Wait, _enum, _from_dict, + _repeated_dict) + from ..errors import OperationFailed -from ._internal import Wait, _enum, _from_dict, _repeated_dict _LOG = logging.getLogger("databricks.sdk") @@ -33,6 +35,8 @@ class App: budget_policy_id: Optional[str] = None + compute_size: Optional[ComputeSize] = None + compute_status: Optional[ComputeStatus] = None create_time: Optional[str] = None @@ -93,6 +97,8 @@ def as_dict(self) -> dict: body["app_status"] = self.app_status.as_dict() if self.budget_policy_id is not None: body["budget_policy_id"] = self.budget_policy_id + if self.compute_size is not None: + 
body["compute_size"] = self.compute_size.value if self.compute_status: body["compute_status"] = self.compute_status.as_dict() if self.create_time is not None: @@ -144,6 +150,8 @@ def as_shallow_dict(self) -> dict: body["app_status"] = self.app_status if self.budget_policy_id is not None: body["budget_policy_id"] = self.budget_policy_id + if self.compute_size is not None: + body["compute_size"] = self.compute_size if self.compute_status: body["compute_status"] = self.compute_status if self.create_time is not None: @@ -193,6 +201,7 @@ def from_dict(cls, d: Dict[str, Any]) -> App: active_deployment=_from_dict(d, "active_deployment", AppDeployment), app_status=_from_dict(d, "app_status", ApplicationStatus), budget_policy_id=d.get("budget_policy_id", None), + compute_size=_enum(d, "compute_size", ComputeSize), compute_status=_from_dict(d, "compute_status", ComputeStatus), create_time=d.get("create_time", None), creator=d.get("creator", None), @@ -918,6 +927,8 @@ class AppResource: description: Optional[str] = None """Description of the App Resource.""" + genie_space: Optional[AppResourceGenieSpace] = None + job: Optional[AppResourceJob] = None secret: Optional[AppResourceSecret] = None @@ -935,6 +946,8 @@ def as_dict(self) -> dict: body["database"] = self.database.as_dict() if self.description is not None: body["description"] = self.description + if self.genie_space: + body["genie_space"] = self.genie_space.as_dict() if self.job: body["job"] = self.job.as_dict() if self.name is not None: @@ -956,6 +969,8 @@ def as_shallow_dict(self) -> dict: body["database"] = self.database if self.description is not None: body["description"] = self.description + if self.genie_space: + body["genie_space"] = self.genie_space if self.job: body["job"] = self.job if self.name is not None: @@ -976,6 +991,7 @@ def from_dict(cls, d: Dict[str, Any]) -> AppResource: return cls( database=_from_dict(d, "database", AppResourceDatabase), description=d.get("description", None), + genie_space=_from_dict(d, "genie_space", AppResourceGenieSpace), job=_from_dict(d, "job", AppResourceJob), name=d.get("name", None), secret=_from_dict(d, "secret", AppResourceSecret), @@ -1030,6 +1046,54 @@ class AppResourceDatabaseDatabasePermission(Enum): CAN_CONNECT_AND_CREATE = "CAN_CONNECT_AND_CREATE" +@dataclass +class AppResourceGenieSpace: + name: str + + space_id: str + + permission: AppResourceGenieSpaceGenieSpacePermission + + def as_dict(self) -> dict: + """Serializes the AppResourceGenieSpace into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.name is not None: + body["name"] = self.name + if self.permission is not None: + body["permission"] = self.permission.value + if self.space_id is not None: + body["space_id"] = self.space_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AppResourceGenieSpace into a shallow dictionary of its immediate attributes.""" + body = {} + if self.name is not None: + body["name"] = self.name + if self.permission is not None: + body["permission"] = self.permission + if self.space_id is not None: + body["space_id"] = self.space_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AppResourceGenieSpace: + """Deserializes the AppResourceGenieSpace from a dictionary.""" + return cls( + name=d.get("name", None), + permission=_enum(d, "permission", AppResourceGenieSpaceGenieSpacePermission), + space_id=d.get("space_id", None), + ) + + +class AppResourceGenieSpaceGenieSpacePermission(Enum): + + CAN_EDIT = "CAN_EDIT" + CAN_MANAGE = 
"CAN_MANAGE" + CAN_RUN = "CAN_RUN" + CAN_VIEW = "CAN_VIEW" + + @dataclass class AppResourceJob: id: str @@ -1259,6 +1323,112 @@ class AppResourceUcSecurableUcSecurableType(Enum): VOLUME = "VOLUME" +@dataclass +class AppUpdate: + budget_policy_id: Optional[str] = None + + compute_size: Optional[ComputeSize] = None + + description: Optional[str] = None + + resources: Optional[List[AppResource]] = None + + status: Optional[AppUpdateUpdateStatus] = None + + usage_policy_id: Optional[str] = None + + user_api_scopes: Optional[List[str]] = None + + def as_dict(self) -> dict: + """Serializes the AppUpdate into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.budget_policy_id is not None: + body["budget_policy_id"] = self.budget_policy_id + if self.compute_size is not None: + body["compute_size"] = self.compute_size.value + if self.description is not None: + body["description"] = self.description + if self.resources: + body["resources"] = [v.as_dict() for v in self.resources] + if self.status: + body["status"] = self.status.as_dict() + if self.usage_policy_id is not None: + body["usage_policy_id"] = self.usage_policy_id + if self.user_api_scopes: + body["user_api_scopes"] = [v for v in self.user_api_scopes] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AppUpdate into a shallow dictionary of its immediate attributes.""" + body = {} + if self.budget_policy_id is not None: + body["budget_policy_id"] = self.budget_policy_id + if self.compute_size is not None: + body["compute_size"] = self.compute_size + if self.description is not None: + body["description"] = self.description + if self.resources: + body["resources"] = self.resources + if self.status: + body["status"] = self.status + if self.usage_policy_id is not None: + body["usage_policy_id"] = self.usage_policy_id + if self.user_api_scopes: + body["user_api_scopes"] = self.user_api_scopes + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AppUpdate: + """Deserializes the AppUpdate from a dictionary.""" + return cls( + budget_policy_id=d.get("budget_policy_id", None), + compute_size=_enum(d, "compute_size", ComputeSize), + description=d.get("description", None), + resources=_repeated_dict(d, "resources", AppResource), + status=_from_dict(d, "status", AppUpdateUpdateStatus), + usage_policy_id=d.get("usage_policy_id", None), + user_api_scopes=d.get("user_api_scopes", None), + ) + + +@dataclass +class AppUpdateUpdateStatus: + message: Optional[str] = None + + state: Optional[AppUpdateUpdateStatusUpdateState] = None + + def as_dict(self) -> dict: + """Serializes the AppUpdateUpdateStatus into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.message is not None: + body["message"] = self.message + if self.state is not None: + body["state"] = self.state.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AppUpdateUpdateStatus into a shallow dictionary of its immediate attributes.""" + body = {} + if self.message is not None: + body["message"] = self.message + if self.state is not None: + body["state"] = self.state + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AppUpdateUpdateStatus: + """Deserializes the AppUpdateUpdateStatus from a dictionary.""" + return cls(message=d.get("message", None), state=_enum(d, "state", AppUpdateUpdateStatusUpdateState)) + + +class AppUpdateUpdateStatusUpdateState(Enum): + + FAILED = "FAILED" + IN_PROGRESS = "IN_PROGRESS" + NOT_UPDATED = "NOT_UPDATED" + SUCCEEDED = 
"SUCCEEDED" + + class ApplicationState(Enum): CRASHED = "CRASHED" @@ -1299,6 +1469,13 @@ def from_dict(cls, d: Dict[str, Any]) -> ApplicationStatus: return cls(message=d.get("message", None), state=_enum(d, "state", ApplicationState)) +class ComputeSize(Enum): + + LARGE = "LARGE" + LIQUID = "LIQUID" + MEDIUM = "MEDIUM" + + class ComputeState(Enum): ACTIVE = "ACTIVE" @@ -1582,6 +1759,37 @@ def wait_get_app_active( attempt += 1 raise TimeoutError(f"timed out after {timeout}: {status_message}") + def wait_get_update_app_succeeded( + self, app_name: str, timeout=timedelta(minutes=20), callback: Optional[Callable[[AppUpdate], None]] = None + ) -> AppUpdate: + deadline = time.time() + timeout.total_seconds() + target_states = (AppUpdateUpdateStatusUpdateState.SUCCEEDED,) + failure_states = (AppUpdateUpdateStatusUpdateState.FAILED,) + status_message = "polling..." + attempt = 1 + while time.time() < deadline: + poll = self.get_update(app_name=app_name) + status = poll.status.state + status_message = f"current status: {status}" + if poll.status: + status_message = poll.status.message + if status in target_states: + return poll + if callback: + callback(poll) + if status in failure_states: + msg = f"failed to reach SUCCEEDED, got {status}: {status_message}" + raise OperationFailed(msg) + prefix = f"app_name={app_name}" + sleep = attempt + if sleep > 10: + # sleep 10s max per attempt + sleep = 10 + _LOG.debug(f"{prefix}: ({status}) {status_message} (sleeping ~{sleep}s)") + time.sleep(sleep + random.random()) + attempt += 1 + raise TimeoutError(f"timed out after {timeout}: {status_message}") + def wait_get_deployment_app_succeeded( self, app_name: str, @@ -1674,6 +1882,45 @@ def create(self, app: App, *, no_compute: Optional[bool] = None) -> Wait[App]: def create_and_wait(self, app: App, *, no_compute: Optional[bool] = None, timeout=timedelta(minutes=20)) -> App: return self.create(app=app, no_compute=no_compute).result(timeout=timeout) + def create_update(self, app_name: str, update_mask: str, *, app: Optional[App] = None) -> Wait[AppUpdate]: + """Creates an app update and starts the update process. The update process is asynchronous and the status + of the update can be checked with the GetAppUpdate method. + + :param app_name: str + :param update_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + :param app: :class:`App` (optional) + + :returns: + Long-running operation waiter for :class:`AppUpdate`. + See :method:wait_get_update_app_succeeded for more details. 
+ """ + body = {} + if app is not None: + body["app"] = app.as_dict() + if update_mask is not None: + body["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + op_response = self._api.do("POST", f"/api/2.0/apps/{app_name}/update", body=body, headers=headers) + return Wait(self.wait_get_update_app_succeeded, response=AppUpdate.from_dict(op_response), app_name=app_name) + + def create_update_and_wait( + self, app_name: str, update_mask: str, *, app: Optional[App] = None, timeout=timedelta(minutes=20) + ) -> AppUpdate: + return self.create_update(app=app, app_name=app_name, update_mask=update_mask).result(timeout=timeout) + def delete(self, name: str) -> App: """Deletes an app. @@ -1787,6 +2034,22 @@ def get_permissions(self, app_name: str) -> AppPermissions: res = self._api.do("GET", f"/api/2.0/permissions/apps/{app_name}", headers=headers) return AppPermissions.from_dict(res) + def get_update(self, app_name: str) -> AppUpdate: + """Gets the status of an app update. + + :param app_name: str + The name of the app. + + :returns: :class:`AppUpdate` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/apps/{app_name}/update", headers=headers) + return AppUpdate.from_dict(res) + def list(self, *, page_size: Optional[int] = None, page_token: Optional[str] = None) -> Iterator[App]: """Lists all apps in the workspace. diff --git a/databricks/sdk/service/billing.py b/databricks/sdk/service/billing.py index 2e118457a..3758028c2 100755 --- a/databricks/sdk/service/billing.py +++ b/databricks/sdk/service/billing.py @@ -7,13 +7,12 @@ from enum import Enum from typing import Any, BinaryIO, Dict, Iterator, List, Optional -from ._internal import _enum, _from_dict, _repeated_dict +from databricks.sdk.service import compute +from databricks.sdk.service._internal import _enum, _from_dict, _repeated_dict _LOG = logging.getLogger("databricks.sdk") -from databricks.sdk.service import compute - # all definitions in this file are in alphabetical order diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py index 3ac709a89..e0036200a 100755 --- a/databricks/sdk/service/catalog.py +++ b/databricks/sdk/service/catalog.py @@ -10,8 +10,10 @@ from enum import Enum from typing import Any, Callable, Dict, Iterator, List, Optional +from databricks.sdk.service._internal import (Wait, _enum, _from_dict, + _repeated_dict, _repeated_enum) + from ..errors import OperationFailed -from ._internal import Wait, _enum, _from_dict, _repeated_dict, _repeated_enum _LOG = logging.getLogger("databricks.sdk") @@ -63,8 +65,191 @@ def from_dict(cls, d: Dict[str, Any]) -> AccessRequestDestinations: ) +@dataclass +class AccountsCreateMetastoreAssignmentResponse: + """The metastore assignment was successfully created.""" + + def as_dict(self) -> dict: + """Serializes the AccountsCreateMetastoreAssignmentResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AccountsCreateMetastoreAssignmentResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AccountsCreateMetastoreAssignmentResponse: + """Deserializes the AccountsCreateMetastoreAssignmentResponse from a dictionary.""" + return cls() + + +@dataclass +class AccountsCreateMetastoreResponse: + metastore_info: Optional[MetastoreInfo] = None + + def as_dict(self) 
-> dict: + """Serializes the AccountsCreateMetastoreResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.metastore_info: + body["metastore_info"] = self.metastore_info.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AccountsCreateMetastoreResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.metastore_info: + body["metastore_info"] = self.metastore_info + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AccountsCreateMetastoreResponse: + """Deserializes the AccountsCreateMetastoreResponse from a dictionary.""" + return cls(metastore_info=_from_dict(d, "metastore_info", MetastoreInfo)) + + +@dataclass +class AccountsCreateStorageCredentialInfo: + credential_info: Optional[StorageCredentialInfo] = None + + def as_dict(self) -> dict: + """Serializes the AccountsCreateStorageCredentialInfo into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.credential_info: + body["credential_info"] = self.credential_info.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AccountsCreateStorageCredentialInfo into a shallow dictionary of its immediate attributes.""" + body = {} + if self.credential_info: + body["credential_info"] = self.credential_info + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AccountsCreateStorageCredentialInfo: + """Deserializes the AccountsCreateStorageCredentialInfo from a dictionary.""" + return cls(credential_info=_from_dict(d, "credential_info", StorageCredentialInfo)) + + +@dataclass +class AccountsDeleteMetastoreAssignmentResponse: + """The metastore assignment was successfully deleted.""" + + def as_dict(self) -> dict: + """Serializes the AccountsDeleteMetastoreAssignmentResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AccountsDeleteMetastoreAssignmentResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AccountsDeleteMetastoreAssignmentResponse: + """Deserializes the AccountsDeleteMetastoreAssignmentResponse from a dictionary.""" + return cls() + + +@dataclass +class AccountsDeleteMetastoreResponse: + """The metastore was successfully deleted.""" + + def as_dict(self) -> dict: + """Serializes the AccountsDeleteMetastoreResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AccountsDeleteMetastoreResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AccountsDeleteMetastoreResponse: + """Deserializes the AccountsDeleteMetastoreResponse from a dictionary.""" + return cls() + + +@dataclass +class AccountsDeleteStorageCredentialResponse: + """The storage credential was successfully deleted.""" + + def as_dict(self) -> dict: + """Serializes the AccountsDeleteStorageCredentialResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AccountsDeleteStorageCredentialResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AccountsDeleteStorageCredentialResponse: + """Deserializes 
the AccountsDeleteStorageCredentialResponse from a dictionary.""" + return cls() + + +@dataclass +class AccountsGetMetastoreResponse: + """The metastore was successfully returned.""" + + metastore_info: Optional[MetastoreInfo] = None + + def as_dict(self) -> dict: + """Serializes the AccountsGetMetastoreResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.metastore_info: + body["metastore_info"] = self.metastore_info.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AccountsGetMetastoreResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.metastore_info: + body["metastore_info"] = self.metastore_info + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AccountsGetMetastoreResponse: + """Deserializes the AccountsGetMetastoreResponse from a dictionary.""" + return cls(metastore_info=_from_dict(d, "metastore_info", MetastoreInfo)) + + +@dataclass +class AccountsListMetastoresResponse: + """Metastores were returned successfully.""" + + metastores: Optional[List[MetastoreInfo]] = None + """An array of metastore information objects.""" + + def as_dict(self) -> dict: + """Serializes the AccountsListMetastoresResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.metastores: + body["metastores"] = [v.as_dict() for v in self.metastores] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AccountsListMetastoresResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.metastores: + body["metastores"] = self.metastores + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AccountsListMetastoresResponse: + """Deserializes the AccountsListMetastoresResponse from a dictionary.""" + return cls(metastores=_repeated_dict(d, "metastores", MetastoreInfo)) + + @dataclass class AccountsMetastoreAssignment: + """The workspace metastore assignment was successfully returned.""" + metastore_assignment: Optional[MetastoreAssignment] = None def as_dict(self) -> dict: @@ -88,50 +273,100 @@ def from_dict(cls, d: Dict[str, Any]) -> AccountsMetastoreAssignment: @dataclass -class AccountsMetastoreInfo: +class AccountsStorageCredentialInfo: + """The storage credential was successfully retrieved.""" + + credential_info: Optional[StorageCredentialInfo] = None + + def as_dict(self) -> dict: + """Serializes the AccountsStorageCredentialInfo into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.credential_info: + body["credential_info"] = self.credential_info.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AccountsStorageCredentialInfo into a shallow dictionary of its immediate attributes.""" + body = {} + if self.credential_info: + body["credential_info"] = self.credential_info + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AccountsStorageCredentialInfo: + """Deserializes the AccountsStorageCredentialInfo from a dictionary.""" + return cls(credential_info=_from_dict(d, "credential_info", StorageCredentialInfo)) + + +@dataclass +class AccountsUpdateMetastoreAssignmentResponse: + """The metastore assignment was successfully updated.""" + + def as_dict(self) -> dict: + """Serializes the AccountsUpdateMetastoreAssignmentResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the 
AccountsUpdateMetastoreAssignmentResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AccountsUpdateMetastoreAssignmentResponse: + """Deserializes the AccountsUpdateMetastoreAssignmentResponse from a dictionary.""" + return cls() + + +@dataclass +class AccountsUpdateMetastoreResponse: + """The metastore update request succeeded.""" + metastore_info: Optional[MetastoreInfo] = None def as_dict(self) -> dict: - """Serializes the AccountsMetastoreInfo into a dictionary suitable for use as a JSON request body.""" + """Serializes the AccountsUpdateMetastoreResponse into a dictionary suitable for use as a JSON request body.""" body = {} if self.metastore_info: body["metastore_info"] = self.metastore_info.as_dict() return body def as_shallow_dict(self) -> dict: - """Serializes the AccountsMetastoreInfo into a shallow dictionary of its immediate attributes.""" + """Serializes the AccountsUpdateMetastoreResponse into a shallow dictionary of its immediate attributes.""" body = {} if self.metastore_info: body["metastore_info"] = self.metastore_info return body @classmethod - def from_dict(cls, d: Dict[str, Any]) -> AccountsMetastoreInfo: - """Deserializes the AccountsMetastoreInfo from a dictionary.""" + def from_dict(cls, d: Dict[str, Any]) -> AccountsUpdateMetastoreResponse: + """Deserializes the AccountsUpdateMetastoreResponse from a dictionary.""" return cls(metastore_info=_from_dict(d, "metastore_info", MetastoreInfo)) @dataclass -class AccountsStorageCredentialInfo: +class AccountsUpdateStorageCredentialResponse: + """The storage credential was successfully updated.""" + credential_info: Optional[StorageCredentialInfo] = None def as_dict(self) -> dict: - """Serializes the AccountsStorageCredentialInfo into a dictionary suitable for use as a JSON request body.""" + """Serializes the AccountsUpdateStorageCredentialResponse into a dictionary suitable for use as a JSON request body.""" body = {} if self.credential_info: body["credential_info"] = self.credential_info.as_dict() return body def as_shallow_dict(self) -> dict: - """Serializes the AccountsStorageCredentialInfo into a shallow dictionary of its immediate attributes.""" + """Serializes the AccountsUpdateStorageCredentialResponse into a shallow dictionary of its immediate attributes.""" body = {} if self.credential_info: body["credential_info"] = self.credential_info return body @classmethod - def from_dict(cls, d: Dict[str, Any]) -> AccountsStorageCredentialInfo: - """Deserializes the AccountsStorageCredentialInfo from a dictionary.""" + def from_dict(cls, d: Dict[str, Any]) -> AccountsUpdateStorageCredentialResponse: + """Deserializes the AccountsUpdateStorageCredentialResponse from a dictionary.""" return cls(credential_info=_from_dict(d, "credential_info", StorageCredentialInfo)) @@ -1666,58 +1901,185 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateAccessRequestResponse: @dataclass -class CreateFunction: +class CreateAccountsMetastore: name: str - """Name of function, relative to parent schema.""" - - catalog_name: str - """Name of parent catalog.""" - - schema_name: str - """Name of parent schema relative to its parent catalog.""" + """The user-specified name of the metastore.""" - input_params: FunctionParameterInfos + region: Optional[str] = None + """Cloud region which the metastore serves (e.g., `us-west-2`, `westus`).""" - data_type: ColumnTypeName - """Scalar function return data type.""" + storage_root: Optional[str] = None + 
"""The storage root URL for metastore""" - full_data_type: str - """Pretty printed function data type.""" + def as_dict(self) -> dict: + """Serializes the CreateAccountsMetastore into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.name is not None: + body["name"] = self.name + if self.region is not None: + body["region"] = self.region + if self.storage_root is not None: + body["storage_root"] = self.storage_root + return body - routine_body: CreateFunctionRoutineBody - """Function language. When **EXTERNAL** is used, the language of the routine function should be - specified in the __external_language__ field, and the __return_params__ of the function cannot - be used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be - **NO_SQL**.""" + def as_shallow_dict(self) -> dict: + """Serializes the CreateAccountsMetastore into a shallow dictionary of its immediate attributes.""" + body = {} + if self.name is not None: + body["name"] = self.name + if self.region is not None: + body["region"] = self.region + if self.storage_root is not None: + body["storage_root"] = self.storage_root + return body - routine_definition: str - """Function body.""" + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> CreateAccountsMetastore: + """Deserializes the CreateAccountsMetastore from a dictionary.""" + return cls(name=d.get("name", None), region=d.get("region", None), storage_root=d.get("storage_root", None)) - parameter_style: CreateFunctionParameterStyle - """Function parameter style. **S** is the value for SQL.""" - is_deterministic: bool - """Whether the function is deterministic.""" +@dataclass +class CreateAccountsStorageCredential: + name: str + """The credential name. The name must be unique among storage and service credentials within the + metastore.""" - sql_data_access: CreateFunctionSqlDataAccess - """Function SQL data access.""" + aws_iam_role: Optional[AwsIamRoleRequest] = None + """The AWS IAM role configuration.""" - is_null_call: bool - """Function null call.""" + azure_managed_identity: Optional[AzureManagedIdentityRequest] = None + """The Azure managed identity configuration.""" - security_type: CreateFunctionSecurityType - """Function security type.""" + azure_service_principal: Optional[AzureServicePrincipal] = None + """The Azure service principal configuration.""" - specific_name: str - """Specific name of the function; Reserved for future use.""" + cloudflare_api_token: Optional[CloudflareApiToken] = None + """The Cloudflare API token configuration.""" comment: Optional[str] = None - """User-provided free-form text description.""" + """Comment associated with the credential.""" - external_language: Optional[str] = None - """External function language.""" + databricks_gcp_service_account: Optional[DatabricksGcpServiceAccountRequest] = None + """The Databricks managed GCP service account configuration.""" - external_name: Optional[str] = None + read_only: Optional[bool] = None + """Whether the credential is usable only for read operations. 
Only applicable when purpose is + **STORAGE**.""" + + def as_dict(self) -> dict: + """Serializes the CreateAccountsStorageCredential into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.aws_iam_role: + body["aws_iam_role"] = self.aws_iam_role.as_dict() + if self.azure_managed_identity: + body["azure_managed_identity"] = self.azure_managed_identity.as_dict() + if self.azure_service_principal: + body["azure_service_principal"] = self.azure_service_principal.as_dict() + if self.cloudflare_api_token: + body["cloudflare_api_token"] = self.cloudflare_api_token.as_dict() + if self.comment is not None: + body["comment"] = self.comment + if self.databricks_gcp_service_account: + body["databricks_gcp_service_account"] = self.databricks_gcp_service_account.as_dict() + if self.name is not None: + body["name"] = self.name + if self.read_only is not None: + body["read_only"] = self.read_only + return body + + def as_shallow_dict(self) -> dict: + """Serializes the CreateAccountsStorageCredential into a shallow dictionary of its immediate attributes.""" + body = {} + if self.aws_iam_role: + body["aws_iam_role"] = self.aws_iam_role + if self.azure_managed_identity: + body["azure_managed_identity"] = self.azure_managed_identity + if self.azure_service_principal: + body["azure_service_principal"] = self.azure_service_principal + if self.cloudflare_api_token: + body["cloudflare_api_token"] = self.cloudflare_api_token + if self.comment is not None: + body["comment"] = self.comment + if self.databricks_gcp_service_account: + body["databricks_gcp_service_account"] = self.databricks_gcp_service_account + if self.name is not None: + body["name"] = self.name + if self.read_only is not None: + body["read_only"] = self.read_only + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> CreateAccountsStorageCredential: + """Deserializes the CreateAccountsStorageCredential from a dictionary.""" + return cls( + aws_iam_role=_from_dict(d, "aws_iam_role", AwsIamRoleRequest), + azure_managed_identity=_from_dict(d, "azure_managed_identity", AzureManagedIdentityRequest), + azure_service_principal=_from_dict(d, "azure_service_principal", AzureServicePrincipal), + cloudflare_api_token=_from_dict(d, "cloudflare_api_token", CloudflareApiToken), + comment=d.get("comment", None), + databricks_gcp_service_account=_from_dict( + d, "databricks_gcp_service_account", DatabricksGcpServiceAccountRequest + ), + name=d.get("name", None), + read_only=d.get("read_only", None), + ) + + +@dataclass +class CreateFunction: + name: str + """Name of function, relative to parent schema.""" + + catalog_name: str + """Name of parent Catalog.""" + + schema_name: str + """Name of parent Schema relative to its parent Catalog.""" + + input_params: FunctionParameterInfos + """Function input parameters.""" + + data_type: ColumnTypeName + """Scalar function return data type.""" + + full_data_type: str + """Pretty printed function data type.""" + + routine_body: CreateFunctionRoutineBody + """Function language. When **EXTERNAL** is used, the language of the routine function should be + specified in the **external_language** field, and the **return_params** of the function cannot + be used (as **TABLE** return type is not supported), and the **sql_data_access** field must be + **NO_SQL**.""" + + routine_definition: str + """Function body.""" + + parameter_style: CreateFunctionParameterStyle + """Function parameter style. 
**S** is the value for SQL.""" + + is_deterministic: bool + """Whether the function is deterministic.""" + + sql_data_access: CreateFunctionSqlDataAccess + """Function SQL data access.""" + + is_null_call: bool + """Function null call.""" + + security_type: CreateFunctionSecurityType + """Function security type.""" + + specific_name: str + """Specific name of the function; Reserved for future use.""" + + comment: Optional[str] = None + """User-provided free-form text description.""" + + external_language: Optional[str] = None + """External function language.""" + + external_name: Optional[str] = None """External function name.""" properties: Optional[str] = None @@ -1727,7 +2089,7 @@ class CreateFunction: """Table function return parameters.""" routine_dependencies: Optional[DependencyList] = None - """Function dependencies.""" + """function dependencies.""" sql_path: Optional[str] = None """List of schemes whose objects can be referenced without qualification.""" @@ -1855,74 +2217,28 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateFunction: class CreateFunctionParameterStyle(Enum): - """Function parameter style. **S** is the value for SQL.""" S = "S" class CreateFunctionRoutineBody(Enum): - """Function language. When **EXTERNAL** is used, the language of the routine function should be - specified in the __external_language__ field, and the __return_params__ of the function cannot - be used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be - **NO_SQL**.""" EXTERNAL = "EXTERNAL" SQL = "SQL" class CreateFunctionSecurityType(Enum): - """The security type of the function.""" DEFINER = "DEFINER" class CreateFunctionSqlDataAccess(Enum): - """Function SQL data access.""" CONTAINS_SQL = "CONTAINS_SQL" NO_SQL = "NO_SQL" READS_SQL_DATA = "READS_SQL_DATA" -@dataclass -class CreateMetastore: - name: str - """The user-specified name of the metastore.""" - - region: Optional[str] = None - """Cloud region which the metastore serves (e.g., `us-west-2`, `westus`).""" - - storage_root: Optional[str] = None - """The storage root URL for metastore""" - - def as_dict(self) -> dict: - """Serializes the CreateMetastore into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.name is not None: - body["name"] = self.name - if self.region is not None: - body["region"] = self.region - if self.storage_root is not None: - body["storage_root"] = self.storage_root - return body - - def as_shallow_dict(self) -> dict: - """Serializes the CreateMetastore into a shallow dictionary of its immediate attributes.""" - body = {} - if self.name is not None: - body["name"] = self.name - if self.region is not None: - body["region"] = self.region - if self.storage_root is not None: - body["storage_root"] = self.storage_root - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> CreateMetastore: - """Deserializes the CreateMetastore from a dictionary.""" - return cls(name=d.get("name", None), region=d.get("region", None), storage_root=d.get("storage_root", None)) - - @dataclass class CreateMetastoreAssignment: metastore_id: str @@ -2026,119 +2342,6 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateRequestExternalLineage: ) -@dataclass -class CreateResponse: - def as_dict(self) -> dict: - """Serializes the CreateResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - return body - - def as_shallow_dict(self) -> dict: - """Serializes the CreateResponse into a shallow dictionary of its immediate attributes.""" - body = {} - 
return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> CreateResponse: - """Deserializes the CreateResponse from a dictionary.""" - return cls() - - -@dataclass -class CreateStorageCredential: - name: str - """The credential name. The name must be unique among storage and service credentials within the - metastore.""" - - aws_iam_role: Optional[AwsIamRoleRequest] = None - """The AWS IAM role configuration.""" - - azure_managed_identity: Optional[AzureManagedIdentityRequest] = None - """The Azure managed identity configuration.""" - - azure_service_principal: Optional[AzureServicePrincipal] = None - """The Azure service principal configuration.""" - - cloudflare_api_token: Optional[CloudflareApiToken] = None - """The Cloudflare API token configuration.""" - - comment: Optional[str] = None - """Comment associated with the credential.""" - - databricks_gcp_service_account: Optional[DatabricksGcpServiceAccountRequest] = None - """The Databricks managed GCP service account configuration.""" - - read_only: Optional[bool] = None - """Whether the credential is usable only for read operations. Only applicable when purpose is - **STORAGE**.""" - - skip_validation: Optional[bool] = None - """Supplying true to this argument skips validation of the created credential.""" - - def as_dict(self) -> dict: - """Serializes the CreateStorageCredential into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.aws_iam_role: - body["aws_iam_role"] = self.aws_iam_role.as_dict() - if self.azure_managed_identity: - body["azure_managed_identity"] = self.azure_managed_identity.as_dict() - if self.azure_service_principal: - body["azure_service_principal"] = self.azure_service_principal.as_dict() - if self.cloudflare_api_token: - body["cloudflare_api_token"] = self.cloudflare_api_token.as_dict() - if self.comment is not None: - body["comment"] = self.comment - if self.databricks_gcp_service_account: - body["databricks_gcp_service_account"] = self.databricks_gcp_service_account.as_dict() - if self.name is not None: - body["name"] = self.name - if self.read_only is not None: - body["read_only"] = self.read_only - if self.skip_validation is not None: - body["skip_validation"] = self.skip_validation - return body - - def as_shallow_dict(self) -> dict: - """Serializes the CreateStorageCredential into a shallow dictionary of its immediate attributes.""" - body = {} - if self.aws_iam_role: - body["aws_iam_role"] = self.aws_iam_role - if self.azure_managed_identity: - body["azure_managed_identity"] = self.azure_managed_identity - if self.azure_service_principal: - body["azure_service_principal"] = self.azure_service_principal - if self.cloudflare_api_token: - body["cloudflare_api_token"] = self.cloudflare_api_token - if self.comment is not None: - body["comment"] = self.comment - if self.databricks_gcp_service_account: - body["databricks_gcp_service_account"] = self.databricks_gcp_service_account - if self.name is not None: - body["name"] = self.name - if self.read_only is not None: - body["read_only"] = self.read_only - if self.skip_validation is not None: - body["skip_validation"] = self.skip_validation - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> CreateStorageCredential: - """Deserializes the CreateStorageCredential from a dictionary.""" - return cls( - aws_iam_role=_from_dict(d, "aws_iam_role", AwsIamRoleRequest), - azure_managed_identity=_from_dict(d, "azure_managed_identity", AzureManagedIdentityRequest), - azure_service_principal=_from_dict(d, 
"azure_service_principal", AzureServicePrincipal), - cloudflare_api_token=_from_dict(d, "cloudflare_api_token", CloudflareApiToken), - comment=d.get("comment", None), - databricks_gcp_service_account=_from_dict( - d, "databricks_gcp_service_account", DatabricksGcpServiceAccountRequest - ), - name=d.get("name", None), - read_only=d.get("read_only", None), - skip_validation=d.get("skip_validation", None), - ) - - @dataclass class CredentialDependency: """A credential that is dependent on a SQL object.""" @@ -4143,7 +4346,7 @@ class FunctionInfo: through the BROWSE privilege when include_browse is enabled in the request.""" catalog_name: Optional[str] = None - """Name of parent catalog.""" + """Name of parent Catalog.""" comment: Optional[str] = None """User-provided free-form text description.""" @@ -4167,12 +4370,13 @@ class FunctionInfo: """Pretty printed function data type.""" full_name: Optional[str] = None - """Full name of function, in form of __catalog_name__.__schema_name__.__function__name__""" + """Full name of Function, in form of **catalog_name**.**schema_name**.**function_name**""" function_id: Optional[str] = None """Id of Function, relative to parent schema.""" input_params: Optional[FunctionParameterInfos] = None + """Function input parameters.""" is_deterministic: Optional[bool] = None """Whether the function is deterministic.""" @@ -4187,7 +4391,7 @@ class FunctionInfo: """Name of function, relative to parent schema.""" owner: Optional[str] = None - """Username of current owner of function.""" + """Username of current owner of the function.""" parameter_style: Optional[FunctionInfoParameterStyle] = None """Function parameter style. **S** is the value for SQL.""" @@ -4200,18 +4404,18 @@ class FunctionInfo: routine_body: Optional[FunctionInfoRoutineBody] = None """Function language. When **EXTERNAL** is used, the language of the routine function should be - specified in the __external_language__ field, and the __return_params__ of the function cannot - be used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be + specified in the **external_language** field, and the **return_params** of the function cannot + be used (as **TABLE** return type is not supported), and the **sql_data_access** field must be **NO_SQL**.""" routine_definition: Optional[str] = None """Function body.""" routine_dependencies: Optional[DependencyList] = None - """Function dependencies.""" + """function dependencies.""" schema_name: Optional[str] = None - """Name of parent schema relative to its parent catalog.""" + """Name of parent Schema relative to its parent Catalog.""" security_type: Optional[FunctionInfoSecurityType] = None """Function security type.""" @@ -4226,10 +4430,10 @@ class FunctionInfo: """List of schemes whose objects can be referenced without qualification.""" updated_at: Optional[int] = None - """Time at which this function was created, in epoch milliseconds.""" + """Time at which this function was last modified, in epoch milliseconds.""" updated_by: Optional[str] = None - """Username of user who last modified function.""" + """Username of user who last modified the function.""" def as_dict(self) -> dict: """Serializes the FunctionInfo into a dictionary suitable for use as a JSON request body.""" @@ -4399,29 +4603,22 @@ def from_dict(cls, d: Dict[str, Any]) -> FunctionInfo: class FunctionInfoParameterStyle(Enum): - """Function parameter style. **S** is the value for SQL.""" S = "S" class FunctionInfoRoutineBody(Enum): - """Function language. 
When **EXTERNAL** is used, the language of the routine function should be - specified in the __external_language__ field, and the __return_params__ of the function cannot - be used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be - **NO_SQL**.""" EXTERNAL = "EXTERNAL" SQL = "SQL" class FunctionInfoSecurityType(Enum): - """The security type of the function.""" DEFINER = "DEFINER" class FunctionInfoSqlDataAccess(Enum): - """Function SQL data access.""" CONTAINS_SQL = "CONTAINS_SQL" NO_SQL = "NO_SQL" @@ -4431,12 +4628,13 @@ class FunctionInfoSqlDataAccess(Enum): @dataclass class FunctionParameterInfo: name: str - """Name of parameter.""" + """Name of Parameter.""" type_text: str """Full data type spec, SQL/catalogString text.""" type_name: ColumnTypeName + """Name of type (INT, STRUCT, MAP, etc.)""" position: int """Ordinal position of column (starting at position 0).""" @@ -4448,8 +4646,10 @@ class FunctionParameterInfo: """Default value of the parameter.""" parameter_mode: Optional[FunctionParameterMode] = None + """Function parameter mode.""" parameter_type: Optional[FunctionParameterType] = None + """Function parameter type.""" type_interval_type: Optional[str] = None """Format of IntervalType.""" @@ -4543,7 +4743,6 @@ def from_dict(cls, d: Dict[str, Any]) -> FunctionParameterInfo: @dataclass class FunctionParameterInfos: parameters: Optional[List[FunctionParameterInfo]] = None - """The array of __FunctionParameterInfo__ definitions of the function's parameters.""" def as_dict(self) -> dict: """Serializes the FunctionParameterInfos into a dictionary suitable for use as a JSON request body.""" @@ -4566,13 +4765,11 @@ def from_dict(cls, d: Dict[str, Any]) -> FunctionParameterInfos: class FunctionParameterMode(Enum): - """The mode of the function parameter.""" IN = "IN" class FunctionParameterType(Enum): - """The type of function parameter.""" COLUMN = "COLUMN" PARAM = "PARAM" @@ -5153,7 +5350,7 @@ class LineageDirection(Enum): @dataclass class ListAccountMetastoreAssignmentsResponse: - """The list of workspaces to which the given metastore is assigned.""" + """The metastore assignments were successfully returned.""" workspace_ids: Optional[List[int]] = None @@ -5179,6 +5376,8 @@ def from_dict(cls, d: Dict[str, Any]) -> ListAccountMetastoreAssignmentsResponse @dataclass class ListAccountStorageCredentialsResponse: + """The metastore storage credentials were successfully returned.""" + storage_credentials: Optional[List[StorageCredentialInfo]] = None """An array of metastore storage credentials.""" @@ -5896,7 +6095,8 @@ class MetastoreAssignment: """The unique ID of the metastore.""" default_catalog_name: Optional[str] = None - """The name of the default catalog in the metastore.""" + """The name of the default catalog in the metastore. This field is deprecated. 
Please use "Default + Namespace API" to configure the default catalog for a Databricks workspace.""" def as_dict(self) -> dict: """Serializes the MetastoreAssignment into a dictionary suitable for use as a JSON request body.""" @@ -6113,10 +6313,6 @@ class ModelVersionInfo: aliases: Optional[List[RegisteredModelAlias]] = None """List of aliases associated with the model version""" - browse_only: Optional[bool] = None - """Indicates whether the principal is limited to retrieving metadata for the associated object - through the BROWSE privilege when include_browse is enabled in the request.""" - catalog_name: Optional[str] = None """The name of the catalog containing the model version""" @@ -6175,8 +6371,6 @@ def as_dict(self) -> dict: body = {} if self.aliases: body["aliases"] = [v.as_dict() for v in self.aliases] - if self.browse_only is not None: - body["browse_only"] = self.browse_only if self.catalog_name is not None: body["catalog_name"] = self.catalog_name if self.comment is not None: @@ -6218,8 +6412,6 @@ def as_shallow_dict(self) -> dict: body = {} if self.aliases: body["aliases"] = self.aliases - if self.browse_only is not None: - body["browse_only"] = self.browse_only if self.catalog_name is not None: body["catalog_name"] = self.catalog_name if self.comment is not None: @@ -6261,7 +6453,6 @@ def from_dict(cls, d: Dict[str, Any]) -> ModelVersionInfo: """Deserializes the ModelVersionInfo from a dictionary.""" return cls( aliases=_repeated_dict(d, "aliases", RegisteredModelAlias), - browse_only=d.get("browse_only", None), catalog_name=d.get("catalog_name", None), comment=d.get("comment", None), created_at=d.get("created_at", None), @@ -6283,11 +6474,9 @@ def from_dict(cls, d: Dict[str, Any]) -> ModelVersionInfo: class ModelVersionInfoStatus(Enum): - """Current status of the model version. Newly created model versions start in PENDING_REGISTRATION - status, then move to READY status once the model version files are uploaded and the model - version is finalized. Only model versions in READY status can be loaded for inference or served.""" FAILED_REGISTRATION = "FAILED_REGISTRATION" + MODEL_VERSION_STATUS_UNKNOWN = "MODEL_VERSION_STATUS_UNKNOWN" PENDING_REGISTRATION = "PENDING_REGISTRATION" READY = "READY" @@ -8125,11 +8314,21 @@ def from_dict(cls, d: Dict[str, Any]) -> RegenerateDashboardResponse: @dataclass class RegisteredModelAlias: - """Registered model alias.""" - alias_name: Optional[str] = None """Name of the alias, e.g. 
'champion' or 'latest_stable'""" + catalog_name: Optional[str] = None + """The name of the catalog containing the model version""" + + id: Optional[str] = None + """The unique identifier of the alias""" + + model_name: Optional[str] = None + """The name of the parent registered model of the model version, relative to parent schema""" + + schema_name: Optional[str] = None + """The name of the schema containing the model version, relative to parent catalog""" + version_num: Optional[int] = None """Integer version number of the model version to which this alias points.""" @@ -8138,6 +8337,14 @@ def as_dict(self) -> dict: body = {} if self.alias_name is not None: body["alias_name"] = self.alias_name + if self.catalog_name is not None: + body["catalog_name"] = self.catalog_name + if self.id is not None: + body["id"] = self.id + if self.model_name is not None: + body["model_name"] = self.model_name + if self.schema_name is not None: + body["schema_name"] = self.schema_name if self.version_num is not None: body["version_num"] = self.version_num return body @@ -8147,6 +8354,14 @@ def as_shallow_dict(self) -> dict: body = {} if self.alias_name is not None: body["alias_name"] = self.alias_name + if self.catalog_name is not None: + body["catalog_name"] = self.catalog_name + if self.id is not None: + body["id"] = self.id + if self.model_name is not None: + body["model_name"] = self.model_name + if self.schema_name is not None: + body["schema_name"] = self.schema_name if self.version_num is not None: body["version_num"] = self.version_num return body @@ -8154,7 +8369,14 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> RegisteredModelAlias: """Deserializes the RegisteredModelAlias from a dictionary.""" - return cls(alias_name=d.get("alias_name", None), version_num=d.get("version_num", None)) + return cls( + alias_name=d.get("alias_name", None), + catalog_name=d.get("catalog_name", None), + id=d.get("id", None), + model_name=d.get("model_name", None), + schema_name=d.get("schema_name", None), + version_num=d.get("version_num", None), + ) @dataclass @@ -8540,7 +8762,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Securable: class SecurableKind(Enum): - """Latest kind: CONNECTION_SHAREPOINT_OAUTH_M2M = 264; Next id:265""" + """Latest kind: CONNECTION_REDSHIFT_IAM = 265; Next id:266""" TABLE_DB_STORAGE = "TABLE_DB_STORAGE" TABLE_DELTA = "TABLE_DELTA" @@ -9636,50 +9858,7 @@ def from_dict(cls, d: Dict[str, Any]) -> UnassignResponse: @dataclass -class UpdateAssignmentResponse: - def as_dict(self) -> dict: - """Serializes the UpdateAssignmentResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - return body - - def as_shallow_dict(self) -> dict: - """Serializes the UpdateAssignmentResponse into a shallow dictionary of its immediate attributes.""" - body = {} - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> UpdateAssignmentResponse: - """Deserializes the UpdateAssignmentResponse from a dictionary.""" - return cls() - - -@dataclass -class UpdateCatalogWorkspaceBindingsResponse: - workspaces: Optional[List[int]] = None - """A list of workspace IDs""" - - def as_dict(self) -> dict: - """Serializes the UpdateCatalogWorkspaceBindingsResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.workspaces: - body["workspaces"] = [v for v in self.workspaces] - return body - - def as_shallow_dict(self) -> dict: - """Serializes the UpdateCatalogWorkspaceBindingsResponse into a shallow dictionary 
of its immediate attributes.""" - body = {} - if self.workspaces: - body["workspaces"] = self.workspaces - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> UpdateCatalogWorkspaceBindingsResponse: - """Deserializes the UpdateCatalogWorkspaceBindingsResponse from a dictionary.""" - return cls(workspaces=d.get("workspaces", None)) - - -@dataclass -class UpdateMetastore: +class UpdateAccountsMetastore: delta_sharing_organization_name: Optional[str] = None """The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name.""" @@ -9690,12 +9869,6 @@ class UpdateMetastore: delta_sharing_scope: Optional[DeltaSharingScopeEnum] = None """The scope of Delta Sharing enabled for the metastore.""" - id: Optional[str] = None - """Unique ID of the metastore.""" - - new_name: Optional[str] = None - """New name for the metastore.""" - owner: Optional[str] = None """The owner of the metastore.""" @@ -9706,7 +9879,7 @@ class UpdateMetastore: """UUID of storage credential to access the metastore storage_root.""" def as_dict(self) -> dict: - """Serializes the UpdateMetastore into a dictionary suitable for use as a JSON request body.""" + """Serializes the UpdateAccountsMetastore into a dictionary suitable for use as a JSON request body.""" body = {} if self.delta_sharing_organization_name is not None: body["delta_sharing_organization_name"] = self.delta_sharing_organization_name @@ -9716,10 +9889,6 @@ def as_dict(self) -> dict: ) if self.delta_sharing_scope is not None: body["delta_sharing_scope"] = self.delta_sharing_scope.value - if self.id is not None: - body["id"] = self.id - if self.new_name is not None: - body["new_name"] = self.new_name if self.owner is not None: body["owner"] = self.owner if self.privilege_model_version is not None: @@ -9729,7 +9898,7 @@ def as_dict(self) -> dict: return body def as_shallow_dict(self) -> dict: - """Serializes the UpdateMetastore into a shallow dictionary of its immediate attributes.""" + """Serializes the UpdateAccountsMetastore into a shallow dictionary of its immediate attributes.""" body = {} if self.delta_sharing_organization_name is not None: body["delta_sharing_organization_name"] = self.delta_sharing_organization_name @@ -9739,10 +9908,6 @@ def as_shallow_dict(self) -> dict: ) if self.delta_sharing_scope is not None: body["delta_sharing_scope"] = self.delta_sharing_scope - if self.id is not None: - body["id"] = self.id - if self.new_name is not None: - body["new_name"] = self.new_name if self.owner is not None: body["owner"] = self.owner if self.privilege_model_version is not None: @@ -9752,22 +9917,157 @@ def as_shallow_dict(self) -> dict: return body @classmethod - def from_dict(cls, d: Dict[str, Any]) -> UpdateMetastore: - """Deserializes the UpdateMetastore from a dictionary.""" + def from_dict(cls, d: Dict[str, Any]) -> UpdateAccountsMetastore: + """Deserializes the UpdateAccountsMetastore from a dictionary.""" return cls( delta_sharing_organization_name=d.get("delta_sharing_organization_name", None), delta_sharing_recipient_token_lifetime_in_seconds=d.get( "delta_sharing_recipient_token_lifetime_in_seconds", None ), delta_sharing_scope=_enum(d, "delta_sharing_scope", DeltaSharingScopeEnum), - id=d.get("id", None), - new_name=d.get("new_name", None), owner=d.get("owner", None), privilege_model_version=d.get("privilege_model_version", None), storage_root_credential_id=d.get("storage_root_credential_id", None), ) +@dataclass +class UpdateAccountsStorageCredential: + aws_iam_role: 
Optional[AwsIamRoleRequest] = None + """The AWS IAM role configuration.""" + + azure_managed_identity: Optional[AzureManagedIdentityResponse] = None + """The Azure managed identity configuration.""" + + azure_service_principal: Optional[AzureServicePrincipal] = None + """The Azure service principal configuration.""" + + cloudflare_api_token: Optional[CloudflareApiToken] = None + """The Cloudflare API token configuration.""" + + comment: Optional[str] = None + """Comment associated with the credential.""" + + databricks_gcp_service_account: Optional[DatabricksGcpServiceAccountRequest] = None + """The Databricks managed GCP service account configuration.""" + + isolation_mode: Optional[IsolationMode] = None + """Whether the current securable is accessible from all workspaces or a specific set of workspaces.""" + + owner: Optional[str] = None + """Username of current owner of credential.""" + + read_only: Optional[bool] = None + """Whether the credential is usable only for read operations. Only applicable when purpose is + **STORAGE**.""" + + def as_dict(self) -> dict: + """Serializes the UpdateAccountsStorageCredential into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.aws_iam_role: + body["aws_iam_role"] = self.aws_iam_role.as_dict() + if self.azure_managed_identity: + body["azure_managed_identity"] = self.azure_managed_identity.as_dict() + if self.azure_service_principal: + body["azure_service_principal"] = self.azure_service_principal.as_dict() + if self.cloudflare_api_token: + body["cloudflare_api_token"] = self.cloudflare_api_token.as_dict() + if self.comment is not None: + body["comment"] = self.comment + if self.databricks_gcp_service_account: + body["databricks_gcp_service_account"] = self.databricks_gcp_service_account.as_dict() + if self.isolation_mode is not None: + body["isolation_mode"] = self.isolation_mode.value + if self.owner is not None: + body["owner"] = self.owner + if self.read_only is not None: + body["read_only"] = self.read_only + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UpdateAccountsStorageCredential into a shallow dictionary of its immediate attributes.""" + body = {} + if self.aws_iam_role: + body["aws_iam_role"] = self.aws_iam_role + if self.azure_managed_identity: + body["azure_managed_identity"] = self.azure_managed_identity + if self.azure_service_principal: + body["azure_service_principal"] = self.azure_service_principal + if self.cloudflare_api_token: + body["cloudflare_api_token"] = self.cloudflare_api_token + if self.comment is not None: + body["comment"] = self.comment + if self.databricks_gcp_service_account: + body["databricks_gcp_service_account"] = self.databricks_gcp_service_account + if self.isolation_mode is not None: + body["isolation_mode"] = self.isolation_mode + if self.owner is not None: + body["owner"] = self.owner + if self.read_only is not None: + body["read_only"] = self.read_only + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UpdateAccountsStorageCredential: + """Deserializes the UpdateAccountsStorageCredential from a dictionary.""" + return cls( + aws_iam_role=_from_dict(d, "aws_iam_role", AwsIamRoleRequest), + azure_managed_identity=_from_dict(d, "azure_managed_identity", AzureManagedIdentityResponse), + azure_service_principal=_from_dict(d, "azure_service_principal", AzureServicePrincipal), + cloudflare_api_token=_from_dict(d, "cloudflare_api_token", CloudflareApiToken), + comment=d.get("comment", None), + 
databricks_gcp_service_account=_from_dict( + d, "databricks_gcp_service_account", DatabricksGcpServiceAccountRequest + ), + isolation_mode=_enum(d, "isolation_mode", IsolationMode), + owner=d.get("owner", None), + read_only=d.get("read_only", None), + ) + + +@dataclass +class UpdateAssignmentResponse: + def as_dict(self) -> dict: + """Serializes the UpdateAssignmentResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UpdateAssignmentResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UpdateAssignmentResponse: + """Deserializes the UpdateAssignmentResponse from a dictionary.""" + return cls() + + +@dataclass +class UpdateCatalogWorkspaceBindingsResponse: + workspaces: Optional[List[int]] = None + """A list of workspace IDs""" + + def as_dict(self) -> dict: + """Serializes the UpdateCatalogWorkspaceBindingsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.workspaces: + body["workspaces"] = [v for v in self.workspaces] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UpdateCatalogWorkspaceBindingsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.workspaces: + body["workspaces"] = self.workspaces + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UpdateCatalogWorkspaceBindingsResponse: + """Deserializes the UpdateCatalogWorkspaceBindingsResponse from a dictionary.""" + return cls(workspaces=d.get("workspaces", None)) + + @dataclass class UpdateMetastoreAssignment: default_catalog_name: Optional[str] = None @@ -9860,184 +10160,58 @@ def as_dict(self) -> dict: if self.columns: body["columns"] = [v.as_dict() for v in self.columns] if self.id is not None: - body["id"] = self.id - if self.properties: - body["properties"] = self.properties - if self.source: - body["source"] = self.source.as_dict() - if self.target: - body["target"] = self.target.as_dict() - return body - - def as_shallow_dict(self) -> dict: - """Serializes the UpdateRequestExternalLineage into a shallow dictionary of its immediate attributes.""" - body = {} - if self.columns: - body["columns"] = self.columns - if self.id is not None: - body["id"] = self.id - if self.properties: - body["properties"] = self.properties - if self.source: - body["source"] = self.source - if self.target: - body["target"] = self.target - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> UpdateRequestExternalLineage: - """Deserializes the UpdateRequestExternalLineage from a dictionary.""" - return cls( - columns=_repeated_dict(d, "columns", ColumnRelationship), - id=d.get("id", None), - properties=d.get("properties", None), - source=_from_dict(d, "source", ExternalLineageObject), - target=_from_dict(d, "target", ExternalLineageObject), - ) - - -@dataclass -class UpdateResponse: - def as_dict(self) -> dict: - """Serializes the UpdateResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - return body - - def as_shallow_dict(self) -> dict: - """Serializes the UpdateResponse into a shallow dictionary of its immediate attributes.""" - body = {} - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> UpdateResponse: - """Deserializes the UpdateResponse from a dictionary.""" - return cls() - - -@dataclass -class UpdateStorageCredential: - aws_iam_role: 
Optional[AwsIamRoleRequest] = None - """The AWS IAM role configuration.""" - - azure_managed_identity: Optional[AzureManagedIdentityResponse] = None - """The Azure managed identity configuration.""" - - azure_service_principal: Optional[AzureServicePrincipal] = None - """The Azure service principal configuration.""" - - cloudflare_api_token: Optional[CloudflareApiToken] = None - """The Cloudflare API token configuration.""" - - comment: Optional[str] = None - """Comment associated with the credential.""" - - databricks_gcp_service_account: Optional[DatabricksGcpServiceAccountRequest] = None - """The Databricks managed GCP service account configuration.""" - - force: Optional[bool] = None - """Force update even if there are dependent external locations or external tables.""" - - isolation_mode: Optional[IsolationMode] = None - """Whether the current securable is accessible from all workspaces or a specific set of workspaces.""" - - name: Optional[str] = None - """Name of the storage credential.""" - - new_name: Optional[str] = None - """New name for the storage credential.""" + body["id"] = self.id + if self.properties: + body["properties"] = self.properties + if self.source: + body["source"] = self.source.as_dict() + if self.target: + body["target"] = self.target.as_dict() + return body - owner: Optional[str] = None - """Username of current owner of credential.""" + def as_shallow_dict(self) -> dict: + """Serializes the UpdateRequestExternalLineage into a shallow dictionary of its immediate attributes.""" + body = {} + if self.columns: + body["columns"] = self.columns + if self.id is not None: + body["id"] = self.id + if self.properties: + body["properties"] = self.properties + if self.source: + body["source"] = self.source + if self.target: + body["target"] = self.target + return body - read_only: Optional[bool] = None - """Whether the credential is usable only for read operations. 
Only applicable when purpose is - **STORAGE**.""" + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UpdateRequestExternalLineage: + """Deserializes the UpdateRequestExternalLineage from a dictionary.""" + return cls( + columns=_repeated_dict(d, "columns", ColumnRelationship), + id=d.get("id", None), + properties=d.get("properties", None), + source=_from_dict(d, "source", ExternalLineageObject), + target=_from_dict(d, "target", ExternalLineageObject), + ) - skip_validation: Optional[bool] = None - """Supplying true to this argument skips validation of the updated credential.""" +@dataclass +class UpdateResponse: def as_dict(self) -> dict: - """Serializes the UpdateStorageCredential into a dictionary suitable for use as a JSON request body.""" + """Serializes the UpdateResponse into a dictionary suitable for use as a JSON request body.""" body = {} - if self.aws_iam_role: - body["aws_iam_role"] = self.aws_iam_role.as_dict() - if self.azure_managed_identity: - body["azure_managed_identity"] = self.azure_managed_identity.as_dict() - if self.azure_service_principal: - body["azure_service_principal"] = self.azure_service_principal.as_dict() - if self.cloudflare_api_token: - body["cloudflare_api_token"] = self.cloudflare_api_token.as_dict() - if self.comment is not None: - body["comment"] = self.comment - if self.databricks_gcp_service_account: - body["databricks_gcp_service_account"] = self.databricks_gcp_service_account.as_dict() - if self.force is not None: - body["force"] = self.force - if self.isolation_mode is not None: - body["isolation_mode"] = self.isolation_mode.value - if self.name is not None: - body["name"] = self.name - if self.new_name is not None: - body["new_name"] = self.new_name - if self.owner is not None: - body["owner"] = self.owner - if self.read_only is not None: - body["read_only"] = self.read_only - if self.skip_validation is not None: - body["skip_validation"] = self.skip_validation return body def as_shallow_dict(self) -> dict: - """Serializes the UpdateStorageCredential into a shallow dictionary of its immediate attributes.""" + """Serializes the UpdateResponse into a shallow dictionary of its immediate attributes.""" body = {} - if self.aws_iam_role: - body["aws_iam_role"] = self.aws_iam_role - if self.azure_managed_identity: - body["azure_managed_identity"] = self.azure_managed_identity - if self.azure_service_principal: - body["azure_service_principal"] = self.azure_service_principal - if self.cloudflare_api_token: - body["cloudflare_api_token"] = self.cloudflare_api_token - if self.comment is not None: - body["comment"] = self.comment - if self.databricks_gcp_service_account: - body["databricks_gcp_service_account"] = self.databricks_gcp_service_account - if self.force is not None: - body["force"] = self.force - if self.isolation_mode is not None: - body["isolation_mode"] = self.isolation_mode - if self.name is not None: - body["name"] = self.name - if self.new_name is not None: - body["new_name"] = self.new_name - if self.owner is not None: - body["owner"] = self.owner - if self.read_only is not None: - body["read_only"] = self.read_only - if self.skip_validation is not None: - body["skip_validation"] = self.skip_validation return body @classmethod - def from_dict(cls, d: Dict[str, Any]) -> UpdateStorageCredential: - """Deserializes the UpdateStorageCredential from a dictionary.""" - return cls( - aws_iam_role=_from_dict(d, "aws_iam_role", AwsIamRoleRequest), - azure_managed_identity=_from_dict(d, "azure_managed_identity", AzureManagedIdentityResponse), - 
azure_service_principal=_from_dict(d, "azure_service_principal", AzureServicePrincipal), - cloudflare_api_token=_from_dict(d, "cloudflare_api_token", CloudflareApiToken), - comment=d.get("comment", None), - databricks_gcp_service_account=_from_dict( - d, "databricks_gcp_service_account", DatabricksGcpServiceAccountRequest - ), - force=d.get("force", None), - isolation_mode=_enum(d, "isolation_mode", IsolationMode), - name=d.get("name", None), - new_name=d.get("new_name", None), - owner=d.get("owner", None), - read_only=d.get("read_only", None), - skip_validation=d.get("skip_validation", None), - ) + def from_dict(cls, d: Dict[str, Any]) -> UpdateResponse: + """Deserializes the UpdateResponse from a dictionary.""" + return cls() @dataclass @@ -10415,7 +10589,7 @@ def __init__(self, api_client): def create( self, workspace_id: int, metastore_id: str, *, metastore_assignment: Optional[CreateMetastoreAssignment] = None - ): + ) -> AccountsCreateMetastoreAssignmentResponse: """Creates an assignment to a metastore for a workspace :param workspace_id: int @@ -10424,7 +10598,7 @@ def create( Unity Catalog metastore ID :param metastore_assignment: :class:`CreateMetastoreAssignment` (optional) - + :returns: :class:`AccountsCreateMetastoreAssignmentResponse` """ body = {} if metastore_assignment is not None: @@ -10434,14 +10608,15 @@ def create( "Content-Type": "application/json", } - self._api.do( + res = self._api.do( "POST", f"/api/2.0/accounts/{self._api.account_id}/workspaces/{workspace_id}/metastores/{metastore_id}", body=body, headers=headers, ) + return AccountsCreateMetastoreAssignmentResponse.from_dict(res) - def delete(self, workspace_id: int, metastore_id: str): + def delete(self, workspace_id: int, metastore_id: str) -> AccountsDeleteMetastoreAssignmentResponse: """Deletes a metastore assignment to a workspace, leaving the workspace with no metastore. :param workspace_id: int @@ -10449,23 +10624,24 @@ def delete(self, workspace_id: int, metastore_id: str): :param metastore_id: str Unity Catalog metastore ID - + :returns: :class:`AccountsDeleteMetastoreAssignmentResponse` """ headers = { "Accept": "application/json", } - self._api.do( + res = self._api.do( "DELETE", f"/api/2.0/accounts/{self._api.account_id}/workspaces/{workspace_id}/metastores/{metastore_id}", headers=headers, ) + return AccountsDeleteMetastoreAssignmentResponse.from_dict(res) def get(self, workspace_id: int) -> AccountsMetastoreAssignment: """Gets the metastore assignment, if any, for the workspace specified by ID. If the workspace is assigned - a metastore, the mappig will be returned. If no metastore is assigned to the workspace, the assignment - will not be found and a 404 returned. + a metastore, the mapping will be returned. If no metastore is assigned to the workspace, the + assignment will not be found and a 404 returned. :param workspace_id: int Workspace ID. @@ -10503,7 +10679,7 @@ def list(self, metastore_id: str) -> Iterator[int]: def update( self, workspace_id: int, metastore_id: str, *, metastore_assignment: Optional[UpdateMetastoreAssignment] = None - ): + ) -> AccountsUpdateMetastoreAssignmentResponse: """Updates an assignment to a metastore for a workspace. Currently, only the default catalog may be updated. 
@@ -10513,7 +10689,7 @@ def update( Unity Catalog metastore ID :param metastore_assignment: :class:`UpdateMetastoreAssignment` (optional) - + :returns: :class:`AccountsUpdateMetastoreAssignmentResponse` """ body = {} if metastore_assignment is not None: @@ -10523,12 +10699,13 @@ def update( "Content-Type": "application/json", } - self._api.do( + res = self._api.do( "PUT", f"/api/2.0/accounts/{self._api.account_id}/workspaces/{workspace_id}/metastores/{metastore_id}", body=body, headers=headers, ) + return AccountsUpdateMetastoreAssignmentResponse.from_dict(res) class AccountMetastoresAPI: @@ -10538,12 +10715,12 @@ class AccountMetastoresAPI: def __init__(self, api_client): self._api = api_client - def create(self, *, metastore_info: Optional[CreateMetastore] = None) -> AccountsMetastoreInfo: + def create(self, *, metastore_info: Optional[CreateAccountsMetastore] = None) -> AccountsCreateMetastoreResponse: """Creates a Unity Catalog metastore. - :param metastore_info: :class:`CreateMetastore` (optional) + :param metastore_info: :class:`CreateAccountsMetastore` (optional) - :returns: :class:`AccountsMetastoreInfo` + :returns: :class:`AccountsCreateMetastoreResponse` """ body = {} if metastore_info is not None: @@ -10554,9 +10731,9 @@ def create(self, *, metastore_info: Optional[CreateMetastore] = None) -> Account } res = self._api.do("POST", f"/api/2.0/accounts/{self._api.account_id}/metastores", body=body, headers=headers) - return AccountsMetastoreInfo.from_dict(res) + return AccountsCreateMetastoreResponse.from_dict(res) - def delete(self, metastore_id: str, *, force: Optional[bool] = None): + def delete(self, metastore_id: str, *, force: Optional[bool] = None) -> AccountsDeleteMetastoreResponse: """Deletes a Unity Catalog metastore for an account, both specified by ID. :param metastore_id: str @@ -10564,7 +10741,7 @@ def delete(self, metastore_id: str, *, force: Optional[bool] = None): :param force: bool (optional) Force deletion even if the metastore is not empty. Default is false. - + :returns: :class:`AccountsDeleteMetastoreResponse` """ query = {} @@ -10574,20 +10751,21 @@ def delete(self, metastore_id: str, *, force: Optional[bool] = None): "Accept": "application/json", } - self._api.do( + res = self._api.do( "DELETE", f"/api/2.0/accounts/{self._api.account_id}/metastores/{metastore_id}", query=query, headers=headers, ) + return AccountsDeleteMetastoreResponse.from_dict(res) - def get(self, metastore_id: str) -> AccountsMetastoreInfo: + def get(self, metastore_id: str) -> AccountsGetMetastoreResponse: """Gets a Unity Catalog metastore from an account, both specified by ID. :param metastore_id: str Unity Catalog metastore ID - :returns: :class:`AccountsMetastoreInfo` + :returns: :class:`AccountsGetMetastoreResponse` """ headers = { @@ -10597,7 +10775,7 @@ def get(self, metastore_id: str) -> AccountsMetastoreInfo: res = self._api.do( "GET", f"/api/2.0/accounts/{self._api.account_id}/metastores/{metastore_id}", headers=headers ) - return AccountsMetastoreInfo.from_dict(res) + return AccountsGetMetastoreResponse.from_dict(res) def list(self) -> Iterator[MetastoreInfo]: """Gets all Unity Catalog metastores associated with an account specified by ID. 
@@ -10611,17 +10789,20 @@ def list(self) -> Iterator[MetastoreInfo]: } json = self._api.do("GET", f"/api/2.0/accounts/{self._api.account_id}/metastores", headers=headers) - parsed = ListMetastoresResponse.from_dict(json).metastores + parsed = AccountsListMetastoresResponse.from_dict(json).metastores return parsed if parsed is not None else [] - def update(self, metastore_id: str, *, metastore_info: Optional[UpdateMetastore] = None) -> AccountsMetastoreInfo: + def update( + self, metastore_id: str, *, metastore_info: Optional[UpdateAccountsMetastore] = None + ) -> AccountsUpdateMetastoreResponse: """Updates an existing Unity Catalog metastore. :param metastore_id: str Unity Catalog metastore ID - :param metastore_info: :class:`UpdateMetastore` (optional) + :param metastore_info: :class:`UpdateAccountsMetastore` (optional) + Properties of the metastore to change. - :returns: :class:`AccountsMetastoreInfo` + :returns: :class:`AccountsUpdateMetastoreResponse` """ body = {} if metastore_info is not None: @@ -10634,7 +10815,7 @@ def update(self, metastore_id: str, *, metastore_info: Optional[UpdateMetastore] res = self._api.do( "PUT", f"/api/2.0/accounts/{self._api.account_id}/metastores/{metastore_id}", body=body, headers=headers ) - return AccountsMetastoreInfo.from_dict(res) + return AccountsUpdateMetastoreResponse.from_dict(res) class AccountStorageCredentialsAPI: @@ -10644,25 +10825,33 @@ def __init__(self, api_client): self._api = api_client def create( - self, metastore_id: str, *, credential_info: Optional[CreateStorageCredential] = None - ) -> AccountsStorageCredentialInfo: - """Creates a new storage credential. The request object is specific to the cloud: - - * **AwsIamRole** for AWS credentials * **AzureServicePrincipal** for Azure credentials * - **GcpServiceAcountKey** for GCP credentials. + self, + metastore_id: str, + *, + credential_info: Optional[CreateAccountsStorageCredential] = None, + skip_validation: Optional[bool] = None, + ) -> AccountsCreateStorageCredentialInfo: + """Creates a new storage credential. The request object is specific to the cloud: - **AwsIamRole** for + AWS credentials - **AzureServicePrincipal** for Azure credentials - **GcpServiceAccountKey** for GCP + credentials - The caller must be a metastore admin and have the **CREATE_STORAGE_CREDENTIAL** privilege on the + The caller must be a metastore admin and have the `CREATE_STORAGE_CREDENTIAL` privilege on the metastore. :param metastore_id: str Unity Catalog metastore ID - :param credential_info: :class:`CreateStorageCredential` (optional) + :param credential_info: :class:`CreateAccountsStorageCredential` (optional) + :param skip_validation: bool (optional) + Optional, default false. Supplying true to this argument skips validation of the created set of + credentials. 
- :returns: :class:`AccountsStorageCredentialInfo` + :returns: :class:`AccountsCreateStorageCredentialInfo` """ body = {} if credential_info is not None: body["credential_info"] = credential_info.as_dict() + if skip_validation is not None: + body["skip_validation"] = skip_validation headers = { "Accept": "application/json", "Content-Type": "application/json", @@ -10674,9 +10863,11 @@ def create( body=body, headers=headers, ) - return AccountsStorageCredentialInfo.from_dict(res) + return AccountsCreateStorageCredentialInfo.from_dict(res) - def delete(self, metastore_id: str, storage_credential_name: str, *, force: Optional[bool] = None): + def delete( + self, metastore_id: str, storage_credential_name: str, *, force: Optional[bool] = None + ) -> AccountsDeleteStorageCredentialResponse: """Deletes a storage credential from the metastore. The caller must be an owner of the storage credential. @@ -10687,7 +10878,7 @@ def delete(self, metastore_id: str, storage_credential_name: str, *, force: Opti :param force: bool (optional) Force deletion even if the Storage Credential is not empty. Default is false. - + :returns: :class:`AccountsDeleteStorageCredentialResponse` """ query = {} @@ -10697,12 +10888,13 @@ def delete(self, metastore_id: str, storage_credential_name: str, *, force: Opti "Accept": "application/json", } - self._api.do( + res = self._api.do( "DELETE", f"/api/2.0/accounts/{self._api.account_id}/metastores/{metastore_id}/storage-credentials/{storage_credential_name}", query=query, headers=headers, ) + return AccountsDeleteStorageCredentialResponse.from_dict(res) def get(self, metastore_id: str, storage_credential_name: str) -> AccountsStorageCredentialInfo: """Gets a storage credential from the metastore. The caller must be a metastore admin, the owner of the @@ -10711,7 +10903,7 @@ def get(self, metastore_id: str, storage_credential_name: str) -> AccountsStorag :param metastore_id: str Unity Catalog metastore ID :param storage_credential_name: str - Name of the storage credential. + Required. Name of the storage credential. :returns: :class:`AccountsStorageCredentialInfo` """ @@ -10753,22 +10945,27 @@ def update( metastore_id: str, storage_credential_name: str, *, - credential_info: Optional[UpdateStorageCredential] = None, - ) -> AccountsStorageCredentialInfo: + credential_info: Optional[UpdateAccountsStorageCredential] = None, + skip_validation: Optional[bool] = None, + ) -> AccountsUpdateStorageCredentialResponse: """Updates a storage credential on the metastore. The caller must be the owner of the storage credential. - If the caller is a metastore admin, only the __owner__ credential can be changed. + If the caller is a metastore admin, only the **owner** credential can be changed. :param metastore_id: str Unity Catalog metastore ID :param storage_credential_name: str Name of the storage credential. - :param credential_info: :class:`UpdateStorageCredential` (optional) + :param credential_info: :class:`UpdateAccountsStorageCredential` (optional) + :param skip_validation: bool (optional) + Optional. Supplying true to this argument skips validation of the updated set of credentials. 
- :returns: :class:`AccountsStorageCredentialInfo` + :returns: :class:`AccountsUpdateStorageCredentialResponse` """ body = {} if credential_info is not None: body["credential_info"] = credential_info.as_dict() + if skip_validation is not None: + body["skip_validation"] = skip_validation headers = { "Accept": "application/json", "Content-Type": "application/json", @@ -10780,7 +10977,7 @@ def update( body=body, headers=headers, ) - return AccountsStorageCredentialInfo.from_dict(res) + return AccountsUpdateStorageCredentialResponse.from_dict(res) class ArtifactAllowlistsAPI: @@ -10973,6 +11170,7 @@ def list( self, *, include_browse: Optional[bool] = None, + include_unbound: Optional[bool] = None, max_results: Optional[int] = None, page_token: Optional[str] = None, ) -> Iterator[CatalogInfo]: @@ -10981,9 +11179,20 @@ def list( **USE_CATALOG** privilege) will be retrieved. There is no guarantee of a specific ordering of the elements in the array. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param include_browse: bool (optional) Whether to include catalogs in the response for which the principal can only access selective metadata for + :param include_unbound: bool (optional) + Whether to include catalogs not bound to the workspace. Effective only if the user has permission to + update the catalog–workspace binding. :param max_results: int (optional) Maximum number of catalogs to return. - when set to 0, the page length is set to a server configured value (recommended); - when set to a value greater than 0, the page length is the minimum of this @@ -11001,6 +11210,8 @@ def list( query = {} if include_browse is not None: query["include_browse"] = include_browse + if include_unbound is not None: + query["include_unbound"] = include_unbound if max_results is not None: query["max_results"] = max_results if page_token is not None: @@ -11174,6 +11385,14 @@ def get(self, name: str) -> ConnectionInfo: def list(self, *, max_results: Optional[int] = None, page_token: Optional[str] = None) -> Iterator[ConnectionInfo]: """List all connections. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param max_results: int (optional) Maximum number of connections to return. - If not set, all connections are returned (not recommended). - when set to a value greater than 0, the page length is the minimum of this value and @@ -11390,6 +11609,7 @@ def get_credential(self, name_arg: str) -> CredentialInfo: def list_credentials( self, *, + include_unbound: Optional[bool] = None, max_results: Optional[int] = None, page_token: Optional[str] = None, purpose: Optional[CredentialPurpose] = None, @@ -11400,6 +11620,9 @@ def list_credentials( is a metastore admin, retrieval of credentials is unrestricted. 
There is no guarantee of a specific ordering of the elements in the array. + :param include_unbound: bool (optional) + Whether to include credentials not bound to the workspace. Effective only if the user has permission + to update the credential–workspace binding. :param max_results: int (optional) Maximum number of credentials to return. - If not set, the default max page size is used. - When set to a value greater than 0, the page length is the minimum of this value and a server-configured @@ -11414,6 +11637,8 @@ def list_credentials( """ query = {} + if include_unbound is not None: + query["include_unbound"] = include_unbound if max_results is not None: query["max_results"] = max_results if page_token is not None: @@ -12037,6 +12262,7 @@ def list( self, *, include_browse: Optional[bool] = None, + include_unbound: Optional[bool] = None, max_results: Optional[int] = None, page_token: Optional[str] = None, ) -> Iterator[ExternalLocationInfo]: @@ -12044,9 +12270,20 @@ def list( must be a metastore admin, the owner of the external location, or a user that has some privilege on the external location. There is no guarantee of a specific ordering of the elements in the array. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param include_browse: bool (optional) Whether to include external locations in the response for which the principal can only access selective metadata for + :param include_unbound: bool (optional) + Whether to include external locations not bound to the workspace. Effective only if the user has + permission to update the location–workspace binding. :param max_results: int (optional) Maximum number of external locations to return. If not set, all the external locations are returned (not recommended). - when set to a value greater than 0, the page length is the minimum of this @@ -12061,6 +12298,8 @@ def list( query = {} if include_browse is not None: query["include_browse"] = include_browse + if include_unbound is not None: + query["include_unbound"] = include_unbound if max_results is not None: query["max_results"] = max_results if page_token is not None: @@ -12346,7 +12585,7 @@ def delete(self, name: str, *, force: Optional[bool] = None): :param name: str The fully-qualified name of the function (of the form - __catalog_name__.__schema_name__.__function__name__). + __catalog_name__.__schema_name__.__function__name__) . :param force: bool (optional) Force deletion even if the function is notempty. @@ -12356,9 +12595,7 @@ def delete(self, name: str, *, force: Optional[bool] = None): query = {} if force is not None: query["force"] = force - headers = { - "Accept": "application/json", - } + headers = {} self._api.do("DELETE", f"/api/2.1/unity-catalog/functions/{name}", query=query, headers=headers) @@ -12405,6 +12642,14 @@ def list( functions for which either the user has the **EXECUTE** privilege or the user is the owner. There is no guarantee of a specific ordering of the elements in the array. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. 
+ + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param catalog_name: str Name of parent catalog for functions of interest. :param schema_name: str @@ -12459,7 +12704,7 @@ def update(self, name: str, *, owner: Optional[str] = None) -> FunctionInfo: The fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function__name__). :param owner: str (optional) - Username of current owner of function. + Username of current owner of the function. :returns: :class:`FunctionInfo` """ @@ -12749,6 +12994,14 @@ def list(self, *, max_results: Optional[int] = None, page_token: Optional[str] = """Gets an array of the available metastores (as __MetastoreInfo__ objects). The caller must be an admin to retrieve this info. There is no guarantee of a specific ordering of the elements in the array. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param max_results: int (optional) Maximum number of metastores to return. - when set to a value greater than 0, the page length is the minimum of this value and a server configured value; - when set to 0, the page length is set to a @@ -13069,7 +13322,29 @@ def list( return query["page_token"] = json["next_page_token"] - def update(self, full_name: str, version: int, *, comment: Optional[str] = None) -> ModelVersionInfo: + def update( + self, + full_name: str, + version: int, + *, + aliases: Optional[List[RegisteredModelAlias]] = None, + catalog_name: Optional[str] = None, + comment: Optional[str] = None, + created_at: Optional[int] = None, + created_by: Optional[str] = None, + id: Optional[str] = None, + metastore_id: Optional[str] = None, + model_name: Optional[str] = None, + model_version_dependencies: Optional[DependencyList] = None, + run_id: Optional[str] = None, + run_workspace_id: Optional[int] = None, + schema_name: Optional[str] = None, + source: Optional[str] = None, + status: Optional[ModelVersionInfoStatus] = None, + storage_location: Optional[str] = None, + updated_at: Optional[int] = None, + updated_by: Optional[str] = None, + ) -> ModelVersionInfo: """Updates the specified model version. The caller must be a metastore admin or an owner of the parent registered model. 
For the latter case, @@ -13082,14 +13357,80 @@ def update(self, full_name: str, version: int, *, comment: Optional[str] = None) The three-level (fully qualified) name of the model version :param version: int The integer version number of the model version + :param aliases: List[:class:`RegisteredModelAlias`] (optional) + List of aliases associated with the model version + :param catalog_name: str (optional) + The name of the catalog containing the model version :param comment: str (optional) The comment attached to the model version + :param created_at: int (optional) + :param created_by: str (optional) + The identifier of the user who created the model version + :param id: str (optional) + The unique identifier of the model version + :param metastore_id: str (optional) + The unique identifier of the metastore containing the model version + :param model_name: str (optional) + The name of the parent registered model of the model version, relative to parent schema + :param model_version_dependencies: :class:`DependencyList` (optional) + Model version dependencies, for feature-store packaged models + :param run_id: str (optional) + MLflow run ID used when creating the model version, if ``source`` was generated by an experiment run + stored in an MLflow tracking server + :param run_workspace_id: int (optional) + ID of the Databricks workspace containing the MLflow run that generated this model version, if + applicable + :param schema_name: str (optional) + The name of the schema containing the model version, relative to parent catalog + :param source: str (optional) + URI indicating the location of the source artifacts (files) for the model version + :param status: :class:`ModelVersionInfoStatus` (optional) + Current status of the model version. Newly created model versions start in PENDING_REGISTRATION + status, then move to READY status once the model version files are uploaded and the model version is + finalized. Only model versions in READY status can be loaded for inference or served. 
+ :param storage_location: str (optional) + The storage location on the cloud under which model version data files are stored + :param updated_at: int (optional) + :param updated_by: str (optional) + The identifier of the user who updated the model version last time :returns: :class:`ModelVersionInfo` """ body = {} + if aliases is not None: + body["aliases"] = [v.as_dict() for v in aliases] + if catalog_name is not None: + body["catalog_name"] = catalog_name if comment is not None: body["comment"] = comment + if created_at is not None: + body["created_at"] = created_at + if created_by is not None: + body["created_by"] = created_by + if id is not None: + body["id"] = id + if metastore_id is not None: + body["metastore_id"] = metastore_id + if model_name is not None: + body["model_name"] = model_name + if model_version_dependencies is not None: + body["model_version_dependencies"] = model_version_dependencies.as_dict() + if run_id is not None: + body["run_id"] = run_id + if run_workspace_id is not None: + body["run_workspace_id"] = run_workspace_id + if schema_name is not None: + body["schema_name"] = schema_name + if source is not None: + body["source"] = source + if status is not None: + body["status"] = status.value + if storage_location is not None: + body["storage_location"] = storage_location + if updated_at is not None: + body["updated_at"] = updated_at + if updated_by is not None: + body["updated_by"] = updated_by headers = { "Accept": "application/json", "Content-Type": "application/json", @@ -13797,20 +14138,29 @@ class RegisteredModelsAPI: new model version, or update permissions on the registered model, users must be owners of the registered model. - Note: The securable type for models is "FUNCTION". When using REST APIs (e.g. tagging, grants) that - specify a securable type, use "FUNCTION" as the securable type.""" + Note: The securable type for models is FUNCTION. When using REST APIs (e.g. tagging, grants) that specify + a securable type, use FUNCTION as the securable type.""" def __init__(self, api_client): self._api = api_client def create( self, - catalog_name: str, - schema_name: str, - name: str, *, + aliases: Optional[List[RegisteredModelAlias]] = None, + browse_only: Optional[bool] = None, + catalog_name: Optional[str] = None, comment: Optional[str] = None, + created_at: Optional[int] = None, + created_by: Optional[str] = None, + full_name: Optional[str] = None, + metastore_id: Optional[str] = None, + name: Optional[str] = None, + owner: Optional[str] = None, + schema_name: Optional[str] = None, storage_location: Optional[str] = None, + updated_at: Optional[int] = None, + updated_by: Optional[str] = None, ) -> RegisteredModelInfo: """Creates a new registered model in Unity Catalog. @@ -13822,30 +14172,67 @@ def create( **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - The caller must have the **CREATE MODEL** or **CREATE FUNCTION** privilege on the parent schema. - :param catalog_name: str + :param aliases: List[:class:`RegisteredModelAlias`] (optional) + List of aliases associated with the registered model + :param browse_only: bool (optional) + Indicates whether the principal is limited to retrieving metadata for the associated object through + the BROWSE privilege when include_browse is enabled in the request. 
+ :param catalog_name: str (optional) The name of the catalog where the schema and the registered model reside - :param schema_name: str - The name of the schema where the registered model resides - :param name: str - The name of the registered model :param comment: str (optional) The comment attached to the registered model + :param created_at: int (optional) + Creation timestamp of the registered model in milliseconds since the Unix epoch + :param created_by: str (optional) + The identifier of the user who created the registered model + :param full_name: str (optional) + The three-level (fully qualified) name of the registered model + :param metastore_id: str (optional) + The unique identifier of the metastore + :param name: str (optional) + The name of the registered model + :param owner: str (optional) + The identifier of the user who owns the registered model + :param schema_name: str (optional) + The name of the schema where the registered model resides :param storage_location: str (optional) The storage location on the cloud under which model version data files are stored + :param updated_at: int (optional) + Last-update timestamp of the registered model in milliseconds since the Unix epoch + :param updated_by: str (optional) + The identifier of the user who updated the registered model last time :returns: :class:`RegisteredModelInfo` """ body = {} + if aliases is not None: + body["aliases"] = [v.as_dict() for v in aliases] + if browse_only is not None: + body["browse_only"] = browse_only if catalog_name is not None: body["catalog_name"] = catalog_name if comment is not None: body["comment"] = comment + if created_at is not None: + body["created_at"] = created_at + if created_by is not None: + body["created_by"] = created_by + if full_name is not None: + body["full_name"] = full_name + if metastore_id is not None: + body["metastore_id"] = metastore_id if name is not None: body["name"] = name + if owner is not None: + body["owner"] = owner if schema_name is not None: body["schema_name"] = schema_name if storage_location is not None: body["storage_location"] = storage_location + if updated_at is not None: + body["updated_at"] = updated_at + if updated_by is not None: + body["updated_by"] = updated_by headers = { "Accept": "application/json", "Content-Type": "application/json", @@ -14003,7 +14390,7 @@ def set_alias(self, full_name: str, alias: str, version_num: int) -> RegisteredM **USE_SCHEMA** privilege on the parent schema. :param full_name: str - Full name of the registered model + The three-level (fully qualified) name of the registered model :param alias: str The name of the alias :param version_num: int @@ -14028,9 +14415,20 @@ def update( self, full_name: str, *, + aliases: Optional[List[RegisteredModelAlias]] = None, + browse_only: Optional[bool] = None, + catalog_name: Optional[str] = None, comment: Optional[str] = None, + created_at: Optional[int] = None, + created_by: Optional[str] = None, + metastore_id: Optional[str] = None, + name: Optional[str] = None, new_name: Optional[str] = None, owner: Optional[str] = None, + schema_name: Optional[str] = None, + storage_location: Optional[str] = None, + updated_at: Optional[int] = None, + updated_by: Optional[str] = None, ) -> RegisteredModelInfo: """Updates the specified registered model. 
@@ -14042,22 +14440,67 @@ def update( :param full_name: str The three-level (fully qualified) name of the registered model + :param aliases: List[:class:`RegisteredModelAlias`] (optional) + List of aliases associated with the registered model + :param browse_only: bool (optional) + Indicates whether the principal is limited to retrieving metadata for the associated object through + the BROWSE privilege when include_browse is enabled in the request. + :param catalog_name: str (optional) + The name of the catalog where the schema and the registered model reside :param comment: str (optional) The comment attached to the registered model + :param created_at: int (optional) + Creation timestamp of the registered model in milliseconds since the Unix epoch + :param created_by: str (optional) + The identifier of the user who created the registered model + :param metastore_id: str (optional) + The unique identifier of the metastore + :param name: str (optional) + The name of the registered model :param new_name: str (optional) New name for the registered model. :param owner: str (optional) The identifier of the user who owns the registered model + :param schema_name: str (optional) + The name of the schema where the registered model resides + :param storage_location: str (optional) + The storage location on the cloud under which model version data files are stored + :param updated_at: int (optional) + Last-update timestamp of the registered model in milliseconds since the Unix epoch + :param updated_by: str (optional) + The identifier of the user who updated the registered model last time :returns: :class:`RegisteredModelInfo` """ body = {} + if aliases is not None: + body["aliases"] = [v.as_dict() for v in aliases] + if browse_only is not None: + body["browse_only"] = browse_only + if catalog_name is not None: + body["catalog_name"] = catalog_name if comment is not None: body["comment"] = comment + if created_at is not None: + body["created_at"] = created_at + if created_by is not None: + body["created_by"] = created_by + if metastore_id is not None: + body["metastore_id"] = metastore_id + if name is not None: + body["name"] = name if new_name is not None: body["new_name"] = new_name if owner is not None: body["owner"] = owner + if schema_name is not None: + body["schema_name"] = schema_name + if storage_location is not None: + body["storage_location"] = storage_location + if updated_at is not None: + body["updated_at"] = updated_at + if updated_by is not None: + body["updated_by"] = updated_by headers = { "Accept": "application/json", "Content-Type": "application/json", @@ -14356,6 +14799,14 @@ def list( owned by the caller (or for which the caller has the **USE_SCHEMA** privilege) will be retrieved. There is no guarantee of a specific ordering of the elements in the array. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param catalog_name: str Parent catalog for schemas of interest. 
:param include_browse: bool (optional) @@ -14567,13 +15018,28 @@ def get(self, name: str) -> StorageCredentialInfo: return StorageCredentialInfo.from_dict(res) def list( - self, *, max_results: Optional[int] = None, page_token: Optional[str] = None + self, + *, + include_unbound: Optional[bool] = None, + max_results: Optional[int] = None, + page_token: Optional[str] = None, ) -> Iterator[StorageCredentialInfo]: """Gets an array of storage credentials (as __StorageCredentialInfo__ objects). The array is limited to only those storage credentials the caller has permission to access. If the caller is a metastore admin, retrieval of credentials is unrestricted. There is no guarantee of a specific ordering of the elements in the array. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + + :param include_unbound: bool (optional) + Whether to include credentials not bound to the workspace. Effective only if the user has permission + to update the credential–workspace binding. :param max_results: int (optional) Maximum number of storage credentials to return. If not set, all the storage credentials are returned (not recommended). - when set to a value greater than 0, the page length is the minimum of @@ -14587,6 +15053,8 @@ def list( """ query = {} + if include_unbound is not None: + query["include_unbound"] = include_unbound if max_results is not None: query["max_results"] = max_results if page_token is not None: @@ -14822,6 +15290,14 @@ def list( """Gets an array of system schemas for a metastore. The caller must be an account admin or a metastore admin. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param metastore_id: str The ID for the metastore in which the system schema resides. :param max_results: int (optional) @@ -15118,6 +15594,14 @@ def list( catalog and the **USE_SCHEMA** privilege on the parent schema. There is no guarantee of a specific ordering of the elements in the array. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param catalog_name: str Name of parent catalog for tables of interest. :param schema_name: str @@ -15746,9 +16230,11 @@ def update_bindings( :param securable_name: str The name of the securable. :param add: List[:class:`WorkspaceBinding`] (optional) - List of workspace bindings. + List of workspace bindings to add. 
If a binding for the workspace already exists with a different + binding_type, adding it again with a new binding_type will update the existing binding (e.g., from + READ_WRITE to READ_ONLY). :param remove: List[:class:`WorkspaceBinding`] (optional) - List of workspace bindings. + List of workspace bindings to remove. :returns: :class:`UpdateWorkspaceBindingsResponse` """ diff --git a/databricks/sdk/service/cleanrooms.py b/databricks/sdk/service/cleanrooms.py index 57ea7e961..299d623e3 100755 --- a/databricks/sdk/service/cleanrooms.py +++ b/databricks/sdk/service/cleanrooms.py @@ -10,13 +10,13 @@ from enum import Enum from typing import Any, Callable, Dict, Iterator, List, Optional -from ._internal import Wait, _enum, _from_dict, _repeated_dict +from databricks.sdk.service import catalog, jobs, settings, sharing +from databricks.sdk.service._internal import (Wait, _enum, _from_dict, + _repeated_dict) _LOG = logging.getLogger("databricks.sdk") -from databricks.sdk.service import catalog, jobs, settings, sharing - # all definitions in this file are in alphabetical order diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py index 11a2a2b78..77feca5fe 100755 --- a/databricks/sdk/service/compute.py +++ b/databricks/sdk/service/compute.py @@ -10,8 +10,10 @@ from enum import Enum from typing import Any, Callable, Dict, Iterator, List, Optional +from databricks.sdk.service._internal import (Wait, _enum, _from_dict, + _repeated_dict, _repeated_enum) + from ..errors import OperationFailed -from ._internal import Wait, _enum, _from_dict, _repeated_dict, _repeated_enum _LOG = logging.getLogger("databricks.sdk") @@ -3135,12 +3137,9 @@ class Environment: """Required. Environment version used by the environment. Each version comes with a specific Python version and a set of Python packages. The version is a string, consisting of an integer.""" - jar_dependencies: Optional[List[str]] = None - """Use `java_dependencies` instead.""" - java_dependencies: Optional[List[str]] = None - """List of jar dependencies, should be string representing volume paths. For example: - `/Volumes/path/to/test.jar`.""" + """List of java dependencies. Each dependency is a string representing a java library path. 
For + example: `/Volumes/path/to/test.jar`.""" def as_dict(self) -> dict: """Serializes the Environment into a dictionary suitable for use as a JSON request body.""" @@ -3151,8 +3150,6 @@ def as_dict(self) -> dict: body["dependencies"] = [v for v in self.dependencies] if self.environment_version is not None: body["environment_version"] = self.environment_version - if self.jar_dependencies: - body["jar_dependencies"] = [v for v in self.jar_dependencies] if self.java_dependencies: body["java_dependencies"] = [v for v in self.java_dependencies] return body @@ -3166,8 +3163,6 @@ def as_shallow_dict(self) -> dict: body["dependencies"] = self.dependencies if self.environment_version is not None: body["environment_version"] = self.environment_version - if self.jar_dependencies: - body["jar_dependencies"] = self.jar_dependencies if self.java_dependencies: body["java_dependencies"] = self.java_dependencies return body @@ -3179,7 +3174,6 @@ def from_dict(cls, d: Dict[str, Any]) -> Environment: client=d.get("client", None), dependencies=d.get("dependencies", None), environment_version=d.get("environment_version", None), - jar_dependencies=d.get("jar_dependencies", None), java_dependencies=d.get("java_dependencies", None), ) @@ -6621,9 +6615,16 @@ class Results: data: Optional[Any] = None file_name: Optional[str] = None - """The image filename""" + """The image data in one of the following formats: + + 1. A Data URL with base64-encoded image data: `data:image/{type};base64,{base64-data}`. Example: + `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUA...` + + 2. A FileStore file path for large images: `/plots/{filename}.png`. Example: + `/plots/b6a7ad70-fb2c-4353-8aed-3f1e015174a4.png`""" file_names: Optional[List[str]] = None + """List of image data for multiple images. 
Each element follows the same format as file_name.""" is_json_schema: Optional[bool] = None """true if a JSON schema is returned instead of a string representation of the Hive type.""" @@ -7067,6 +7068,7 @@ class TerminationReasonCode(Enum): BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG = "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG" BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED = "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED" BUDGET_POLICY_RESOLUTION_FAILURE = "BUDGET_POLICY_RESOLUTION_FAILURE" + CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED = "CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED" CLOUD_ACCOUNT_SETUP_FAILURE = "CLOUD_ACCOUNT_SETUP_FAILURE" CLOUD_OPERATION_CANCELLED = "CLOUD_OPERATION_CANCELLED" CLOUD_PROVIDER_DISK_SETUP_FAILURE = "CLOUD_PROVIDER_DISK_SETUP_FAILURE" @@ -7144,6 +7146,7 @@ class TerminationReasonCode(Enum): IN_PENALTY_BOX = "IN_PENALTY_BOX" IP_EXHAUSTION_FAILURE = "IP_EXHAUSTION_FAILURE" JOB_FINISHED = "JOB_FINISHED" + K8S_ACTIVE_POD_QUOTA_EXCEEDED = "K8S_ACTIVE_POD_QUOTA_EXCEEDED" K8S_AUTOSCALING_FAILURE = "K8S_AUTOSCALING_FAILURE" K8S_DBR_CLUSTER_LAUNCH_TIMEOUT = "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT" LAZY_ALLOCATION_TIMEOUT = "LAZY_ALLOCATION_TIMEOUT" @@ -8493,11 +8496,7 @@ def delete(self, cluster_id: str) -> Wait[ClusterDetails]: } op_response = self._api.do("POST", "/api/2.1/clusters/delete", body=body, headers=headers) - return Wait( - self.wait_get_cluster_terminated, - response=DeleteClusterResponse.from_dict(op_response), - cluster_id=cluster_id, - ) + return Wait(self.wait_get_cluster_terminated, cluster_id=cluster_id) def delete_and_wait(self, cluster_id: str, timeout=timedelta(minutes=20)) -> ClusterDetails: return self.delete(cluster_id=cluster_id).result(timeout=timeout) @@ -8759,9 +8758,7 @@ def edit( } op_response = self._api.do("POST", "/api/2.1/clusters/edit", body=body, headers=headers) - return Wait( - self.wait_get_cluster_running, response=EditClusterResponse.from_dict(op_response), cluster_id=cluster_id - ) + return Wait(self.wait_get_cluster_running, cluster_id=cluster_id) def edit_and_wait( self, @@ -9123,9 +9120,7 @@ def resize( } op_response = self._api.do("POST", "/api/2.1/clusters/resize", body=body, headers=headers) - return Wait( - self.wait_get_cluster_running, response=ResizeClusterResponse.from_dict(op_response), cluster_id=cluster_id - ) + return Wait(self.wait_get_cluster_running, cluster_id=cluster_id) def resize_and_wait( self, @@ -9160,9 +9155,7 @@ def restart(self, cluster_id: str, *, restart_user: Optional[str] = None) -> Wai } op_response = self._api.do("POST", "/api/2.1/clusters/restart", body=body, headers=headers) - return Wait( - self.wait_get_cluster_running, response=RestartClusterResponse.from_dict(op_response), cluster_id=cluster_id - ) + return Wait(self.wait_get_cluster_running, cluster_id=cluster_id) def restart_and_wait( self, cluster_id: str, *, restart_user: Optional[str] = None, timeout=timedelta(minutes=20) @@ -9229,9 +9222,7 @@ def start(self, cluster_id: str) -> Wait[ClusterDetails]: } op_response = self._api.do("POST", "/api/2.1/clusters/start", body=body, headers=headers) - return Wait( - self.wait_get_cluster_running, response=StartClusterResponse.from_dict(op_response), cluster_id=cluster_id - ) + return Wait(self.wait_get_cluster_running, cluster_id=cluster_id) def start_and_wait(self, cluster_id: str, timeout=timedelta(minutes=20)) -> ClusterDetails: return self.start(cluster_id=cluster_id).result(timeout=timeout) @@ -9302,9 +9293,7 @@ def update( } op_response = self._api.do("POST", "/api/2.1/clusters/update", body=body, headers=headers) - return Wait( - 
self.wait_get_cluster_running, response=UpdateClusterResponse.from_dict(op_response), cluster_id=cluster_id - ) + return Wait(self.wait_get_cluster_running, cluster_id=cluster_id) def update_and_wait( self, @@ -9485,7 +9474,6 @@ def cancel( op_response = self._api.do("POST", "/api/1.2/commands/cancel", body=body, headers=headers) return Wait( self.wait_command_status_command_execution_cancelled, - response=CancelResponse.from_dict(op_response), cluster_id=cluster_id, command_id=command_id, context_id=context_id, diff --git a/databricks/sdk/service/dashboards.py b/databricks/sdk/service/dashboards.py index 9ea51bfb7..69b544f7d 100755 --- a/databricks/sdk/service/dashboards.py +++ b/databricks/sdk/service/dashboards.py @@ -10,14 +10,15 @@ from enum import Enum from typing import Any, Callable, Dict, Iterator, List, Optional +from databricks.sdk.service import sql +from databricks.sdk.service._internal import (Wait, _enum, _from_dict, + _repeated_dict) + from ..errors import OperationFailed -from ._internal import Wait, _enum, _from_dict, _repeated_dict _LOG = logging.getLogger("databricks.sdk") -from databricks.sdk.service import sql - # all definitions in this file are in alphabetical order @@ -1153,6 +1154,9 @@ class MessageErrorType(Enum): GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION = "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION" GENERIC_SQL_EXEC_API_CALL_EXCEPTION = "GENERIC_SQL_EXEC_API_CALL_EXCEPTION" ILLEGAL_PARAMETER_DEFINITION_EXCEPTION = "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION" + INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION = "INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION" + INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION = "INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION" + INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION = "INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION" INTERNAL_CATALOG_MISSING_UC_PATH_EXCEPTION = "INTERNAL_CATALOG_MISSING_UC_PATH_EXCEPTION" INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION = "INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION" INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION = "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION" diff --git a/databricks/sdk/service/database.py b/databricks/sdk/service/database.py index 73d084ce2..aed67de1d 100755 --- a/databricks/sdk/service/database.py +++ b/databricks/sdk/service/database.py @@ -10,7 +10,8 @@ from enum import Enum from typing import Any, Callable, Dict, Iterator, List, Optional -from ._internal import Wait, _enum, _from_dict, _repeated_dict +from databricks.sdk.service._internal import (Wait, _enum, _from_dict, + _repeated_dict) _LOG = logging.getLogger("databricks.sdk") @@ -948,7 +949,7 @@ def from_dict(cls, d: Dict[str, Any]) -> RequestedResource: @dataclass class SyncedDatabaseTable: - """Next field marker: 14""" + """Next field marker: 18""" name: str """Full three-part (catalog, schema, table) name of the table.""" diff --git a/databricks/sdk/service/dataquality.py b/databricks/sdk/service/dataquality.py new file mode 100755 index 000000000..a5a2f8710 --- /dev/null +++ b/databricks/sdk/service/dataquality.py @@ -0,0 +1,1145 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +from __future__ import annotations + +import logging +from dataclasses import dataclass +from enum import Enum +from typing import Any, Dict, Iterator, List, Optional + +from databricks.sdk.service._internal import (_enum, _from_dict, + _repeated_dict, _repeated_enum) + +_LOG = logging.getLogger("databricks.sdk") + + +# all definitions in this file are in alphabetical order + + +class AggregationGranularity(Enum): + """The granularity for aggregating data into time windows based on their timestamp.""" + + AGGREGATION_GRANULARITY_1_DAY = "AGGREGATION_GRANULARITY_1_DAY" + AGGREGATION_GRANULARITY_1_HOUR = "AGGREGATION_GRANULARITY_1_HOUR" + AGGREGATION_GRANULARITY_1_MONTH = "AGGREGATION_GRANULARITY_1_MONTH" + AGGREGATION_GRANULARITY_1_WEEK = "AGGREGATION_GRANULARITY_1_WEEK" + AGGREGATION_GRANULARITY_1_YEAR = "AGGREGATION_GRANULARITY_1_YEAR" + AGGREGATION_GRANULARITY_2_WEEKS = "AGGREGATION_GRANULARITY_2_WEEKS" + AGGREGATION_GRANULARITY_30_MINUTES = "AGGREGATION_GRANULARITY_30_MINUTES" + AGGREGATION_GRANULARITY_3_WEEKS = "AGGREGATION_GRANULARITY_3_WEEKS" + AGGREGATION_GRANULARITY_4_WEEKS = "AGGREGATION_GRANULARITY_4_WEEKS" + AGGREGATION_GRANULARITY_5_MINUTES = "AGGREGATION_GRANULARITY_5_MINUTES" + + +@dataclass +class AnomalyDetectionConfig: + """Anomaly Detection Configurations.""" + + def as_dict(self) -> dict: + """Serializes the AnomalyDetectionConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AnomalyDetectionConfig into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AnomalyDetectionConfig: + """Deserializes the AnomalyDetectionConfig from a dictionary.""" + return cls() + + +@dataclass +class CancelRefreshResponse: + """Response to cancelling a refresh.""" + + refresh: Optional[Refresh] = None + """The refresh to cancel.""" + + def as_dict(self) -> dict: + """Serializes the CancelRefreshResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.refresh: + body["refresh"] = self.refresh.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the CancelRefreshResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.refresh: + body["refresh"] = self.refresh + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> CancelRefreshResponse: + """Deserializes the CancelRefreshResponse from a dictionary.""" + return cls(refresh=_from_dict(d, "refresh", Refresh)) + + +@dataclass +class CronSchedule: + """The data quality monitoring workflow cron schedule.""" + + quartz_cron_expression: str + """The expression that determines when to run the monitor. See [examples]. + + [examples]: https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html""" + + timezone_id: str + """A Java timezone id. The schedule for a job will be resolved with respect to this timezone. See + `Java TimeZone `_ for details. 
+ The timezone id (e.g., ``America/Los_Angeles``) in which to evaluate the quartz expression.""" + + pause_status: Optional[CronSchedulePauseStatus] = None + """Read only field that indicates whether the schedule is paused or not.""" + + def as_dict(self) -> dict: + """Serializes the CronSchedule into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.pause_status is not None: + body["pause_status"] = self.pause_status.value + if self.quartz_cron_expression is not None: + body["quartz_cron_expression"] = self.quartz_cron_expression + if self.timezone_id is not None: + body["timezone_id"] = self.timezone_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the CronSchedule into a shallow dictionary of its immediate attributes.""" + body = {} + if self.pause_status is not None: + body["pause_status"] = self.pause_status + if self.quartz_cron_expression is not None: + body["quartz_cron_expression"] = self.quartz_cron_expression + if self.timezone_id is not None: + body["timezone_id"] = self.timezone_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> CronSchedule: + """Deserializes the CronSchedule from a dictionary.""" + return cls( + pause_status=_enum(d, "pause_status", CronSchedulePauseStatus), + quartz_cron_expression=d.get("quartz_cron_expression", None), + timezone_id=d.get("timezone_id", None), + ) + + +class CronSchedulePauseStatus(Enum): + """The data quality monitoring workflow cron schedule pause status.""" + + CRON_SCHEDULE_PAUSE_STATUS_PAUSED = "CRON_SCHEDULE_PAUSE_STATUS_PAUSED" + CRON_SCHEDULE_PAUSE_STATUS_UNPAUSED = "CRON_SCHEDULE_PAUSE_STATUS_UNPAUSED" + + +@dataclass +class DataProfilingConfig: + """Data Profiling Configurations.""" + + output_schema_id: str + """ID of the schema where output tables are created.""" + + assets_dir: Optional[str] = None + """Field for specifying the absolute path to a custom directory to store data-monitoring assets. + Normally prepopulated to a default user location via UI and Python APIs.""" + + baseline_table_name: Optional[str] = None + """Baseline table name. Baseline data is used to compute drift from the data in the monitored + `table_name`. The baseline table and the monitored table shall have the same schema.""" + + custom_metrics: Optional[List[DataProfilingCustomMetric]] = None + """Custom metrics.""" + + dashboard_id: Optional[str] = None + """Id of dashboard that visualizes the computed metrics. This can be empty if the monitor is in + PENDING state.""" + + drift_metrics_table_name: Optional[str] = None + """Table that stores drift metrics data. Format: `catalog.schema.table_name`.""" + + effective_warehouse_id: Optional[str] = None + """The warehouse for dashboard creation""" + + inference_log: Optional[InferenceLogConfig] = None + """Configuration for monitoring inference log tables.""" + + latest_monitor_failure_message: Optional[str] = None + """The latest error message for a monitor failure.""" + + monitor_version: Optional[int] = None + """Represents the current monitor configuration version in use. The version will be represented in + a numeric fashion (1,2,3...). The field has flexibility to take on negative values, which can + indicate corrupted monitor_version numbers.""" + + monitored_table_name: Optional[str] = None + """Unity Catalog table to monitor. 
Format: `catalog.schema.table_name`""" + + notification_settings: Optional[NotificationSettings] = None + """Field for specifying notification settings.""" + + profile_metrics_table_name: Optional[str] = None + """Table that stores profile metrics data. Format: `catalog.schema.table_name`.""" + + schedule: Optional[CronSchedule] = None + """The cron schedule.""" + + skip_builtin_dashboard: Optional[bool] = None + """Whether to skip creating a default dashboard summarizing data quality metrics.""" + + slicing_exprs: Optional[List[str]] = None + """List of column expressions to slice data with for targeted analysis. The data is grouped by each + expression independently, resulting in a separate slice for each predicate and its complements. + For example `slicing_exprs=[“col_1”, “col_2 > 10”]` will generate the following slices: + two slices for `col_2 > 10` (True and False), and one slice per unique value in `col1`. For + high-cardinality columns, only the top 100 unique values by frequency will generate slices.""" + + snapshot: Optional[SnapshotConfig] = None + """Configuration for monitoring snapshot tables.""" + + status: Optional[DataProfilingStatus] = None + """The data profiling monitor status.""" + + time_series: Optional[TimeSeriesConfig] = None + """Configuration for monitoring time series tables.""" + + warehouse_id: Optional[str] = None + """Optional argument to specify the warehouse for dashboard creation. If not specified, the first + running warehouse will be used.""" + + def as_dict(self) -> dict: + """Serializes the DataProfilingConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.assets_dir is not None: + body["assets_dir"] = self.assets_dir + if self.baseline_table_name is not None: + body["baseline_table_name"] = self.baseline_table_name + if self.custom_metrics: + body["custom_metrics"] = [v.as_dict() for v in self.custom_metrics] + if self.dashboard_id is not None: + body["dashboard_id"] = self.dashboard_id + if self.drift_metrics_table_name is not None: + body["drift_metrics_table_name"] = self.drift_metrics_table_name + if self.effective_warehouse_id is not None: + body["effective_warehouse_id"] = self.effective_warehouse_id + if self.inference_log: + body["inference_log"] = self.inference_log.as_dict() + if self.latest_monitor_failure_message is not None: + body["latest_monitor_failure_message"] = self.latest_monitor_failure_message + if self.monitor_version is not None: + body["monitor_version"] = self.monitor_version + if self.monitored_table_name is not None: + body["monitored_table_name"] = self.monitored_table_name + if self.notification_settings: + body["notification_settings"] = self.notification_settings.as_dict() + if self.output_schema_id is not None: + body["output_schema_id"] = self.output_schema_id + if self.profile_metrics_table_name is not None: + body["profile_metrics_table_name"] = self.profile_metrics_table_name + if self.schedule: + body["schedule"] = self.schedule.as_dict() + if self.skip_builtin_dashboard is not None: + body["skip_builtin_dashboard"] = self.skip_builtin_dashboard + if self.slicing_exprs: + body["slicing_exprs"] = [v for v in self.slicing_exprs] + if self.snapshot: + body["snapshot"] = self.snapshot.as_dict() + if self.status is not None: + body["status"] = self.status.value + if self.time_series: + body["time_series"] = self.time_series.as_dict() + if self.warehouse_id is not None: + body["warehouse_id"] = self.warehouse_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes 
the DataProfilingConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.assets_dir is not None: + body["assets_dir"] = self.assets_dir + if self.baseline_table_name is not None: + body["baseline_table_name"] = self.baseline_table_name + if self.custom_metrics: + body["custom_metrics"] = self.custom_metrics + if self.dashboard_id is not None: + body["dashboard_id"] = self.dashboard_id + if self.drift_metrics_table_name is not None: + body["drift_metrics_table_name"] = self.drift_metrics_table_name + if self.effective_warehouse_id is not None: + body["effective_warehouse_id"] = self.effective_warehouse_id + if self.inference_log: + body["inference_log"] = self.inference_log + if self.latest_monitor_failure_message is not None: + body["latest_monitor_failure_message"] = self.latest_monitor_failure_message + if self.monitor_version is not None: + body["monitor_version"] = self.monitor_version + if self.monitored_table_name is not None: + body["monitored_table_name"] = self.monitored_table_name + if self.notification_settings: + body["notification_settings"] = self.notification_settings + if self.output_schema_id is not None: + body["output_schema_id"] = self.output_schema_id + if self.profile_metrics_table_name is not None: + body["profile_metrics_table_name"] = self.profile_metrics_table_name + if self.schedule: + body["schedule"] = self.schedule + if self.skip_builtin_dashboard is not None: + body["skip_builtin_dashboard"] = self.skip_builtin_dashboard + if self.slicing_exprs: + body["slicing_exprs"] = self.slicing_exprs + if self.snapshot: + body["snapshot"] = self.snapshot + if self.status is not None: + body["status"] = self.status + if self.time_series: + body["time_series"] = self.time_series + if self.warehouse_id is not None: + body["warehouse_id"] = self.warehouse_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DataProfilingConfig: + """Deserializes the DataProfilingConfig from a dictionary.""" + return cls( + assets_dir=d.get("assets_dir", None), + baseline_table_name=d.get("baseline_table_name", None), + custom_metrics=_repeated_dict(d, "custom_metrics", DataProfilingCustomMetric), + dashboard_id=d.get("dashboard_id", None), + drift_metrics_table_name=d.get("drift_metrics_table_name", None), + effective_warehouse_id=d.get("effective_warehouse_id", None), + inference_log=_from_dict(d, "inference_log", InferenceLogConfig), + latest_monitor_failure_message=d.get("latest_monitor_failure_message", None), + monitor_version=d.get("monitor_version", None), + monitored_table_name=d.get("monitored_table_name", None), + notification_settings=_from_dict(d, "notification_settings", NotificationSettings), + output_schema_id=d.get("output_schema_id", None), + profile_metrics_table_name=d.get("profile_metrics_table_name", None), + schedule=_from_dict(d, "schedule", CronSchedule), + skip_builtin_dashboard=d.get("skip_builtin_dashboard", None), + slicing_exprs=d.get("slicing_exprs", None), + snapshot=_from_dict(d, "snapshot", SnapshotConfig), + status=_enum(d, "status", DataProfilingStatus), + time_series=_from_dict(d, "time_series", TimeSeriesConfig), + warehouse_id=d.get("warehouse_id", None), + ) + + +@dataclass +class DataProfilingCustomMetric: + """Custom metric definition.""" + + name: str + """Name of the metric in the output tables.""" + + definition: str + """Jinja template for a SQL expression that specifies how to compute the metric. See [create metric + definition]. 
+ + [create metric definition]: https://docs.databricks.com/en/lakehouse-monitoring/custom-metrics.html#create-definition""" + + input_columns: List[str] + """A list of column names in the input table the metric should be computed for. Can use + ``":table"`` to indicate that the metric needs information from multiple columns.""" + + output_data_type: str + """The output type of the custom metric.""" + + type: DataProfilingCustomMetricType + """The type of the custom metric.""" + + def as_dict(self) -> dict: + """Serializes the DataProfilingCustomMetric into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.definition is not None: + body["definition"] = self.definition + if self.input_columns: + body["input_columns"] = [v for v in self.input_columns] + if self.name is not None: + body["name"] = self.name + if self.output_data_type is not None: + body["output_data_type"] = self.output_data_type + if self.type is not None: + body["type"] = self.type.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DataProfilingCustomMetric into a shallow dictionary of its immediate attributes.""" + body = {} + if self.definition is not None: + body["definition"] = self.definition + if self.input_columns: + body["input_columns"] = self.input_columns + if self.name is not None: + body["name"] = self.name + if self.output_data_type is not None: + body["output_data_type"] = self.output_data_type + if self.type is not None: + body["type"] = self.type + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DataProfilingCustomMetric: + """Deserializes the DataProfilingCustomMetric from a dictionary.""" + return cls( + definition=d.get("definition", None), + input_columns=d.get("input_columns", None), + name=d.get("name", None), + output_data_type=d.get("output_data_type", None), + type=_enum(d, "type", DataProfilingCustomMetricType), + ) + + +class DataProfilingCustomMetricType(Enum): + """The custom metric type.""" + + DATA_PROFILING_CUSTOM_METRIC_TYPE_AGGREGATE = "DATA_PROFILING_CUSTOM_METRIC_TYPE_AGGREGATE" + DATA_PROFILING_CUSTOM_METRIC_TYPE_DERIVED = "DATA_PROFILING_CUSTOM_METRIC_TYPE_DERIVED" + DATA_PROFILING_CUSTOM_METRIC_TYPE_DRIFT = "DATA_PROFILING_CUSTOM_METRIC_TYPE_DRIFT" + + +class DataProfilingStatus(Enum): + """The status of the data profiling monitor.""" + + DATA_PROFILING_STATUS_ACTIVE = "DATA_PROFILING_STATUS_ACTIVE" + DATA_PROFILING_STATUS_DELETE_PENDING = "DATA_PROFILING_STATUS_DELETE_PENDING" + DATA_PROFILING_STATUS_ERROR = "DATA_PROFILING_STATUS_ERROR" + DATA_PROFILING_STATUS_FAILED = "DATA_PROFILING_STATUS_FAILED" + DATA_PROFILING_STATUS_PENDING = "DATA_PROFILING_STATUS_PENDING" + + +@dataclass +class InferenceLogConfig: + """Inference log configuration.""" + + problem_type: InferenceProblemType + """Problem type the model aims to solve.""" + + timestamp_column: str + """Column for the timestamp.""" + + granularities: List[AggregationGranularity] + """List of granularities to use when aggregating data into time windows based on their timestamp.""" + + prediction_column: str + """Column for the prediction.""" + + model_id_column: str + """Column for the model identifier.""" + + label_column: Optional[str] = None + """Column for the label.""" + + def as_dict(self) -> dict: + """Serializes the InferenceLogConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.granularities: + body["granularities"] = [v.value for v in self.granularities] + if self.label_column is not None: + 
body["label_column"] = self.label_column + if self.model_id_column is not None: + body["model_id_column"] = self.model_id_column + if self.prediction_column is not None: + body["prediction_column"] = self.prediction_column + if self.problem_type is not None: + body["problem_type"] = self.problem_type.value + if self.timestamp_column is not None: + body["timestamp_column"] = self.timestamp_column + return body + + def as_shallow_dict(self) -> dict: + """Serializes the InferenceLogConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.granularities: + body["granularities"] = self.granularities + if self.label_column is not None: + body["label_column"] = self.label_column + if self.model_id_column is not None: + body["model_id_column"] = self.model_id_column + if self.prediction_column is not None: + body["prediction_column"] = self.prediction_column + if self.problem_type is not None: + body["problem_type"] = self.problem_type + if self.timestamp_column is not None: + body["timestamp_column"] = self.timestamp_column + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> InferenceLogConfig: + """Deserializes the InferenceLogConfig from a dictionary.""" + return cls( + granularities=_repeated_enum(d, "granularities", AggregationGranularity), + label_column=d.get("label_column", None), + model_id_column=d.get("model_id_column", None), + prediction_column=d.get("prediction_column", None), + problem_type=_enum(d, "problem_type", InferenceProblemType), + timestamp_column=d.get("timestamp_column", None), + ) + + +class InferenceProblemType(Enum): + """Inference problem type the model aims to solve.""" + + INFERENCE_PROBLEM_TYPE_CLASSIFICATION = "INFERENCE_PROBLEM_TYPE_CLASSIFICATION" + INFERENCE_PROBLEM_TYPE_REGRESSION = "INFERENCE_PROBLEM_TYPE_REGRESSION" + + +@dataclass +class ListMonitorResponse: + """Response for listing Monitors.""" + + monitors: Optional[List[Monitor]] = None + + next_page_token: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the ListMonitorResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.monitors: + body["monitors"] = [v.as_dict() for v in self.monitors] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListMonitorResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.monitors: + body["monitors"] = self.monitors + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListMonitorResponse: + """Deserializes the ListMonitorResponse from a dictionary.""" + return cls(monitors=_repeated_dict(d, "monitors", Monitor), next_page_token=d.get("next_page_token", None)) + + +@dataclass +class ListRefreshResponse: + """Response for listing refreshes.""" + + next_page_token: Optional[str] = None + + refreshes: Optional[List[Refresh]] = None + + def as_dict(self) -> dict: + """Serializes the ListRefreshResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.refreshes: + body["refreshes"] = [v.as_dict() for v in self.refreshes] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListRefreshResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if 
self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.refreshes: + body["refreshes"] = self.refreshes + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListRefreshResponse: + """Deserializes the ListRefreshResponse from a dictionary.""" + return cls(next_page_token=d.get("next_page_token", None), refreshes=_repeated_dict(d, "refreshes", Refresh)) + + +@dataclass +class Monitor: + """Monitor for the data quality of unity catalog entities such as schema or table.""" + + object_type: str + """The type of the monitored object. Can be one of the following: `schema` or `table`.""" + + object_id: str + """The UUID of the request object. For example, schema id.""" + + anomaly_detection_config: Optional[AnomalyDetectionConfig] = None + """Anomaly Detection Configuration, applicable to `schema` object types.""" + + data_profiling_config: Optional[DataProfilingConfig] = None + """Data Profiling Configuration, applicable to `table` object types""" + + def as_dict(self) -> dict: + """Serializes the Monitor into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.anomaly_detection_config: + body["anomaly_detection_config"] = self.anomaly_detection_config.as_dict() + if self.data_profiling_config: + body["data_profiling_config"] = self.data_profiling_config.as_dict() + if self.object_id is not None: + body["object_id"] = self.object_id + if self.object_type is not None: + body["object_type"] = self.object_type + return body + + def as_shallow_dict(self) -> dict: + """Serializes the Monitor into a shallow dictionary of its immediate attributes.""" + body = {} + if self.anomaly_detection_config: + body["anomaly_detection_config"] = self.anomaly_detection_config + if self.data_profiling_config: + body["data_profiling_config"] = self.data_profiling_config + if self.object_id is not None: + body["object_id"] = self.object_id + if self.object_type is not None: + body["object_type"] = self.object_type + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Monitor: + """Deserializes the Monitor from a dictionary.""" + return cls( + anomaly_detection_config=_from_dict(d, "anomaly_detection_config", AnomalyDetectionConfig), + data_profiling_config=_from_dict(d, "data_profiling_config", DataProfilingConfig), + object_id=d.get("object_id", None), + object_type=d.get("object_type", None), + ) + + +@dataclass +class NotificationDestination: + """Destination of the data quality monitoring notification.""" + + email_addresses: Optional[List[str]] = None + """The list of email addresses to send the notification to. 
A maximum of 5 email addresses is + supported.""" + + def as_dict(self) -> dict: + """Serializes the NotificationDestination into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.email_addresses: + body["email_addresses"] = [v for v in self.email_addresses] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the NotificationDestination into a shallow dictionary of its immediate attributes.""" + body = {} + if self.email_addresses: + body["email_addresses"] = self.email_addresses + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> NotificationDestination: + """Deserializes the NotificationDestination from a dictionary.""" + return cls(email_addresses=d.get("email_addresses", None)) + + +@dataclass +class NotificationSettings: + """Settings for sending notifications on the data quality monitoring.""" + + on_failure: Optional[NotificationDestination] = None + """Destinations to send notifications on failure/timeout.""" + + def as_dict(self) -> dict: + """Serializes the NotificationSettings into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.on_failure: + body["on_failure"] = self.on_failure.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the NotificationSettings into a shallow dictionary of its immediate attributes.""" + body = {} + if self.on_failure: + body["on_failure"] = self.on_failure + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> NotificationSettings: + """Deserializes the NotificationSettings from a dictionary.""" + return cls(on_failure=_from_dict(d, "on_failure", NotificationDestination)) + + +@dataclass +class Refresh: + """The Refresh object gives information on a refresh of the data quality monitoring pipeline.""" + + object_type: str + """The type of the monitored object. Can be one of the following: `schema`or `table`.""" + + object_id: str + """The UUID of the request object. For example, table id.""" + + end_time_ms: Optional[int] = None + """Time when the refresh ended (milliseconds since 1/1/1970 UTC).""" + + message: Optional[str] = None + """An optional message to give insight into the current state of the refresh (e.g. 
FAILURE + messages).""" + + refresh_id: Optional[int] = None + """Unique id of the refresh operation.""" + + start_time_ms: Optional[int] = None + """Time when the refresh started (milliseconds since 1/1/1970 UTC).""" + + state: Optional[RefreshState] = None + """The current state of the refresh.""" + + trigger: Optional[RefreshTrigger] = None + """What triggered the refresh.""" + + def as_dict(self) -> dict: + """Serializes the Refresh into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.end_time_ms is not None: + body["end_time_ms"] = self.end_time_ms + if self.message is not None: + body["message"] = self.message + if self.object_id is not None: + body["object_id"] = self.object_id + if self.object_type is not None: + body["object_type"] = self.object_type + if self.refresh_id is not None: + body["refresh_id"] = self.refresh_id + if self.start_time_ms is not None: + body["start_time_ms"] = self.start_time_ms + if self.state is not None: + body["state"] = self.state.value + if self.trigger is not None: + body["trigger"] = self.trigger.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the Refresh into a shallow dictionary of its immediate attributes.""" + body = {} + if self.end_time_ms is not None: + body["end_time_ms"] = self.end_time_ms + if self.message is not None: + body["message"] = self.message + if self.object_id is not None: + body["object_id"] = self.object_id + if self.object_type is not None: + body["object_type"] = self.object_type + if self.refresh_id is not None: + body["refresh_id"] = self.refresh_id + if self.start_time_ms is not None: + body["start_time_ms"] = self.start_time_ms + if self.state is not None: + body["state"] = self.state + if self.trigger is not None: + body["trigger"] = self.trigger + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Refresh: + """Deserializes the Refresh from a dictionary.""" + return cls( + end_time_ms=d.get("end_time_ms", None), + message=d.get("message", None), + object_id=d.get("object_id", None), + object_type=d.get("object_type", None), + refresh_id=d.get("refresh_id", None), + start_time_ms=d.get("start_time_ms", None), + state=_enum(d, "state", RefreshState), + trigger=_enum(d, "trigger", RefreshTrigger), + ) + + +class RefreshState(Enum): + """The state of the refresh.""" + + MONITOR_REFRESH_STATE_CANCELED = "MONITOR_REFRESH_STATE_CANCELED" + MONITOR_REFRESH_STATE_FAILED = "MONITOR_REFRESH_STATE_FAILED" + MONITOR_REFRESH_STATE_PENDING = "MONITOR_REFRESH_STATE_PENDING" + MONITOR_REFRESH_STATE_RUNNING = "MONITOR_REFRESH_STATE_RUNNING" + MONITOR_REFRESH_STATE_SUCCESS = "MONITOR_REFRESH_STATE_SUCCESS" + MONITOR_REFRESH_STATE_UNKNOWN = "MONITOR_REFRESH_STATE_UNKNOWN" + + +class RefreshTrigger(Enum): + """The trigger of the refresh.""" + + MONITOR_REFRESH_TRIGGER_DATA_CHANGE = "MONITOR_REFRESH_TRIGGER_DATA_CHANGE" + MONITOR_REFRESH_TRIGGER_MANUAL = "MONITOR_REFRESH_TRIGGER_MANUAL" + MONITOR_REFRESH_TRIGGER_SCHEDULE = "MONITOR_REFRESH_TRIGGER_SCHEDULE" + MONITOR_REFRESH_TRIGGER_UNKNOWN = "MONITOR_REFRESH_TRIGGER_UNKNOWN" + + +@dataclass +class SnapshotConfig: + """Snapshot analysis configuration.""" + + def as_dict(self) -> dict: + """Serializes the SnapshotConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SnapshotConfig into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, 
Any]) -> SnapshotConfig: + """Deserializes the SnapshotConfig from a dictionary.""" + return cls() + + +@dataclass +class TimeSeriesConfig: + """Time series analysis configuration.""" + + timestamp_column: str + """Column for the timestamp.""" + + granularities: List[AggregationGranularity] + """List of granularities to use when aggregating data into time windows based on their timestamp.""" + + def as_dict(self) -> dict: + """Serializes the TimeSeriesConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.granularities: + body["granularities"] = [v.value for v in self.granularities] + if self.timestamp_column is not None: + body["timestamp_column"] = self.timestamp_column + return body + + def as_shallow_dict(self) -> dict: + """Serializes the TimeSeriesConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.granularities: + body["granularities"] = self.granularities + if self.timestamp_column is not None: + body["timestamp_column"] = self.timestamp_column + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> TimeSeriesConfig: + """Deserializes the TimeSeriesConfig from a dictionary.""" + return cls( + granularities=_repeated_enum(d, "granularities", AggregationGranularity), + timestamp_column=d.get("timestamp_column", None), + ) + + +class DataQualityAPI: + """Manage the data quality of Unity Catalog objects (currently support `schema` and `table`)""" + + def __init__(self, api_client): + self._api = api_client + + def cancel_refresh(self, object_type: str, object_id: str, refresh_id: int) -> CancelRefreshResponse: + """Cancels a data quality monitor refresh. Currently only supported for the `table` `object_type`. + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + :param refresh_id: int + Unique id of the refresh operation. + + :returns: :class:`CancelRefreshResponse` + """ + + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "POST", + f"/api/data-quality/v1/monitors/{object_type}/{object_id}/refreshes/{refresh_id}/cancel", + headers=headers, + ) + return CancelRefreshResponse.from_dict(res) + + def create_monitor(self, monitor: Monitor) -> Monitor: + """Create a data quality monitor on a Unity Catalog object. The caller must provide either + `anomaly_detection_config` for a schema monitor or `data_profiling_config` for a table monitor. + + For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog, + have **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the table 2. have + **USE_CATALOG** on the table's parent catalog, be an owner of the table's parent schema, and have + **SELECT** access on the table. 3. have the following permissions: - **USE_CATALOG** on the table's + parent catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table. + + Workspace assets, such as the dashboard, will be created in the workspace where this call was made. + + :param monitor: :class:`Monitor` + The monitor to create. 
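A minimal sketch of assembling such a monitor, assuming the service is exposed on the workspace client as `w.data_quality` and using placeholder table and schema UUIDs:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.dataquality import (AggregationGranularity, CronSchedule,
                                                DataProfilingConfig, Monitor, TimeSeriesConfig)

w = WorkspaceClient()

# A table monitor: profile metrics are aggregated into daily windows and the
# output tables are written to the schema identified by output_schema_id.
monitor = Monitor(
    object_type="table",
    object_id="11111111-2222-3333-4444-555555555555",  # placeholder table UUID
    data_profiling_config=DataProfilingConfig(
        output_schema_id="66666666-7777-8888-9999-000000000000",  # placeholder schema UUID
        time_series=TimeSeriesConfig(
            timestamp_column="event_ts",
            granularities=[AggregationGranularity.AGGREGATION_GRANULARITY_1_DAY],
        ),
        schedule=CronSchedule(
            quartz_cron_expression="0 0 6 * * ?",  # daily at 06:00
            timezone_id="America/Los_Angeles",
        ),
    ),
)

created = w.data_quality.create_monitor(monitor=monitor)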
+ + :returns: :class:`Monitor` + """ + body = monitor.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/data-quality/v1/monitors", body=body, headers=headers) + return Monitor.from_dict(res) + + def create_refresh(self, object_type: str, object_id: str, refresh: Refresh) -> Refresh: + """Creates a refresh. Currently only supported for the `table` `object_type`. + + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the + table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: + - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema`or `table`. + :param object_id: str + The UUID of the request object. For example, table id. + :param refresh: :class:`Refresh` + The refresh to create + + :returns: :class:`Refresh` + """ + body = refresh.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "POST", f"/api/data-quality/v1/monitors/{object_type}/{object_id}/refreshes", body=body, headers=headers + ) + return Refresh.from_dict(res) + + def delete_monitor(self, object_type: str, object_id: str): + """Delete a data quality monitor on Unity Catalog object. + + For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. + have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. + have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on + the table's parent schema - be an owner of the table. + + Note that the metric tables and dashboard will not be deleted as part of this call; those assets must + be manually cleaned up (if desired). + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/data-quality/v1/monitors/{object_type}/{object_id}", headers=headers) + + def delete_refresh(self, object_type: str, object_id: str, refresh_id: int): + """(Unimplemented) Delete a refresh + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + :param refresh_id: int + Unique id of the refresh operation. + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do( + "DELETE", f"/api/data-quality/v1/monitors/{object_type}/{object_id}/refreshes/{refresh_id}", headers=headers + ) + + def get_monitor(self, object_type: str, object_id: str) -> Monitor: + """Read a data quality monitor on Unity Catalog object. + + For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. + have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema. 3. + have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on + the table's parent schema - **SELECT** privilege on the table. + + The returned information includes configuration values, as well as information on assets created by + the monitor. 
Some information (e.g., dashboard) may be filtered out if the caller is in a different + workspace than where the monitor was created. + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + + :returns: :class:`Monitor` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/data-quality/v1/monitors/{object_type}/{object_id}", headers=headers) + return Monitor.from_dict(res) + + def get_refresh(self, object_type: str, object_id: str, refresh_id: int) -> Refresh: + """Get data quality monitor refresh. + + For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. + have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. + have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on + the table's parent schema - **SELECT** privilege on the table. + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + :param refresh_id: int + Unique id of the refresh operation. + + :returns: :class:`Refresh` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", f"/api/data-quality/v1/monitors/{object_type}/{object_id}/refreshes/{refresh_id}", headers=headers + ) + return Refresh.from_dict(res) + + def list_monitor(self, *, page_size: Optional[int] = None, page_token: Optional[str] = None) -> Iterator[Monitor]: + """(Unimplemented) List data quality monitors. + + :param page_size: int (optional) + :param page_token: str (optional) + + :returns: Iterator over :class:`Monitor` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + while True: + json = self._api.do("GET", "/api/data-quality/v1/monitors", query=query, headers=headers) + if "monitors" in json: + for v in json["monitors"]: + yield Monitor.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] + + def list_refresh( + self, object_type: str, object_id: str, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> Iterator[Refresh]: + """List data quality monitor refreshes. + + For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. + have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. + have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on + the table's parent schema - **SELECT** privilege on the table. + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. 
+ :param page_size: int (optional) + :param page_token: str (optional) + + :returns: Iterator over :class:`Refresh` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + while True: + json = self._api.do( + "GET", + f"/api/data-quality/v1/monitors/{object_type}/{object_id}/refreshes", + query=query, + headers=headers, + ) + if "refreshes" in json: + for v in json["refreshes"]: + yield Refresh.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] + + def update_monitor(self, object_type: str, object_id: str, monitor: Monitor, update_mask: str) -> Monitor: + """Update a data quality monitor on Unity Catalog object. + + For the `table` `object_type`, The caller must either: 1. be an owner of the table's parent catalog 2. + have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. + have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on + the table's parent schema - be an owner of the table. + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + :param monitor: :class:`Monitor` + The monitor to update. + :param update_mask: str + The field mask to specify which fields to update as a comma-separated list. Example value: + `data_profiling_config.custom_metrics,data_profiling_config.schedule.quartz_cron_expression` + + :returns: :class:`Monitor` + """ + body = monitor.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", f"/api/data-quality/v1/monitors/{object_type}/{object_id}", query=query, body=body, headers=headers + ) + return Monitor.from_dict(res) + + def update_refresh( + self, object_type: str, object_id: str, refresh_id: int, refresh: Refresh, update_mask: str + ) -> Refresh: + """(Unimplemented) Update a refresh + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + :param refresh_id: int + Unique id of the refresh operation. + :param refresh: :class:`Refresh` + The refresh to update. + :param update_mask: str + The field mask to specify which fields to update. 
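Field-mask updates follow the same pattern as update_monitor above; a sketch, assuming the `w.data_quality` accessor and placeholder identifiers, that rewrites only the monitor's cron expression:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.dataquality import CronSchedule, DataProfilingConfig, Monitor

w = WorkspaceClient()

object_type = "table"
object_id = "11111111-2222-3333-4444-555555555555"  # placeholder table UUID

# Only the fields named in update_mask are applied; all other settings are left untouched.
updated = w.data_quality.update_monitor(
    object_type=object_type,
    object_id=object_id,
    monitor=Monitor(
        object_type=object_type,
        object_id=object_id,
        data_profiling_config=DataProfilingConfig(
            output_schema_id="66666666-7777-8888-9999-000000000000",  # placeholder schema UUID
            schedule=CronSchedule(
                quartz_cron_expression="0 0 18 * * ?",  # move the scheduled refresh to 18:00
                timezone_id="UTC",
            ),
        ),
    ),
    update_mask="data_profiling_config.schedule.quartz_cron_expression",
)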
+ + :returns: :class:`Refresh` + """ + body = refresh.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/data-quality/v1/monitors/{object_type}/{object_id}/refreshes/{refresh_id}", + query=query, + body=body, + headers=headers, + ) + return Refresh.from_dict(res) diff --git a/databricks/sdk/service/files.py b/databricks/sdk/service/files.py index 2117a09f3..e26de85ab 100755 --- a/databricks/sdk/service/files.py +++ b/databricks/sdk/service/files.py @@ -6,7 +6,8 @@ from dataclasses import dataclass from typing import Any, BinaryIO, Dict, Iterator, List, Optional -from ._internal import _escape_multi_segment_path_parameter, _repeated_dict +from databricks.sdk.service._internal import ( + _escape_multi_segment_path_parameter, _repeated_dict) _LOG = logging.getLogger("databricks.sdk") diff --git a/databricks/sdk/service/iam.py b/databricks/sdk/service/iam.py index a470d7544..0a1f53ca2 100755 --- a/databricks/sdk/service/iam.py +++ b/databricks/sdk/service/iam.py @@ -7,7 +7,8 @@ from enum import Enum from typing import Any, Dict, Iterator, List, Optional -from ._internal import _enum, _from_dict, _repeated_dict, _repeated_enum +from databricks.sdk.service._internal import (_enum, _from_dict, + _repeated_dict, _repeated_enum) _LOG = logging.getLogger("databricks.sdk") diff --git a/databricks/sdk/service/iamv2.py b/databricks/sdk/service/iamv2.py index 25cd2ad25..b95605da5 100755 --- a/databricks/sdk/service/iamv2.py +++ b/databricks/sdk/service/iamv2.py @@ -7,7 +7,7 @@ from enum import Enum from typing import Any, Dict, List, Optional -from ._internal import _enum, _from_dict, _repeated_enum +from databricks.sdk.service._internal import _enum, _from_dict, _repeated_enum _LOG = logging.getLogger("databricks.sdk") diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index 9519e8ba7..8aa530264 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -10,14 +10,15 @@ from enum import Enum from typing import Any, Callable, Dict, Iterator, List, Optional +from databricks.sdk.service import compute +from databricks.sdk.service._internal import (Wait, _enum, _from_dict, + _repeated_dict) + from ..errors import OperationFailed -from ._internal import Wait, _enum, _from_dict, _repeated_dict _LOG = logging.getLogger("databricks.sdk") -from databricks.sdk.service import compute - # all definitions in this file are in alphabetical order @@ -7318,7 +7319,7 @@ class TableUpdateTriggerConfiguration: last time the trigger fired. The minimum allowed value is 60 seconds.""" table_names: Optional[List[str]] = None - """A list of Delta tables to monitor for changes. The table name must be in the format + """A list of tables to monitor for changes. 
The table name must be in the format `catalog_name.schema_name.table_name`.""" wait_after_last_change_seconds: Optional[int] = None @@ -8428,11 +8429,7 @@ def cancel_run(self, run_id: int) -> Wait[Run]: } op_response = self._api.do("POST", "/api/2.2/jobs/runs/cancel", body=body, headers=headers) - return Wait( - self.wait_get_run_job_terminated_or_skipped, - response=CancelRunResponse.from_dict(op_response), - run_id=run_id, - ) + return Wait(self.wait_get_run_job_terminated_or_skipped, run_id=run_id) def cancel_run_and_wait(self, run_id: int, timeout=timedelta(minutes=20)) -> Run: return self.cancel_run(run_id=run_id).result(timeout=timeout) diff --git a/databricks/sdk/service/marketplace.py b/databricks/sdk/service/marketplace.py index a199010ab..8fc57b942 100755 --- a/databricks/sdk/service/marketplace.py +++ b/databricks/sdk/service/marketplace.py @@ -7,7 +7,8 @@ from enum import Enum from typing import Any, Dict, Iterator, List, Optional -from ._internal import _enum, _from_dict, _repeated_dict, _repeated_enum +from databricks.sdk.service._internal import (_enum, _from_dict, + _repeated_dict, _repeated_enum) _LOG = logging.getLogger("databricks.sdk") @@ -44,6 +45,7 @@ class AssetType(Enum): ASSET_TYPE_APP = "ASSET_TYPE_APP" ASSET_TYPE_DATA_TABLE = "ASSET_TYPE_DATA_TABLE" ASSET_TYPE_GIT_REPO = "ASSET_TYPE_GIT_REPO" + ASSET_TYPE_MCP = "ASSET_TYPE_MCP" ASSET_TYPE_MEDIA = "ASSET_TYPE_MEDIA" ASSET_TYPE_MODEL = "ASSET_TYPE_MODEL" ASSET_TYPE_NOTEBOOK = "ASSET_TYPE_NOTEBOOK" diff --git a/databricks/sdk/service/ml.py b/databricks/sdk/service/ml.py index 6a132360c..1e0d689d8 100755 --- a/databricks/sdk/service/ml.py +++ b/databricks/sdk/service/ml.py @@ -10,8 +10,10 @@ from enum import Enum from typing import Any, Callable, Dict, Iterator, List, Optional +from databricks.sdk.service._internal import (Wait, _enum, _from_dict, + _repeated_dict, _repeated_enum) + from ..errors import OperationFailed -from ._internal import Wait, _enum, _from_dict, _repeated_dict, _repeated_enum _LOG = logging.getLogger("databricks.sdk") diff --git a/databricks/sdk/service/oauth2.py b/databricks/sdk/service/oauth2.py index 58c57808d..32dcaf8f3 100755 --- a/databricks/sdk/service/oauth2.py +++ b/databricks/sdk/service/oauth2.py @@ -6,7 +6,7 @@ from dataclasses import dataclass from typing import Any, Dict, Iterator, List, Optional -from ._internal import _from_dict, _repeated_dict +from databricks.sdk.service._internal import _from_dict, _repeated_dict _LOG = logging.getLogger("databricks.sdk") diff --git a/databricks/sdk/service/pipelines.py b/databricks/sdk/service/pipelines.py index ff309b6ae..6ea0874e5 100755 --- a/databricks/sdk/service/pipelines.py +++ b/databricks/sdk/service/pipelines.py @@ -10,14 +10,15 @@ from enum import Enum from typing import Any, Callable, Dict, Iterator, List, Optional +from databricks.sdk.service import compute +from databricks.sdk.service._internal import (Wait, _enum, _from_dict, + _repeated_dict, _repeated_enum) + from ..errors import OperationFailed -from ._internal import Wait, _enum, _from_dict, _repeated_dict, _repeated_enum _LOG = logging.getLogger("databricks.sdk") -from databricks.sdk.service import compute - # all definitions in this file are in alphabetical order @@ -3775,9 +3776,7 @@ def stop(self, pipeline_id: str) -> Wait[GetPipelineResponse]: } op_response = self._api.do("POST", f"/api/2.0/pipelines/{pipeline_id}/stop", headers=headers) - return Wait( - self.wait_get_pipeline_idle, response=StopPipelineResponse.from_dict(op_response), pipeline_id=pipeline_id - ) + 
return Wait(self.wait_get_pipeline_idle, pipeline_id=pipeline_id) def stop_and_wait(self, pipeline_id: str, timeout=timedelta(minutes=20)) -> GetPipelineResponse: return self.stop(pipeline_id=pipeline_id).result(timeout=timeout) diff --git a/databricks/sdk/service/provisioning.py b/databricks/sdk/service/provisioning.py index 8e34b28f0..857f03012 100755 --- a/databricks/sdk/service/provisioning.py +++ b/databricks/sdk/service/provisioning.py @@ -10,8 +10,10 @@ from enum import Enum from typing import Any, Callable, Dict, Iterator, List, Optional +from databricks.sdk.service._internal import (Wait, _enum, _from_dict, + _repeated_dict, _repeated_enum) + from ..errors import OperationFailed -from ._internal import Wait, _enum, _from_dict, _repeated_dict, _repeated_enum _LOG = logging.getLogger("databricks.sdk") @@ -55,9 +57,9 @@ class AwsKeyInfo: """The AWS KMS key alias.""" reuse_key_for_cluster_volumes: Optional[bool] = None - """This field applies only if the `use_cases` property includes `STORAGE`. If this is set to `true` + """This field applies only if the `use_cases` property includes `STORAGE`. If this is set to true or omitted, the key is also used to encrypt cluster EBS volumes. If you do not want to use this - key for encrypting EBS volumes, set to `false`.""" + key for encrypting EBS volumes, set to false.""" def as_dict(self) -> dict: """Serializes the AwsKeyInfo into a dictionary suitable for use as a JSON request body.""" @@ -96,6 +98,75 @@ def from_dict(cls, d: Dict[str, Any]) -> AwsKeyInfo: ) +@dataclass +class AzureKeyInfo: + disk_encryption_set_id: Optional[str] = None + """The Disk Encryption Set id that is used to represent the key info used for Managed Disk BYOK use + case""" + + key_access_configuration: Optional[KeyAccessConfiguration] = None + """The structure to store key access credential This is set if the Managed Identity is being used + to access the Azure Key Vault key.""" + + key_name: Optional[str] = None + """The name of the key in KeyVault.""" + + key_vault_uri: Optional[str] = None + """The base URI of the KeyVault.""" + + tenant_id: Optional[str] = None + """The tenant id where the KeyVault lives.""" + + version: Optional[str] = None + """The current key version.""" + + def as_dict(self) -> dict: + """Serializes the AzureKeyInfo into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.disk_encryption_set_id is not None: + body["disk_encryption_set_id"] = self.disk_encryption_set_id + if self.key_access_configuration: + body["key_access_configuration"] = self.key_access_configuration.as_dict() + if self.key_name is not None: + body["key_name"] = self.key_name + if self.key_vault_uri is not None: + body["key_vault_uri"] = self.key_vault_uri + if self.tenant_id is not None: + body["tenant_id"] = self.tenant_id + if self.version is not None: + body["version"] = self.version + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AzureKeyInfo into a shallow dictionary of its immediate attributes.""" + body = {} + if self.disk_encryption_set_id is not None: + body["disk_encryption_set_id"] = self.disk_encryption_set_id + if self.key_access_configuration: + body["key_access_configuration"] = self.key_access_configuration + if self.key_name is not None: + body["key_name"] = self.key_name + if self.key_vault_uri is not None: + body["key_vault_uri"] = self.key_vault_uri + if self.tenant_id is not None: + body["tenant_id"] = self.tenant_id + if self.version is not None: + body["version"] = self.version + return body + + 
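A small round-trip sketch for the new AzureKeyInfo payload, using placeholder Key Vault identifiers; from_dict is defined immediately below:

from databricks.sdk.service.provisioning import AzureKeyInfo

key_info = AzureKeyInfo(
    key_vault_uri="https://example-vault.vault.azure.net/",  # placeholder vault URI
    key_name="example-cmk",                                  # placeholder key name
    tenant_id="00000000-0000-0000-0000-000000000000",        # placeholder tenant id
    version="1",
)

# Exercises the generated serializers shown here.
assert AzureKeyInfo.from_dict(key_info.as_dict()).key_name == "example-cmk"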
@classmethod + def from_dict(cls, d: Dict[str, Any]) -> AzureKeyInfo: + """Deserializes the AzureKeyInfo from a dictionary.""" + return cls( + disk_encryption_set_id=d.get("disk_encryption_set_id", None), + key_access_configuration=_from_dict(d, "key_access_configuration", KeyAccessConfiguration), + key_name=d.get("key_name", None), + key_vault_uri=d.get("key_vault_uri", None), + tenant_id=d.get("tenant_id", None), + version=d.get("version", None), + ) + + @dataclass class AzureWorkspaceInfo: resource_group: Optional[str] = None @@ -130,8 +201,6 @@ def from_dict(cls, d: Dict[str, Any]) -> AzureWorkspaceInfo: @dataclass class CloudResourceContainer: - """The general workspace configurations that are specific to cloud providers.""" - gcp: Optional[CustomerFacingGcpCloudResourceContainer] = None def as_dict(self) -> dict: @@ -157,16 +226,18 @@ def from_dict(cls, d: Dict[str, Any]) -> CloudResourceContainer: @dataclass class CreateAwsKeyInfo: key_arn: str - """The AWS KMS key's Amazon Resource Name (ARN). Note that the key's AWS region is inferred from - the ARN.""" + """The AWS KMS key's Amazon Resource Name (ARN).""" key_alias: Optional[str] = None """The AWS KMS key alias.""" + key_region: Optional[str] = None + """The AWS KMS key region.""" + reuse_key_for_cluster_volumes: Optional[bool] = None - """This field applies only if the `use_cases` property includes `STORAGE`. If this is set to `true` - or omitted, the key is also used to encrypt cluster EBS volumes. To not use this key also for - encrypting EBS volumes, set this to `false`.""" + """This field applies only if the `use_cases` property includes `STORAGE`. If this is set to true + or omitted, the key is also used to encrypt cluster EBS volumes. If you do not want to use this + key for encrypting EBS volumes, set to false.""" def as_dict(self) -> dict: """Serializes the CreateAwsKeyInfo into a dictionary suitable for use as a JSON request body.""" @@ -175,6 +246,8 @@ def as_dict(self) -> dict: body["key_alias"] = self.key_alias if self.key_arn is not None: body["key_arn"] = self.key_arn + if self.key_region is not None: + body["key_region"] = self.key_region if self.reuse_key_for_cluster_volumes is not None: body["reuse_key_for_cluster_volumes"] = self.reuse_key_for_cluster_volumes return body @@ -186,6 +259,8 @@ def as_shallow_dict(self) -> dict: body["key_alias"] = self.key_alias if self.key_arn is not None: body["key_arn"] = self.key_arn + if self.key_region is not None: + body["key_region"] = self.key_region if self.reuse_key_for_cluster_volumes is not None: body["reuse_key_for_cluster_volumes"] = self.reuse_key_for_cluster_volumes return body @@ -196,6 +271,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateAwsKeyInfo: return cls( key_alias=d.get("key_alias", None), key_arn=d.get("key_arn", None), + key_region=d.get("key_region", None), reuse_key_for_cluster_volumes=d.get("reuse_key_for_cluster_volumes", None), ) @@ -227,7 +303,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateCredentialAwsCredentials: @dataclass class CreateCredentialStsRole: role_arn: Optional[str] = None - """The Amazon Resource Name (ARN) of the cross account role.""" + """The Amazon Resource Name (ARN) of the cross account IAM role.""" def as_dict(self) -> dict: """Serializes the CreateCredentialStsRole into a dictionary suitable for use as a JSON request body.""" @@ -252,7 +328,8 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateCredentialStsRole: @dataclass class CreateGcpKeyInfo: kms_key_id: str - """The GCP KMS key's resource name""" + """Globally 
unique kms key resource id of the form + projects/testProjectId/locations/us-east4/keyRings/gcpCmkKeyRing/cryptoKeys/cmk-eastus4""" def as_dict(self) -> dict: """Serializes the CreateGcpKeyInfo into a dictionary suitable for use as a JSON request body.""" @@ -332,13 +409,18 @@ def from_dict(cls, d: Dict[str, Any]) -> Credential: ) +class CustomerFacingComputeMode(Enum): + """Corresponds to compute mode defined here: + https://src.dev.databricks.com/databricks/universe@9076536b18479afd639d1c1f9dd5a59f72215e69/-/blob/central/api/common.proto?L872 + """ + + HYBRID = "HYBRID" + SERVERLESS = "SERVERLESS" + + @dataclass class CustomerFacingGcpCloudResourceContainer: - """The general workspace configurations that are specific to Google Cloud.""" - project_id: Optional[str] = None - """The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your - workspace.""" def as_dict(self) -> dict: """Serializes the CustomerFacingGcpCloudResourceContainer into a dictionary suitable for use as a JSON request body.""" @@ -360,6 +442,12 @@ def from_dict(cls, d: Dict[str, Any]) -> CustomerFacingGcpCloudResourceContainer return cls(project_id=d.get("project_id", None)) +class CustomerFacingStorageMode(Enum): + + CUSTOMER_HOSTED = "CUSTOMER_HOSTED" + DEFAULT_STORAGE = "DEFAULT_STORAGE" + + @dataclass class CustomerManagedKey: account_id: Optional[str] = None @@ -367,6 +455,8 @@ class CustomerManagedKey: aws_key_info: Optional[AwsKeyInfo] = None + azure_key_info: Optional[AzureKeyInfo] = None + creation_time: Optional[int] = None """Time in epoch milliseconds when the customer key was created.""" @@ -385,6 +475,8 @@ def as_dict(self) -> dict: body["account_id"] = self.account_id if self.aws_key_info: body["aws_key_info"] = self.aws_key_info.as_dict() + if self.azure_key_info: + body["azure_key_info"] = self.azure_key_info.as_dict() if self.creation_time is not None: body["creation_time"] = self.creation_time if self.customer_managed_key_id is not None: @@ -402,6 +494,8 @@ def as_shallow_dict(self) -> dict: body["account_id"] = self.account_id if self.aws_key_info: body["aws_key_info"] = self.aws_key_info + if self.azure_key_info: + body["azure_key_info"] = self.azure_key_info if self.creation_time is not None: body["creation_time"] = self.creation_time if self.customer_managed_key_id is not None: @@ -418,6 +512,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CustomerManagedKey: return cls( account_id=d.get("account_id", None), aws_key_info=_from_dict(d, "aws_key_info", AwsKeyInfo), + azure_key_info=_from_dict(d, "azure_key_info", AzureKeyInfo), creation_time=d.get("creation_time", None), customer_managed_key_id=d.get("customer_managed_key_id", None), gcp_key_info=_from_dict(d, "gcp_key_info", GcpKeyInfo), @@ -425,37 +520,15 @@ def from_dict(cls, d: Dict[str, Any]) -> CustomerManagedKey: ) -@dataclass -class DeleteResponse: - def as_dict(self) -> dict: - """Serializes the DeleteResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - return body - - def as_shallow_dict(self) -> dict: - """Serializes the DeleteResponse into a shallow dictionary of its immediate attributes.""" - body = {} - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> DeleteResponse: - """Deserializes the DeleteResponse from a dictionary.""" - return cls() - - class EndpointUseCase(Enum): - """This enumeration represents the type of Databricks VPC [endpoint service] that was used when - creating this VPC endpoint. 
- - [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html""" DATAPLANE_RELAY_ACCESS = "DATAPLANE_RELAY_ACCESS" WORKSPACE_ACCESS = "WORKSPACE_ACCESS" class ErrorType(Enum): - """The AWS resource associated with this error: credentials, VPC, subnet, security group, or - network ACL.""" + """ErrorType and WarningType are used to represent the type of error or warning by NetworkHealth + and NetworkWarning defined in central/api/accounts/accounts.proto""" CREDENTIALS = "credentials" NETWORK_ACL = "networkAcl" @@ -465,52 +538,49 @@ class ErrorType(Enum): @dataclass -class ExternalCustomerInfo: - authoritative_user_email: Optional[str] = None - """Email of the authoritative user.""" +class GcpCommonNetworkConfig: + """The shared network config for GCP workspace. This object has common network configurations that + are network attributions of a workspace. DEPRECATED. Use GkeConfig instead.""" - authoritative_user_full_name: Optional[str] = None - """The authoritative user full name.""" + gke_cluster_master_ip_range: Optional[str] = None + """The IP range that will be used to allocate GKE cluster master resources from. This field must + not be set if gke_cluster_type=PUBLIC_NODE_PUBLIC_MASTER.""" - customer_name: Optional[str] = None - """The legal entity name for the external workspace""" + gke_connectivity_type: Optional[GkeConfigConnectivityType] = None + """The type of network connectivity of the GKE cluster.""" def as_dict(self) -> dict: - """Serializes the ExternalCustomerInfo into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.authoritative_user_email is not None: - body["authoritative_user_email"] = self.authoritative_user_email - if self.authoritative_user_full_name is not None: - body["authoritative_user_full_name"] = self.authoritative_user_full_name - if self.customer_name is not None: - body["customer_name"] = self.customer_name + """Serializes the GcpCommonNetworkConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.gke_cluster_master_ip_range is not None: + body["gke_cluster_master_ip_range"] = self.gke_cluster_master_ip_range + if self.gke_connectivity_type is not None: + body["gke_connectivity_type"] = self.gke_connectivity_type.value return body def as_shallow_dict(self) -> dict: - """Serializes the ExternalCustomerInfo into a shallow dictionary of its immediate attributes.""" - body = {} - if self.authoritative_user_email is not None: - body["authoritative_user_email"] = self.authoritative_user_email - if self.authoritative_user_full_name is not None: - body["authoritative_user_full_name"] = self.authoritative_user_full_name - if self.customer_name is not None: - body["customer_name"] = self.customer_name + """Serializes the GcpCommonNetworkConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.gke_cluster_master_ip_range is not None: + body["gke_cluster_master_ip_range"] = self.gke_cluster_master_ip_range + if self.gke_connectivity_type is not None: + body["gke_connectivity_type"] = self.gke_connectivity_type return body @classmethod - def from_dict(cls, d: Dict[str, Any]) -> ExternalCustomerInfo: - """Deserializes the ExternalCustomerInfo from a dictionary.""" + def from_dict(cls, d: Dict[str, Any]) -> GcpCommonNetworkConfig: + """Deserializes the GcpCommonNetworkConfig from a dictionary.""" return cls( - authoritative_user_email=d.get("authoritative_user_email", None), - authoritative_user_full_name=d.get("authoritative_user_full_name", 
None), - customer_name=d.get("customer_name", None), + gke_cluster_master_ip_range=d.get("gke_cluster_master_ip_range", None), + gke_connectivity_type=_enum(d, "gke_connectivity_type", GkeConfigConnectivityType), ) @dataclass class GcpKeyInfo: kms_key_id: str - """The GCP KMS key's resource name""" + """Globally unique kms key resource id of the form + projects/testProjectId/locations/us-east4/keyRings/gcpCmkKeyRing/cryptoKeys/cmk-eastus4""" def as_dict(self) -> dict: """Serializes the GcpKeyInfo into a dictionary suitable for use as a JSON request body.""" @@ -534,37 +604,17 @@ def from_dict(cls, d: Dict[str, Any]) -> GcpKeyInfo: @dataclass class GcpManagedNetworkConfig: - """The network settings for the workspace. The configurations are only for Databricks-managed VPCs. - It is ignored if you specify a customer-managed VPC in the `network_id` field.", All the IP - range configurations must be mutually exclusive. An attempt to create a workspace fails if - Databricks detects an IP range overlap. - - Specify custom IP ranges in CIDR format. The IP ranges for these fields must not overlap, and - all IP addresses must be entirely within the following ranges: `10.0.0.0/8`, `100.64.0.0/10`, - `172.16.0.0/12`, `192.168.0.0/16`, and `240.0.0.0/4`. - - The sizes of these IP ranges affect the maximum number of nodes for the workspace. - - **Important**: Confirm the IP ranges used by your Databricks workspace before creating the - workspace. You cannot change them after your workspace is deployed. If the IP address ranges for - your Databricks are too small, IP exhaustion can occur, causing your Databricks jobs to fail. To - determine the address range sizes that you need, Databricks provides a calculator as a Microsoft - Excel spreadsheet. See [calculate subnet sizes for a new workspace]. - - [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html - """ + """The network configuration for the workspace.""" gke_cluster_pod_ip_range: Optional[str] = None - """The IP range from which to allocate GKE cluster pods. No bigger than `/9` and no smaller than - `/21`.""" + """The IP range that will be used to allocate GKE cluster Pods from.""" gke_cluster_service_ip_range: Optional[str] = None - """The IP range from which to allocate GKE cluster services. No bigger than `/16` and no smaller - than `/27`.""" + """The IP range that will be used to allocate GKE cluster Services from.""" subnet_cidr: Optional[str] = None - """The IP range from which to allocate GKE cluster nodes. No bigger than `/9` and no smaller than - `/29`.""" + """The IP range which will be used to allocate GKE cluster nodes from. Note: Pods, services and + master IP range must be mutually exclusive.""" def as_dict(self) -> dict: """Serializes the GcpManagedNetworkConfig into a dictionary suitable for use as a JSON request body.""" @@ -600,29 +650,24 @@ def from_dict(cls, d: Dict[str, Any]) -> GcpManagedNetworkConfig: @dataclass class GcpNetworkInfo: - """The Google Cloud specific information for this network (for example, the VPC ID, subnet ID, and - secondary IP ranges).""" - network_project_id: str - """The Google Cloud project ID of the VPC network.""" + """The GCP project ID for network resources. This project is where the VPC and subnet resides.""" vpc_id: str - """The ID of the VPC associated with this network. 
VPC IDs can be used in multiple network - configurations.""" + """The customer-provided VPC ID.""" subnet_id: str - """The ID of the subnet associated with this network.""" + """The customer-provided Subnet ID that will be available to Clusters in Workspaces using this + Network.""" subnet_region: str - """The Google Cloud region of the workspace data plane (for example, `us-east4`).""" pod_ip_range_name: str - """The name of the secondary IP range for pods. A Databricks-managed GKE cluster uses this IP range - for its pods. This secondary IP range can be used by only one workspace.""" + """Name of the secondary range within the subnet that will be used by GKE as Pod IP range. This is + BYO VPC specific. DB VPC uses network.getGcpManagedNetworkConfig.getGkeClusterPodIpRange""" service_ip_range_name: str - """The name of the secondary IP range for services. A Databricks-managed GKE cluster uses this IP - range for its services. This secondary IP range can be used by only one workspace.""" + """Name of the secondary range within the subnet that will be used by GKE as Service IP range.""" def as_dict(self) -> dict: """Serializes the GcpNetworkInfo into a dictionary suitable for use as a JSON request body.""" @@ -673,22 +718,15 @@ def from_dict(cls, d: Dict[str, Any]) -> GcpNetworkInfo: @dataclass class GcpVpcEndpointInfo: - """The Google Cloud specific information for this Private Service Connect endpoint.""" - project_id: str - """The Google Cloud project ID of the VPC network where the PSC connection resides.""" psc_endpoint_name: str - """The name of the PSC endpoint in the Google Cloud project.""" endpoint_region: str - """Region of the PSC endpoint.""" psc_connection_id: Optional[str] = None - """The unique ID of this PSC connection.""" service_attachment_id: Optional[str] = None - """The service attachment this PSC connection connects to.""" def as_dict(self) -> dict: """Serializes the GcpVpcEndpointInfo into a dictionary suitable for use as a JSON request body.""" @@ -734,22 +772,14 @@ def from_dict(cls, d: Dict[str, Any]) -> GcpVpcEndpointInfo: @dataclass class GkeConfig: - """The configurations for the GKE cluster of a Databricks workspace.""" + """The configurations of the GKE cluster used by the GCP workspace.""" connectivity_type: Optional[GkeConfigConnectivityType] = None - """Specifies the network connectivity types for the GKE nodes and the GKE master network. - - Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the workspace. The GKE nodes - will not have public IPs. - - Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. The nodes of a public GKE cluster - have public IP addresses.""" + """The type of network connectivity of the GKE cluster.""" master_ip_range: Optional[str] = None - """The IP range from which to allocate GKE cluster master resources. This field will be ignored if - GKE private cluster is not enabled. - - It must be exactly as big as `/28`.""" + """The IP range that will be used to allocate GKE cluster master resources from. 
This field must + not be set if gke_cluster_type=PUBLIC_NODE_PUBLIC_MASTER.""" def as_dict(self) -> dict: """Serializes the GkeConfig into a dictionary suitable for use as a JSON request body.""" @@ -791,10 +821,33 @@ class GkeConfigConnectivityType(Enum): PUBLIC_NODE_PUBLIC_MASTER = "PUBLIC_NODE_PUBLIC_MASTER" +@dataclass +class KeyAccessConfiguration: + """The credential ID that is used to access the key vault.""" + + credential_id: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the KeyAccessConfiguration into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.credential_id is not None: + body["credential_id"] = self.credential_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the KeyAccessConfiguration into a shallow dictionary of its immediate attributes.""" + body = {} + if self.credential_id is not None: + body["credential_id"] = self.credential_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> KeyAccessConfiguration: + """Deserializes the KeyAccessConfiguration from a dictionary.""" + return cls(credential_id=d.get("credential_id", None)) + + class KeyUseCase(Enum): - """Possible values are: * `MANAGED_SERVICES`: Encrypts notebook and secret data in the control - plane * `STORAGE`: Encrypts the workspace's root S3 bucket (root DBFS and system data) and, - optionally, cluster EBS volumes.""" MANAGED_SERVICES = "MANAGED_SERVICES" STORAGE = "STORAGE" @@ -820,8 +873,12 @@ class Network: """The human-readable name of the network configuration.""" security_group_ids: Optional[List[str]] = None + """IDs of one to five security groups associated with this network. Security group IDs **cannot** + be used in multiple network configurations.""" subnet_ids: Optional[List[str]] = None + """IDs of at least two subnets associated with this network. Subnet IDs **cannot** be used in + multiple network configurations.""" vpc_endpoints: Optional[NetworkVpcEndpoints] = None @@ -952,18 +1009,13 @@ def from_dict(cls, d: Dict[str, Any]) -> NetworkHealth: @dataclass class NetworkVpcEndpoints: - """If specified, contains the VPC endpoints used to allow cluster communication from this VPC over - [AWS PrivateLink]. - - [AWS PrivateLink]: https://aws.amazon.com/privatelink/""" - - rest_api: List[str] - """The VPC endpoint ID used by this network to access the Databricks REST API.""" - - dataplane_relay: List[str] + dataplane_relay: Optional[List[str]] = None """The VPC endpoint ID used by this network to access the Databricks secure cluster connectivity relay.""" + rest_api: Optional[List[str]] = None + """The VPC endpoint ID used by this network to access the Databricks REST API.""" + def as_dict(self) -> dict: """Serializes the NetworkVpcEndpoints into a dictionary suitable for use as a JSON request body.""" body = {} @@ -1020,9 +1072,6 @@ def from_dict(cls, d: Dict[str, Any]) -> NetworkWarning: class PricingTier(Enum): - """The pricing tier of the workspace. For pricing tier information, see [AWS Pricing]. - - [AWS Pricing]: https://databricks.com/product/aws-pricing""" COMMUNITY_EDITION = "COMMUNITY_EDITION" DEDICATED = "DEDICATED" @@ -1033,11 +1082,6 @@ class PricingTier(Enum): class PrivateAccessLevel(Enum): - """The private access level controls which VPC endpoints can connect to the UI or API of any - workspace that attaches this private access settings object. * `ACCOUNT` level access (the - default) allows only VPC endpoints that are registered in your Databricks account connect to - your workspace. 
* `ENDPOINT` level access allows only specified VPC endpoints connect to your
-    workspace. For details, see `allowed_vpc_endpoint_ids`."""

     ACCOUNT = "ACCOUNT"
     ENDPOINT = "ENDPOINT"

@@ -1045,13 +1089,26 @@ class PrivateAccessLevel(Enum):

 @dataclass
 class PrivateAccessSettings:
+    """*"""
+
     account_id: Optional[str] = None
-    """The Databricks account ID that hosts the credential."""
+    """The Databricks account ID that hosts the private access settings."""

     allowed_vpc_endpoint_ids: Optional[List[str]] = None
-    """An array of Databricks VPC endpoint IDs."""
+    """An array of Databricks VPC endpoint IDs. This is the Databricks ID that is returned when
+    registering the VPC endpoint configuration in your Databricks account. This is not the ID of the
+    VPC endpoint in AWS. Only used when private_access_level is set to ENDPOINT. This is an allow
+    list of VPC endpoints in your account that can connect to your workspace over AWS
+    PrivateLink. If hybrid access to your workspace is enabled by setting public_access_enabled to
+    true, this control only works for PrivateLink connections. To control how your workspace is
+    accessed via public internet, see IP access lists."""

     private_access_level: Optional[PrivateAccessLevel] = None
+    """The private access level controls which VPC endpoints can connect to the UI or API of any
+    workspace that attaches this private access settings object. `ACCOUNT` level access (the
+    default) allows only VPC endpoints that are registered in your Databricks account to connect to
+    your workspace. `ENDPOINT` level access allows only specified VPC endpoints to connect to your
+    workspace. For details, see allowed_vpc_endpoint_ids."""

     private_access_settings_id: Optional[str] = None
     """Databricks private access settings ID."""
@@ -1061,12 +1118,11 @@ class PrivateAccessSettings:

     public_access_enabled: Optional[bool] = None
     """Determines if the workspace can be accessed over public internet. For fully private workspaces,
-    you can optionally specify `false`, but only if you implement both the front-end and the
-    back-end PrivateLink connections. Otherwise, specify `true`, which means that public access is
-    enabled."""
+    you can optionally specify false, but only if you implement both the front-end and the back-end
+    PrivateLink connections.
Otherwise, specify true, which means that public access is enabled.""" region: Optional[str] = None - """The cloud region for workspaces attached to this private access settings object.""" + """The AWS region for workspaces attached to this private access settings object.""" def as_dict(self) -> dict: """Serializes the PrivateAccessSettings into a dictionary suitable for use as a JSON request body.""" @@ -1120,30 +1176,10 @@ def from_dict(cls, d: Dict[str, Any]) -> PrivateAccessSettings: ) -@dataclass -class ReplaceResponse: - def as_dict(self) -> dict: - """Serializes the ReplaceResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - return body - - def as_shallow_dict(self) -> dict: - """Serializes the ReplaceResponse into a shallow dictionary of its immediate attributes.""" - body = {} - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> ReplaceResponse: - """Deserializes the ReplaceResponse from a dictionary.""" - return cls() - - @dataclass class RootBucketInfo: - """Root S3 bucket information.""" - bucket_name: Optional[str] = None - """The name of the S3 bucket.""" + """Name of the S3 bucket""" def as_dict(self) -> dict: """Serializes the RootBucketInfo into a dictionary suitable for use as a JSON request body.""" @@ -1168,12 +1204,20 @@ def from_dict(cls, d: Dict[str, Any]) -> RootBucketInfo: @dataclass class StorageConfiguration: account_id: Optional[str] = None - """The Databricks account ID that hosts the credential.""" + """The Databricks account ID associated with this storage configuration.""" creation_time: Optional[int] = None """Time in epoch milliseconds when the storage configuration was created.""" + role_arn: Optional[str] = None + """Optional IAM role that is used to access the workspace catalog which is created during workspace + creation for UC by Default. If a storage configuration with this field populated is used to + create a workspace, then a workspace catalog is created together with the workspace. 
The + workspace catalog shares the root bucket with internal workspace storage (including DBFS root) + but uses a dedicated bucket path prefix.""" + root_bucket_info: Optional[RootBucketInfo] = None + """The root bucket information for the storage configuration.""" storage_configuration_id: Optional[str] = None """Databricks storage configuration ID.""" @@ -1188,6 +1232,8 @@ def as_dict(self) -> dict: body["account_id"] = self.account_id if self.creation_time is not None: body["creation_time"] = self.creation_time + if self.role_arn is not None: + body["role_arn"] = self.role_arn if self.root_bucket_info: body["root_bucket_info"] = self.root_bucket_info.as_dict() if self.storage_configuration_id is not None: @@ -1203,6 +1249,8 @@ def as_shallow_dict(self) -> dict: body["account_id"] = self.account_id if self.creation_time is not None: body["creation_time"] = self.creation_time + if self.role_arn is not None: + body["role_arn"] = self.role_arn if self.root_bucket_info: body["root_bucket_info"] = self.root_bucket_info if self.storage_configuration_id is not None: @@ -1217,6 +1265,7 @@ def from_dict(cls, d: Dict[str, Any]) -> StorageConfiguration: return cls( account_id=d.get("account_id", None), creation_time=d.get("creation_time", None), + role_arn=d.get("role_arn", None), root_bucket_info=_from_dict(d, "root_bucket_info", RootBucketInfo), storage_configuration_id=d.get("storage_configuration_id", None), storage_configuration_name=d.get("storage_configuration_name", None), @@ -1225,18 +1274,12 @@ def from_dict(cls, d: Dict[str, Any]) -> StorageConfiguration: @dataclass class StsRole: - external_id: Optional[str] = None - """The external ID that needs to be trusted by the cross-account role. This is always your - Databricks account ID.""" - role_arn: Optional[str] = None - """The Amazon Resource Name (ARN) of the cross account role.""" + """The Amazon Resource Name (ARN) of the cross account IAM role.""" def as_dict(self) -> dict: """Serializes the StsRole into a dictionary suitable for use as a JSON request body.""" body = {} - if self.external_id is not None: - body["external_id"] = self.external_id if self.role_arn is not None: body["role_arn"] = self.role_arn return body @@ -1244,8 +1287,6 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the StsRole into a shallow dictionary of its immediate attributes.""" body = {} - if self.external_id is not None: - body["external_id"] = self.external_id if self.role_arn is not None: body["role_arn"] = self.role_arn return body @@ -1253,31 +1294,16 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> StsRole: """Deserializes the StsRole from a dictionary.""" - return cls(external_id=d.get("external_id", None), role_arn=d.get("role_arn", None)) - - -@dataclass -class UpdateResponse: - def as_dict(self) -> dict: - """Serializes the UpdateResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - return body - - def as_shallow_dict(self) -> dict: - """Serializes the UpdateResponse into a shallow dictionary of its immediate attributes.""" - body = {} - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> UpdateResponse: - """Deserializes the UpdateResponse from a dictionary.""" - return cls() + return cls(role_arn=d.get("role_arn", None)) @dataclass class VpcEndpoint: + """*""" + account_id: Optional[str] = None - """The Databricks account ID that hosts the VPC endpoint configuration.""" + """The Databricks account ID that hosts the VPC 
endpoint configuration. TODO - This may signal an + OpenAPI diff; it does not show up in the generated spec""" aws_account_id: Optional[str] = None """The AWS Account in which the VPC endpoint object exists.""" @@ -1294,6 +1320,7 @@ class VpcEndpoint: """The ID of the VPC endpoint object in AWS.""" gcp_vpc_endpoint_info: Optional[GcpVpcEndpointInfo] = None + """The cloud info of this vpc endpoint. Info for a GCP vpc endpoint.""" region: Optional[str] = None """The AWS region in which this VPC endpoint object exists.""" @@ -1305,6 +1332,11 @@ class VpcEndpoint: [AWS DescribeVpcEndpoint documentation]: https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-vpc-endpoints.html""" use_case: Optional[EndpointUseCase] = None + """This enumeration represents the type of Databricks VPC endpoint service that was used when + creating this VPC endpoint. If the VPC endpoint connects to the Databricks control plane for + either the front-end connection or the back-end REST API connection, the value is + WORKSPACE_ACCESS. If the VPC endpoint connects to the Databricks workspace for the back-end + secure cluster connectivity relay, the value is DATAPLANE_RELAY_ACCESS.""" vpc_endpoint_id: Optional[str] = None """Databricks VPC endpoint ID. This is the Databricks-specific name of the VPC endpoint. Do not @@ -1381,8 +1413,6 @@ def from_dict(cls, d: Dict[str, Any]) -> VpcEndpoint: class VpcStatus(Enum): - """The status of this network configuration object in terms of its use in a workspace: * - `UNATTACHED`: Unattached. * `VALID`: Valid. * `BROKEN`: Broken. * `WARNED`: Warned.""" BROKEN = "BROKEN" UNATTACHED = "UNATTACHED" @@ -1391,7 +1421,6 @@ class VpcStatus(Enum): class WarningType(Enum): - """The AWS resource associated with this warning: a subnet or a security group.""" SECURITY_GROUP = "securityGroup" SUBNET = "subnet" @@ -1403,7 +1432,6 @@ class Workspace: """Databricks account ID.""" aws_region: Optional[str] = None - """The AWS region of the workspace data plane (for example, `us-west-2`).""" azure_workspace_info: Optional[AzureWorkspaceInfo] = None @@ -1412,6 +1440,9 @@ class Workspace: cloud_resource_container: Optional[CloudResourceContainer] = None + compute_mode: Optional[CustomerFacingComputeMode] = None + """The compute mode of the workspace.""" + creation_time: Optional[int] = None """Time in epoch milliseconds when the workspace was created.""" @@ -1424,22 +1455,15 @@ class Workspace: characters. The key can be of maximum length of 127 characters, and cannot be empty.""" deployment_name: Optional[str] = None - """The deployment name defines part of the subdomain for the workspace. The workspace URL for web - application and REST APIs is `.cloud.databricks.com`. - - This value must be unique across all non-deleted deployments across all AWS regions.""" - external_customer_info: Optional[ExternalCustomerInfo] = None - """If this workspace is for a external customer, then external_customer_info is populated. If this - workspace is not for a external customer, then external_customer_info is empty.""" + expected_workspace_status: Optional[WorkspaceStatus] = None + """A client owned field used to indicate the workspace status that the client expects to be in. 
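A hedged sketch of inspecting the `use_case` field documented above on registered VPC endpoints, assuming an account-level client whose credentials come from the environment and the `vpc_endpoints` attribute as in the existing SDK:

from databricks.sdk import AccountClient
from databricks.sdk.service.provisioning import EndpointUseCase

a = AccountClient()  # reads account host and credentials from the environment
for ep in a.vpc_endpoints.list():
    kind = "relay" if ep.use_case == EndpointUseCase.DATAPLANE_RELAY_ACCESS else "workspace access"
    print(ep.vpc_endpoint_id, kind)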
For + now this is only used to unblock Temporal workflow for GCP least privileged workspace.""" gcp_managed_network_config: Optional[GcpManagedNetworkConfig] = None gke_config: Optional[GkeConfig] = None - is_no_public_ip_enabled: Optional[bool] = None - """Whether no public IP is enabled for the workspace.""" - location: Optional[str] = None """The Google Cloud region of the workspace data plane in your Google account (for example, `us-east4`).""" @@ -1447,9 +1471,17 @@ class Workspace: managed_services_customer_managed_key_id: Optional[str] = None """ID of the key configuration for encrypting managed services.""" + network: Optional[WorkspaceNetwork] = None + """The network configuration for the workspace. + + DEPRECATED. Use `network_id` instead.""" + + network_connectivity_config_id: Optional[str] = None + """The object ID of network connectivity config.""" + network_id: Optional[str] = None - """The network configuration ID that is attached to the workspace. This field is available only if - the network is a customer-managed network.""" + """If this workspace is BYO VPC, then the network_id will be populated. If this workspace is not + BYO VPC, then the network_id will be empty.""" pricing_tier: Optional[PricingTier] = None @@ -1469,6 +1501,9 @@ class Workspace: storage_customer_managed_key_id: Optional[str] = None """ID of the key configuration for encrypting workspace storage.""" + storage_mode: Optional[CustomerFacingStorageMode] = None + """The storage mode of the workspace.""" + workspace_id: Optional[int] = None """A unique integer ID for the workspace""" @@ -1476,6 +1511,7 @@ class Workspace: """The human-readable name of the workspace.""" workspace_status: Optional[WorkspaceStatus] = None + """The status of a workspace""" workspace_status_message: Optional[str] = None """Message describing the current workspace status.""" @@ -1493,6 +1529,8 @@ def as_dict(self) -> dict: body["cloud"] = self.cloud if self.cloud_resource_container: body["cloud_resource_container"] = self.cloud_resource_container.as_dict() + if self.compute_mode is not None: + body["compute_mode"] = self.compute_mode.value if self.creation_time is not None: body["creation_time"] = self.creation_time if self.credentials_id is not None: @@ -1501,18 +1539,20 @@ def as_dict(self) -> dict: body["custom_tags"] = self.custom_tags if self.deployment_name is not None: body["deployment_name"] = self.deployment_name - if self.external_customer_info: - body["external_customer_info"] = self.external_customer_info.as_dict() + if self.expected_workspace_status is not None: + body["expected_workspace_status"] = self.expected_workspace_status.value if self.gcp_managed_network_config: body["gcp_managed_network_config"] = self.gcp_managed_network_config.as_dict() if self.gke_config: body["gke_config"] = self.gke_config.as_dict() - if self.is_no_public_ip_enabled is not None: - body["is_no_public_ip_enabled"] = self.is_no_public_ip_enabled if self.location is not None: body["location"] = self.location if self.managed_services_customer_managed_key_id is not None: body["managed_services_customer_managed_key_id"] = self.managed_services_customer_managed_key_id + if self.network: + body["network"] = self.network.as_dict() + if self.network_connectivity_config_id is not None: + body["network_connectivity_config_id"] = self.network_connectivity_config_id if self.network_id is not None: body["network_id"] = self.network_id if self.pricing_tier is not None: @@ -1523,6 +1563,8 @@ def as_dict(self) -> dict: body["storage_configuration_id"] = 
self.storage_configuration_id if self.storage_customer_managed_key_id is not None: body["storage_customer_managed_key_id"] = self.storage_customer_managed_key_id + if self.storage_mode is not None: + body["storage_mode"] = self.storage_mode.value if self.workspace_id is not None: body["workspace_id"] = self.workspace_id if self.workspace_name is not None: @@ -1546,6 +1588,8 @@ def as_shallow_dict(self) -> dict: body["cloud"] = self.cloud if self.cloud_resource_container: body["cloud_resource_container"] = self.cloud_resource_container + if self.compute_mode is not None: + body["compute_mode"] = self.compute_mode if self.creation_time is not None: body["creation_time"] = self.creation_time if self.credentials_id is not None: @@ -1554,18 +1598,20 @@ def as_shallow_dict(self) -> dict: body["custom_tags"] = self.custom_tags if self.deployment_name is not None: body["deployment_name"] = self.deployment_name - if self.external_customer_info: - body["external_customer_info"] = self.external_customer_info + if self.expected_workspace_status is not None: + body["expected_workspace_status"] = self.expected_workspace_status if self.gcp_managed_network_config: body["gcp_managed_network_config"] = self.gcp_managed_network_config if self.gke_config: body["gke_config"] = self.gke_config - if self.is_no_public_ip_enabled is not None: - body["is_no_public_ip_enabled"] = self.is_no_public_ip_enabled if self.location is not None: body["location"] = self.location if self.managed_services_customer_managed_key_id is not None: body["managed_services_customer_managed_key_id"] = self.managed_services_customer_managed_key_id + if self.network: + body["network"] = self.network + if self.network_connectivity_config_id is not None: + body["network_connectivity_config_id"] = self.network_connectivity_config_id if self.network_id is not None: body["network_id"] = self.network_id if self.pricing_tier is not None: @@ -1576,6 +1622,8 @@ def as_shallow_dict(self) -> dict: body["storage_configuration_id"] = self.storage_configuration_id if self.storage_customer_managed_key_id is not None: body["storage_customer_managed_key_id"] = self.storage_customer_managed_key_id + if self.storage_mode is not None: + body["storage_mode"] = self.storage_mode if self.workspace_id is not None: body["workspace_id"] = self.workspace_id if self.workspace_name is not None: @@ -1595,21 +1643,24 @@ def from_dict(cls, d: Dict[str, Any]) -> Workspace: azure_workspace_info=_from_dict(d, "azure_workspace_info", AzureWorkspaceInfo), cloud=d.get("cloud", None), cloud_resource_container=_from_dict(d, "cloud_resource_container", CloudResourceContainer), + compute_mode=_enum(d, "compute_mode", CustomerFacingComputeMode), creation_time=d.get("creation_time", None), credentials_id=d.get("credentials_id", None), custom_tags=d.get("custom_tags", None), deployment_name=d.get("deployment_name", None), - external_customer_info=_from_dict(d, "external_customer_info", ExternalCustomerInfo), + expected_workspace_status=_enum(d, "expected_workspace_status", WorkspaceStatus), gcp_managed_network_config=_from_dict(d, "gcp_managed_network_config", GcpManagedNetworkConfig), gke_config=_from_dict(d, "gke_config", GkeConfig), - is_no_public_ip_enabled=d.get("is_no_public_ip_enabled", None), location=d.get("location", None), managed_services_customer_managed_key_id=d.get("managed_services_customer_managed_key_id", None), + network=_from_dict(d, "network", WorkspaceNetwork), + network_connectivity_config_id=d.get("network_connectivity_config_id", None), 
network_id=d.get("network_id", None), pricing_tier=_enum(d, "pricing_tier", PricingTier), private_access_settings_id=d.get("private_access_settings_id", None), storage_configuration_id=d.get("storage_configuration_id", None), storage_customer_managed_key_id=d.get("storage_customer_managed_key_id", None), + storage_mode=_enum(d, "storage_mode", CustomerFacingStorageMode), workspace_id=d.get("workspace_id", None), workspace_name=d.get("workspace_name", None), workspace_status=_enum(d, "workspace_status", WorkspaceStatus), @@ -1617,9 +1668,65 @@ def from_dict(cls, d: Dict[str, Any]) -> Workspace: ) +@dataclass +class WorkspaceNetwork: + """The network configuration for workspaces.""" + + gcp_common_network_config: Optional[GcpCommonNetworkConfig] = None + """The shared network config for GCP workspace. This object has common network configurations that + are network attributions of a workspace. This object is input-only.""" + + gcp_managed_network_config: Optional[GcpManagedNetworkConfig] = None + """The mutually exclusive network deployment modes. The option decides which network mode the + workspace will use. The network config for GCP workspace with Databricks managed network. This + object is input-only and will not be provided when listing workspaces. See + go/gcp-byovpc-alpha-design for interface decisions.""" + + network_id: Optional[str] = None + """The ID of the network object, if the workspace is a BYOVPC workspace. This should apply to + workspaces on all clouds in internal services. In accounts-rest-api, user will use + workspace.network_id for input and output instead. Currently (2021-06-19) the network ID is only + used by GCP.""" + + def as_dict(self) -> dict: + """Serializes the WorkspaceNetwork into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.gcp_common_network_config: + body["gcp_common_network_config"] = self.gcp_common_network_config.as_dict() + if self.gcp_managed_network_config: + body["gcp_managed_network_config"] = self.gcp_managed_network_config.as_dict() + if self.network_id is not None: + body["network_id"] = self.network_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the WorkspaceNetwork into a shallow dictionary of its immediate attributes.""" + body = {} + if self.gcp_common_network_config: + body["gcp_common_network_config"] = self.gcp_common_network_config + if self.gcp_managed_network_config: + body["gcp_managed_network_config"] = self.gcp_managed_network_config + if self.network_id is not None: + body["network_id"] = self.network_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> WorkspaceNetwork: + """Deserializes the WorkspaceNetwork from a dictionary.""" + return cls( + gcp_common_network_config=_from_dict(d, "gcp_common_network_config", GcpCommonNetworkConfig), + gcp_managed_network_config=_from_dict(d, "gcp_managed_network_config", GcpManagedNetworkConfig), + network_id=d.get("network_id", None), + ) + + class WorkspaceStatus(Enum): - """The status of the workspace. For workspace creation, usually it is set to `PROVISIONING` - initially. Continue to check the status until the status is `RUNNING`.""" + """The different statuses of a workspace. 
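A sketch of building the new `WorkspaceNetwork` object and round-tripping it through the serializers, assuming the field names shown above; the IP ranges and the network configuration ID are placeholders:

from databricks.sdk.service.provisioning import (
    GcpCommonNetworkConfig,
    GcpManagedNetworkConfig,
    GkeConfigConnectivityType,
    WorkspaceNetwork,
)

network = WorkspaceNetwork(
    network_id="<network-configuration-id>",  # placeholder
    gcp_common_network_config=GcpCommonNetworkConfig(
        gke_connectivity_type=GkeConfigConnectivityType.PRIVATE_NODE_PUBLIC_MASTER,
        gke_cluster_master_ip_range="10.3.0.0/28",
    ),
    gcp_managed_network_config=GcpManagedNetworkConfig(
        subnet_cidr="10.0.0.0/16",
        gke_cluster_pod_ip_range="10.1.0.0/16",
        gke_cluster_service_ip_range="10.2.0.0/20",
    ),
)
assert WorkspaceNetwork.from_dict(network.as_dict()).network_id == network.network_id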
The following represents the current set of valid + transitions from status to status: NOT_PROVISIONED -> PROVISIONING -> CANCELLED PROVISIONING -> + RUNNING -> FAILED -> CANCELLED (note that this transition is disallowed in the MultiWorkspace + Project) RUNNING -> PROVISIONING -> BANNED -> CANCELLED FAILED -> PROVISIONING -> CANCELLED + BANNED -> RUNNING -> CANCELLED Note that a transition from any state to itself is also valid. + TODO(PLAT-5867): add a transition from CANCELLED to some other value (e.g. RECOVERING)""" BANNED = "BANNED" CANCELLING = "CANCELLING" @@ -1671,29 +1778,30 @@ def create(self, credentials_name: str, aws_credentials: CreateCredentialAwsCred res = self._api.do("POST", f"/api/2.0/accounts/{self._api.account_id}/credentials", body=body, headers=headers) return Credential.from_dict(res) - def delete(self, credentials_id: str): + def delete(self, credentials_id: str) -> Credential: """Deletes a Databricks credential configuration object for an account, both specified by ID. You cannot delete a credential that is associated with any workspace. :param credentials_id: str Databricks Account API credential configuration ID - + :returns: :class:`Credential` """ headers = { "Accept": "application/json", } - self._api.do( + res = self._api.do( "DELETE", f"/api/2.0/accounts/{self._api.account_id}/credentials/{credentials_id}", headers=headers ) + return Credential.from_dict(res) def get(self, credentials_id: str) -> Credential: """Gets a Databricks credential configuration object for an account, both specified by ID. :param credentials_id: str - Databricks Account API credential configuration ID + Credential configuration ID :returns: :class:`Credential` """ @@ -1708,7 +1816,7 @@ def get(self, credentials_id: str) -> Credential: return Credential.from_dict(res) def list(self) -> Iterator[Credential]: - """Gets all Databricks credential configurations associated with an account specified by ID. + """List Databricks credential configuration objects for an account, specified by ID. :returns: Iterator over :class:`Credential` @@ -1785,25 +1893,26 @@ def create( ) return CustomerManagedKey.from_dict(res) - def delete(self, customer_managed_key_id: str): + def delete(self, customer_managed_key_id: str) -> CustomerManagedKey: """Deletes a customer-managed key configuration object for an account. You cannot delete a configuration that is associated with a running workspace. :param customer_managed_key_id: str Databricks encryption key configuration ID. - + :returns: :class:`CustomerManagedKey` """ headers = { "Accept": "application/json", } - self._api.do( + res = self._api.do( "DELETE", f"/api/2.0/accounts/{self._api.account_id}/customer-managed-keys/{customer_managed_key_id}", headers=headers, ) + return CustomerManagedKey.from_dict(res) def get(self, customer_managed_key_id: str) -> CustomerManagedKey: """Gets a customer-managed key configuration object for an account, specified by ID. This operation @@ -1837,16 +1946,7 @@ def get(self, customer_managed_key_id: str) -> CustomerManagedKey: return CustomerManagedKey.from_dict(res) def list(self) -> Iterator[CustomerManagedKey]: - """Gets all customer-managed key configuration objects for an account. If the key is specified as a - workspace's managed services customer-managed key, Databricks uses the key to encrypt the workspace's - notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. 
- If the key is specified as a workspace's storage customer-managed key, the key is used to encrypt the - workspace's root S3 bucket and optionally can encrypt cluster EBS volumes data in the data plane. - - **Important**: Customer-managed keys are supported only for some deployment types, subscription types, - and AWS regions. - - This operation is available only if your account is on the E2 version of the platform. + """Lists Databricks customer-managed key configurations for an account. :returns: Iterator over :class:`CustomerManagedKey` @@ -1869,9 +1969,9 @@ def __init__(self, api_client): def create( self, - network_name: str, *, gcp_network_info: Optional[GcpNetworkInfo] = None, + network_name: Optional[str] = None, security_group_ids: Optional[List[str]] = None, subnet_ids: Optional[List[str]] = None, vpc_endpoints: Optional[NetworkVpcEndpoints] = None, @@ -1880,9 +1980,9 @@ def create( """Creates a Databricks network configuration that represents an VPC and its resources. The VPC will be used for new Databricks clusters. This requires a pre-existing VPC and subnets. - :param network_name: str - The human-readable name of the network configuration. :param gcp_network_info: :class:`GcpNetworkInfo` (optional) + :param network_name: str (optional) + The human-readable name of the network configuration. :param security_group_ids: List[str] (optional) IDs of one to five security groups associated with this network. Security group IDs **cannot** be used in multiple network configurations. @@ -1891,8 +1991,8 @@ def create( network configurations. :param vpc_endpoints: :class:`NetworkVpcEndpoints` (optional) :param vpc_id: str (optional) - The ID of the VPC associated with this network. VPC IDs can be used in multiple network - configurations. + The ID of the VPC associated with this network configuration. VPC IDs can be used in multiple + networks. :returns: :class:`Network` """ @@ -1917,7 +2017,7 @@ def create( res = self._api.do("POST", f"/api/2.0/accounts/{self._api.account_id}/networks", body=body, headers=headers) return Network.from_dict(res) - def delete(self, network_id: str): + def delete(self, network_id: str) -> Network: """Deletes a Databricks network configuration, which represents a cloud VPC and its resources. You cannot delete a network that is associated with a workspace. @@ -1926,14 +2026,15 @@ def delete(self, network_id: str): :param network_id: str Databricks Account API network configuration ID. - + :returns: :class:`Network` """ headers = { "Accept": "application/json", } - self._api.do("DELETE", f"/api/2.0/accounts/{self._api.account_id}/networks/{network_id}", headers=headers) + res = self._api.do("DELETE", f"/api/2.0/accounts/{self._api.account_id}/networks/{network_id}", headers=headers) + return Network.from_dict(res) def get(self, network_id: str) -> Network: """Gets a Databricks network configuration, which represents a cloud VPC and its resources. @@ -1952,9 +2053,7 @@ def get(self, network_id: str) -> Network: return Network.from_dict(res) def list(self) -> Iterator[Network]: - """Gets a list of all Databricks network configurations for an account, specified by ID. - - This operation is available only if your account is on the E2 version of the platform. + """Lists Databricks network configurations for an account. 
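Several `delete()` methods above now return the deleted configuration object instead of None. A hedged sketch of the new behaviour for network configurations, assuming the `networks` attribute on the account client as in the existing SDK; the ID is a placeholder:

from databricks.sdk import AccountClient

a = AccountClient()
deleted = a.networks.delete(network_id="<network-configuration-id>")  # placeholder ID
# delete() now returns the deleted Network object rather than None
print(deleted.network_name, deleted.network_id)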
:returns: Iterator over :class:`Network` @@ -1976,48 +2075,39 @@ def __init__(self, api_client): def create( self, - private_access_settings_name: str, - region: str, *, allowed_vpc_endpoint_ids: Optional[List[str]] = None, private_access_level: Optional[PrivateAccessLevel] = None, + private_access_settings_name: Optional[str] = None, public_access_enabled: Optional[bool] = None, + region: Optional[str] = None, ) -> PrivateAccessSettings: - """Creates a private access settings object, which specifies how your workspace is accessed over [AWS - PrivateLink]. To use AWS PrivateLink, a workspace must have a private access settings object - referenced by ID in the workspace's `private_access_settings_id` property. - - You can share one private access settings with multiple workspaces in a single account. However, - private access settings are specific to AWS regions, so only workspaces in the same AWS region can use - a given private access settings object. - - Before configuring PrivateLink, read the [Databricks article about PrivateLink]. - - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + """Creates a private access settings configuration, which represents network access restrictions for + workspace resources. Private access settings configure whether workspaces can be accessed from the + public internet or only from private endpoints. - :param private_access_settings_name: str - The human-readable name of the private access settings object. - :param region: str - The cloud region for workspaces associated with this private access settings object. :param allowed_vpc_endpoint_ids: List[str] (optional) - An array of Databricks VPC endpoint IDs. This is the Databricks ID that is returned when registering - the VPC endpoint configuration in your Databricks account. This is not the ID of the VPC endpoint in - AWS. - - Only used when `private_access_level` is set to `ENDPOINT`. This is an allow list of VPC endpoints - that in your account that can connect to your workspace over AWS PrivateLink. - - If hybrid access to your workspace is enabled by setting `public_access_enabled` to `true`, this - control only works for PrivateLink connections. To control how your workspace is accessed via public - internet, see [IP access lists]. - - [IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html + An array of Databricks VPC endpoint IDs. This is the Databricks ID returned when registering the VPC + endpoint configuration in your Databricks account. This is not the ID of the VPC endpoint in AWS. + Only used when private_access_level is set to ENDPOINT. This is an allow list of VPC endpoints + registered in your Databricks account that can connect to your workspace over AWS PrivateLink. Note: + If hybrid access to your workspace is enabled by setting public_access_enabled to true, this control + only works for PrivateLink connections. To control how your workspace is accessed via public + internet, see IP access lists. :param private_access_level: :class:`PrivateAccessLevel` (optional) + The private access level controls which VPC endpoints can connect to the UI or API of any workspace + that attaches this private access settings object. `ACCOUNT` level access (the default) allows only + VPC endpoints that are registered in your Databricks account connect to your workspace. 
`ENDPOINT` + level access allows only specified VPC endpoints connect to your workspace. For details, see + allowed_vpc_endpoint_ids. + :param private_access_settings_name: str (optional) + The human-readable name of the private access settings object. :param public_access_enabled: bool (optional) Determines if the workspace can be accessed over public internet. For fully private workspaces, you - can optionally specify `false`, but only if you implement both the front-end and the back-end - PrivateLink connections. Otherwise, specify `true`, which means that public access is enabled. + can optionally specify false, but only if you implement both the front-end and the back-end + PrivateLink connections. Otherwise, specify true, which means that public access is enabled. + :param region: str (optional) + The AWS region for workspaces attached to this private access settings object. :returns: :class:`PrivateAccessSettings` """ @@ -2042,42 +2132,29 @@ def create( ) return PrivateAccessSettings.from_dict(res) - def delete(self, private_access_settings_id: str): - """Deletes a private access settings object, which determines how your workspace is accessed over [AWS - PrivateLink]. - - Before configuring PrivateLink, read the [Databricks article about PrivateLink].", - - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + def delete(self, private_access_settings_id: str) -> PrivateAccessSettings: + """Deletes a Databricks private access settings configuration, both specified by ID. :param private_access_settings_id: str - Databricks Account API private access settings ID. - + :returns: :class:`PrivateAccessSettings` """ headers = { "Accept": "application/json", } - self._api.do( + res = self._api.do( "DELETE", f"/api/2.0/accounts/{self._api.account_id}/private-access-settings/{private_access_settings_id}", headers=headers, ) + return PrivateAccessSettings.from_dict(res) def get(self, private_access_settings_id: str) -> PrivateAccessSettings: - """Gets a private access settings object, which specifies how your workspace is accessed over [AWS - PrivateLink]. - - Before configuring PrivateLink, read the [Databricks article about PrivateLink].", - - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + """Gets a Databricks private access settings configuration, both specified by ID. :param private_access_settings_id: str - Databricks Account API private access settings ID. :returns: :class:`PrivateAccessSettings` """ @@ -2094,7 +2171,7 @@ def get(self, private_access_settings_id: str) -> PrivateAccessSettings: return PrivateAccessSettings.from_dict(res) def list(self) -> Iterator[PrivateAccessSettings]: - """Gets a list of all private access settings objects for an account, specified by ID. + """Lists Databricks private access settings for an account. 
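With the signature change above, `create()` on the private access settings service takes only keyword arguments. A sketch of creating a private access settings object under the new signature, assuming the `private_access` attribute as in the existing SDK; the name and region values are placeholders:

from databricks.sdk import AccountClient
from databricks.sdk.service.provisioning import PrivateAccessLevel

a = AccountClient()
pas = a.private_access.create(
    private_access_settings_name="front-end-privatelink",  # placeholder name
    region="us-west-2",                                     # placeholder region
    private_access_level=PrivateAccessLevel.ACCOUNT,
    public_access_enabled=False,
)
print(pas.private_access_settings_id)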
:returns: Iterator over :class:`PrivateAccessSettings` @@ -2108,82 +2185,39 @@ def list(self) -> Iterator[PrivateAccessSettings]: return [PrivateAccessSettings.from_dict(v) for v in res] def replace( - self, - private_access_settings_id: str, - private_access_settings_name: str, - region: str, - *, - allowed_vpc_endpoint_ids: Optional[List[str]] = None, - private_access_level: Optional[PrivateAccessLevel] = None, - public_access_enabled: Optional[bool] = None, - ): + self, private_access_settings_id: str, customer_facing_private_access_settings: PrivateAccessSettings + ) -> PrivateAccessSettings: """Updates an existing private access settings object, which specifies how your workspace is accessed - over [AWS PrivateLink]. To use AWS PrivateLink, a workspace must have a private access settings object - referenced by ID in the workspace's `private_access_settings_id` property. - - This operation completely overwrites your existing private access settings object attached to your - workspaces. All workspaces attached to the private access settings are affected by any change. If - `public_access_enabled`, `private_access_level`, or `allowed_vpc_endpoint_ids` are updated, effects of - these changes might take several minutes to propagate to the workspace API. - - You can share one private access settings object with multiple workspaces in a single account. - However, private access settings are specific to AWS regions, so only workspaces in the same AWS - region can use a given private access settings object. - - Before configuring PrivateLink, read the [Databricks article about PrivateLink]. - - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + over AWS PrivateLink. To use AWS PrivateLink, a workspace must have a private access settings object + referenced by ID in the workspace's private_access_settings_id property. This operation completely + overwrites your existing private access settings object attached to your workspaces. All workspaces + attached to the private access settings are affected by any change. If public_access_enabled, + private_access_level, or allowed_vpc_endpoint_ids are updated, effects of these changes might take + several minutes to propagate to the workspace API. You can share one private access settings object + with multiple workspaces in a single account. However, private access settings are specific to AWS + regions, so only workspaces in the same AWS region can use a given private access settings object. + Before configuring PrivateLink, read the Databricks article about PrivateLink. :param private_access_settings_id: str - Databricks Account API private access settings ID. - :param private_access_settings_name: str - The human-readable name of the private access settings object. - :param region: str - The cloud region for workspaces associated with this private access settings object. - :param allowed_vpc_endpoint_ids: List[str] (optional) - An array of Databricks VPC endpoint IDs. This is the Databricks ID that is returned when registering - the VPC endpoint configuration in your Databricks account. This is not the ID of the VPC endpoint in - AWS. - - Only used when `private_access_level` is set to `ENDPOINT`. This is an allow list of VPC endpoints - that in your account that can connect to your workspace over AWS PrivateLink. 
- - If hybrid access to your workspace is enabled by setting `public_access_enabled` to `true`, this - control only works for PrivateLink connections. To control how your workspace is accessed via public - internet, see [IP access lists]. - - [IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html - :param private_access_level: :class:`PrivateAccessLevel` (optional) - :param public_access_enabled: bool (optional) - Determines if the workspace can be accessed over public internet. For fully private workspaces, you - can optionally specify `false`, but only if you implement both the front-end and the back-end - PrivateLink connections. Otherwise, specify `true`, which means that public access is enabled. - + Databricks private access settings ID. + :param customer_facing_private_access_settings: :class:`PrivateAccessSettings` + Properties of the new private access settings object. + :returns: :class:`PrivateAccessSettings` """ - body = {} - if allowed_vpc_endpoint_ids is not None: - body["allowed_vpc_endpoint_ids"] = [v for v in allowed_vpc_endpoint_ids] - if private_access_level is not None: - body["private_access_level"] = private_access_level.value - if private_access_settings_name is not None: - body["private_access_settings_name"] = private_access_settings_name - if public_access_enabled is not None: - body["public_access_enabled"] = public_access_enabled - if region is not None: - body["region"] = region + body = customer_facing_private_access_settings.as_dict() headers = { "Accept": "application/json", "Content-Type": "application/json", } - self._api.do( + res = self._api.do( "PUT", f"/api/2.0/accounts/{self._api.account_id}/private-access-settings/{private_access_settings_id}", body=body, headers=headers, ) + return PrivateAccessSettings.from_dict(res) class StorageAPI: @@ -2195,24 +2229,27 @@ class StorageAPI: def __init__(self, api_client): self._api = api_client - def create(self, storage_configuration_name: str, root_bucket_info: RootBucketInfo) -> StorageConfiguration: - """Creates new storage configuration for an account, specified by ID. Uploads a storage configuration - object that represents the root AWS S3 bucket in your account. Databricks stores related workspace - assets including DBFS, cluster logs, and job results. For the AWS S3 bucket, you need to configure the - required bucket policy. - - For information about how to create a new workspace with this API, see [Create a new workspace using - the Account API] - - [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + def create( + self, storage_configuration_name: str, root_bucket_info: RootBucketInfo, *, role_arn: Optional[str] = None + ) -> StorageConfiguration: + """Creates a Databricks storage configuration for an account. :param storage_configuration_name: str The human-readable name of the storage configuration. :param root_bucket_info: :class:`RootBucketInfo` + Root S3 bucket information. + :param role_arn: str (optional) + Optional IAM role that is used to access the workspace catalog which is created during workspace + creation for UC by Default. If a storage configuration with this field populated is used to create a + workspace, then a workspace catalog is created together with the workspace. The workspace catalog + shares the root bucket with internal workspace storage (including DBFS root) but uses a dedicated + bucket path prefix. 
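A sketch of the new optional `role_arn` argument on storage configuration creation described above, assuming the `storage` attribute on the account client as in the existing SDK; the bucket name and role ARN are placeholders:

from databricks.sdk import AccountClient
from databricks.sdk.service.provisioning import RootBucketInfo

a = AccountClient()
storage = a.storage.create(
    storage_configuration_name="uc-default-storage",               # placeholder name
    root_bucket_info=RootBucketInfo(bucket_name="my-root-bucket"), # placeholder bucket
    role_arn="arn:aws:iam::111122223333:role/uc-default-role",     # placeholder role ARN
)
print(storage.storage_configuration_id, storage.role_arn)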
:returns: :class:`StorageConfiguration` """ body = {} + if role_arn is not None: + body["role_arn"] = role_arn if root_bucket_info is not None: body["root_bucket_info"] = root_bucket_info.as_dict() if storage_configuration_name is not None: @@ -2227,31 +2264,30 @@ def create(self, storage_configuration_name: str, root_bucket_info: RootBucketIn ) return StorageConfiguration.from_dict(res) - def delete(self, storage_configuration_id: str): + def delete(self, storage_configuration_id: str) -> StorageConfiguration: """Deletes a Databricks storage configuration. You cannot delete a storage configuration that is associated with any workspace. :param storage_configuration_id: str - Databricks Account API storage configuration ID. - + :returns: :class:`StorageConfiguration` """ headers = { "Accept": "application/json", } - self._api.do( + res = self._api.do( "DELETE", f"/api/2.0/accounts/{self._api.account_id}/storage-configurations/{storage_configuration_id}", headers=headers, ) + return StorageConfiguration.from_dict(res) def get(self, storage_configuration_id: str) -> StorageConfiguration: """Gets a Databricks storage configuration for an account, both specified by ID. :param storage_configuration_id: str - Databricks Account API storage configuration ID. :returns: :class:`StorageConfiguration` """ @@ -2268,7 +2304,7 @@ def get(self, storage_configuration_id: str) -> StorageConfiguration: return StorageConfiguration.from_dict(res) def list(self) -> Iterator[StorageConfiguration]: - """Gets a list of all Databricks storage configurations for your account, specified by ID. + """Lists Databricks storage configurations for an account, specified by ID. :returns: Iterator over :class:`StorageConfiguration` @@ -2290,11 +2326,11 @@ def __init__(self, api_client): def create( self, - vpc_endpoint_name: str, *, aws_vpc_endpoint_id: Optional[str] = None, gcp_vpc_endpoint_info: Optional[GcpVpcEndpointInfo] = None, region: Optional[str] = None, + vpc_endpoint_name: Optional[str] = None, ) -> VpcEndpoint: """Creates a VPC endpoint configuration, which represents a [VPC endpoint] object in AWS used to communicate privately with Databricks over [AWS PrivateLink]. @@ -2309,13 +2345,14 @@ def create( [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html - :param vpc_endpoint_name: str - The human-readable name of the storage configuration. :param aws_vpc_endpoint_id: str (optional) The ID of the VPC endpoint object in AWS. :param gcp_vpc_endpoint_info: :class:`GcpVpcEndpointInfo` (optional) + The cloud info of this vpc endpoint. :param region: str (optional) - The AWS region in which this VPC endpoint object exists. + The region in which this VPC endpoint object exists. + :param vpc_endpoint_name: str (optional) + The human-readable name of the storage configuration. :returns: :class:`VpcEndpoint` """ @@ -2338,29 +2375,23 @@ def create( ) return VpcEndpoint.from_dict(res) - def delete(self, vpc_endpoint_id: str): - """Deletes a VPC endpoint configuration, which represents an [AWS VPC endpoint] that can communicate - privately with Databricks over [AWS PrivateLink]. - - Before configuring PrivateLink, read the [Databricks article about PrivateLink]. 
- - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + def delete(self, vpc_endpoint_id: str) -> VpcEndpoint: + """Deletes a Databricks VPC endpoint configuration. You cannot delete a VPC endpoint configuration that + is associated with any workspace. :param vpc_endpoint_id: str - Databricks VPC endpoint ID. - + :returns: :class:`VpcEndpoint` """ headers = { "Accept": "application/json", } - self._api.do( + res = self._api.do( "DELETE", f"/api/2.0/accounts/{self._api.account_id}/vpc-endpoints/{vpc_endpoint_id}", headers=headers ) + return VpcEndpoint.from_dict(res) def get(self, vpc_endpoint_id: str) -> VpcEndpoint: """Gets a VPC endpoint configuration, which represents a [VPC endpoint] object in AWS used to communicate @@ -2385,11 +2416,7 @@ def get(self, vpc_endpoint_id: str) -> VpcEndpoint: return VpcEndpoint.from_dict(res) def list(self) -> Iterator[VpcEndpoint]: - """Gets a list of all VPC endpoints for an account, specified by ID. - - Before configuring PrivateLink, read the [Databricks article about PrivateLink]. - - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + """Lists Databricks VPC endpoint configurations for an account. :returns: Iterator over :class:`VpcEndpoint` @@ -2448,17 +2475,16 @@ def wait_get_workspace_running( def create( self, - workspace_name: str, *, aws_region: Optional[str] = None, cloud: Optional[str] = None, cloud_resource_container: Optional[CloudResourceContainer] = None, + compute_mode: Optional[CustomerFacingComputeMode] = None, credentials_id: Optional[str] = None, custom_tags: Optional[Dict[str, str]] = None, deployment_name: Optional[str] = None, gcp_managed_network_config: Optional[GcpManagedNetworkConfig] = None, gke_config: Optional[GkeConfig] = None, - is_no_public_ip_enabled: Optional[bool] = None, location: Optional[str] = None, managed_services_customer_managed_key_id: Optional[str] = None, network_id: Optional[str] = None, @@ -2466,24 +2492,48 @@ def create( private_access_settings_id: Optional[str] = None, storage_configuration_id: Optional[str] = None, storage_customer_managed_key_id: Optional[str] = None, + workspace_name: Optional[str] = None, ) -> Wait[Workspace]: - """Creates a new workspace. + """Creates a new workspace using a credential configuration and a storage configuration, an optional + network configuration (if using a customer-managed VPC), an optional managed services key + configuration (if using customer-managed keys for managed services), and an optional storage key + configuration (if using customer-managed keys for storage). The key configurations used for managed + services and storage encryption can be the same or different. + + Important: This operation is asynchronous. A response with HTTP status code 200 means the request has + been accepted and is in progress, but does not mean that the workspace deployed successfully and is + running. The initial workspace status is typically PROVISIONING. Use the workspace ID (workspace_id) + field in the response to identify the new workspace and make repeated GET requests with the workspace + ID and check its status. The workspace becomes available when the status changes to RUNNING. + + You can share one customer-managed VPC with multiple workspaces in a single account. 
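# Illustrative sketch (not from the upstream commit): the docstring above stresses that
# workspace creation is asynchronous, so callers poll GET by workspace ID until the status
# becomes RUNNING. A hand-rolled polling helper is shown below; the SDK's
# wait_get_workspace_running()/Wait.result() helpers do the same thing for you.
# `provisioning.WorkspaceStatus` is assumed to be the status enum used by the Workspace model.
import time

from databricks.sdk import AccountClient
from databricks.sdk.service import provisioning

a = AccountClient()

def wait_until_running(workspace_id: int, timeout_s: int = 1200) -> provisioning.Workspace:
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        ws = a.workspaces.get(workspace_id)
        if ws.workspace_status == provisioning.WorkspaceStatus.RUNNING:
            return ws
        time.sleep(30)  # the initial status is typically PROVISIONING
    raise TimeoutError(f"workspace {workspace_id} did not reach RUNNING within {timeout_s}s")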
It is not + required to create a new VPC for each workspace. However, you cannot reuse subnets or Security Groups + between workspaces. If you plan to share one VPC with multiple workspaces, make sure you size your VPC + and subnets accordingly. Because a Databricks Account API network configuration encapsulates this + information, you cannot reuse a Databricks Account API network configuration across workspaces. + + For information about how to create a new workspace with this API including error handling, see + [Create a new workspace using the Account API]. + + Important: Customer-managed VPCs, PrivateLink, and customer-managed keys are supported on a limited + set of deployment and subscription types. If you have questions about availability, contact your + Databricks representative. - **Important**: This operation is asynchronous. A response with HTTP status code 200 means the request - has been accepted and is in progress, but does not mean that the workspace deployed successfully and - is running. The initial workspace status is typically `PROVISIONING`. Use the workspace ID - (`workspace_id`) field in the response to identify the new workspace and make repeated `GET` requests - with the workspace ID and check its status. The workspace becomes available when the status changes to - `RUNNING`. + This operation is available only if your account is on the E2 version of the platform or on a select + custom plan that allows multiple workspaces per account. + + [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html - :param workspace_name: str - The workspace's human-readable name. :param aws_region: str (optional) - The AWS region of the workspace's data plane. :param cloud: str (optional) - The cloud provider which the workspace uses. For Google Cloud workspaces, always set this field to - `gcp`. + The cloud name. This field always has the value `gcp`. :param cloud_resource_container: :class:`CloudResourceContainer` (optional) + :param compute_mode: :class:`CustomerFacingComputeMode` (optional) + If the compute mode is `SERVERLESS`, a serverless workspace is created that comes pre-configured + with serverless compute and default storage, providing a fully-managed, enterprise-ready SaaS + experience. This means you don't need to provide any resources managed by you, such as credentials, + storage, or network. If the compute mode is `HYBRID` (which is the default option), a classic + workspace is created that uses customer-managed resources. :param credentials_id: str (optional) ID of the workspace's credential configuration object. :param custom_tags: Dict[str,str] (optional) @@ -2492,55 +2542,49 @@ def create( key can be of maximum length of 127 characters, and cannot be empty. :param deployment_name: str (optional) The deployment name defines part of the subdomain for the workspace. The workspace URL for the web - application and REST APIs is `.cloud.databricks.com`. For example, if the - deployment name is `abcsales`, your workspace URL will be `https://abcsales.cloud.databricks.com`. + application and REST APIs is .cloud.databricks.com. For example, if the + deployment name is abcsales, your workspace URL will be https://abcsales.cloud.databricks.com. Hyphens are allowed. This property supports only the set of characters that are allowed in a - subdomain. - - To set this value, you must have a deployment name prefix. Contact your Databricks account team to - add an account deployment name prefix to your account. 
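# Illustrative sketch (not from the upstream commit) of the new `compute_mode` parameter
# explained above: SERVERLESS creates a fully managed workspace with no customer-managed
# credentials, storage, or network, while HYBRID (the default) keeps the classic
# customer-managed resource model. Enum member names follow the docstring; the workspace
# names, region, and configuration IDs are placeholders.
from databricks.sdk import AccountClient
from databricks.sdk.service import provisioning

a = AccountClient()

serverless_ws = a.workspaces.create_and_wait(
    workspace_name="analytics-serverless",
    aws_region="us-east-1",
    compute_mode=provisioning.CustomerFacingComputeMode.SERVERLESS,
)

classic_ws = a.workspaces.create_and_wait(
    workspace_name="analytics-classic",
    aws_region="us-east-1",
    credentials_id="<credential-configuration-id>",
    storage_configuration_id="<storage-configuration-id>",
    # compute_mode defaults to HYBRID, i.e. a classic workspace with customer-managed resources
)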
- - Workspace deployment names follow the account prefix and a hyphen. For example, if your account's - deployment prefix is `acme` and the workspace deployment name is `workspace-1`, the JSON response - for the `deployment_name` field becomes `acme-workspace-1`. The workspace URL would be - `acme-workspace-1.cloud.databricks.com`. - - You can also set the `deployment_name` to the reserved keyword `EMPTY` if you want the deployment - name to only include the deployment prefix. For example, if your account's deployment prefix is - `acme` and the workspace deployment name is `EMPTY`, the `deployment_name` becomes `acme` only and - the workspace URL is `acme.cloud.databricks.com`. - - This value must be unique across all non-deleted deployments across all AWS regions. - - If a new workspace omits this property, the server generates a unique deployment name for you with - the pattern `dbc-xxxxxxxx-xxxx`. + subdomain. To set this value, you must have a deployment name prefix. Contact your Databricks + account team to add an account deployment name prefix to your account. Workspace deployment names + follow the account prefix and a hyphen. For example, if your account's deployment prefix is acme and + the workspace deployment name is workspace-1, the JSON response for the deployment_name field + becomes acme-workspace-1. The workspace URL would be acme-workspace-1.cloud.databricks.com. You can + also set the deployment_name to the reserved keyword EMPTY if you want the deployment name to only + include the deployment prefix. For example, if your account's deployment prefix is acme and the + workspace deployment name is EMPTY, the deployment_name becomes acme only and the workspace URL is + acme.cloud.databricks.com. This value must be unique across all non-deleted deployments across all + AWS regions. If a new workspace omits this property, the server generates a unique deployment name + for you with the pattern dbc-xxxxxxxx-xxxx. :param gcp_managed_network_config: :class:`GcpManagedNetworkConfig` (optional) :param gke_config: :class:`GkeConfig` (optional) - :param is_no_public_ip_enabled: bool (optional) - Whether no public IP is enabled for the workspace. :param location: str (optional) - The Google Cloud region of the workspace data plane in your Google account. For example, `us-east4`. + The Google Cloud region of the workspace data plane in your Google account (for example, + `us-east4`). :param managed_services_customer_managed_key_id: str (optional) The ID of the workspace's managed services encryption key configuration object. This is used to help protect and control access to the workspace's notebooks, secrets, Databricks SQL queries, and query - history. The provided key configuration object property `use_cases` must contain `MANAGED_SERVICES`. + history. The provided key configuration object property use_cases must contain MANAGED_SERVICES. :param network_id: str (optional) + The ID of the workspace's network configuration object. To use AWS PrivateLink, this field is + required. :param pricing_tier: :class:`PricingTier` (optional) :param private_access_settings_id: str (optional) - ID of the workspace's private access settings object. Only used for PrivateLink. This ID must be - specified for customers using [AWS PrivateLink] for either front-end (user-to-workspace connection), - back-end (data plane to control plane connection), or both connection types. 
- - Before configuring PrivateLink, read the [Databricks article about PrivateLink].", + ID of the workspace's private access settings object. Only used for PrivateLink. You must specify + this ID if you are using [AWS PrivateLink] for either front-end (user-to-workspace connection), + back-end (data plane to control plane connection), or both connection types. Before configuring + PrivateLink, read the [Databricks article about PrivateLink].", [AWS PrivateLink]: https://aws.amazon.com/privatelink/ [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html :param storage_configuration_id: str (optional) - The ID of the workspace's storage configuration object. + ID of the workspace's storage configuration object. :param storage_customer_managed_key_id: str (optional) The ID of the workspace's storage encryption key configuration object. This is used to encrypt the workspace's root S3 bucket (root DBFS and system data) and, optionally, cluster EBS volumes. The - provided key configuration object property `use_cases` must contain `STORAGE`. + provided key configuration object property use_cases must contain STORAGE. + :param workspace_name: str (optional) + The human-readable name of the workspace. :returns: Long-running operation waiter for :class:`Workspace`. @@ -2553,6 +2597,8 @@ def create( body["cloud"] = cloud if cloud_resource_container is not None: body["cloud_resource_container"] = cloud_resource_container.as_dict() + if compute_mode is not None: + body["compute_mode"] = compute_mode.value if credentials_id is not None: body["credentials_id"] = credentials_id if custom_tags is not None: @@ -2563,8 +2609,6 @@ def create( body["gcp_managed_network_config"] = gcp_managed_network_config.as_dict() if gke_config is not None: body["gke_config"] = gke_config.as_dict() - if is_no_public_ip_enabled is not None: - body["is_no_public_ip_enabled"] = is_no_public_ip_enabled if location is not None: body["location"] = location if managed_services_customer_managed_key_id is not None: @@ -2597,17 +2641,16 @@ def create( def create_and_wait( self, - workspace_name: str, *, aws_region: Optional[str] = None, cloud: Optional[str] = None, cloud_resource_container: Optional[CloudResourceContainer] = None, + compute_mode: Optional[CustomerFacingComputeMode] = None, credentials_id: Optional[str] = None, custom_tags: Optional[Dict[str, str]] = None, deployment_name: Optional[str] = None, gcp_managed_network_config: Optional[GcpManagedNetworkConfig] = None, gke_config: Optional[GkeConfig] = None, - is_no_public_ip_enabled: Optional[bool] = None, location: Optional[str] = None, managed_services_customer_managed_key_id: Optional[str] = None, network_id: Optional[str] = None, @@ -2615,18 +2658,19 @@ def create_and_wait( private_access_settings_id: Optional[str] = None, storage_configuration_id: Optional[str] = None, storage_customer_managed_key_id: Optional[str] = None, + workspace_name: Optional[str] = None, timeout=timedelta(minutes=20), ) -> Workspace: return self.create( aws_region=aws_region, cloud=cloud, cloud_resource_container=cloud_resource_container, + compute_mode=compute_mode, credentials_id=credentials_id, custom_tags=custom_tags, deployment_name=deployment_name, gcp_managed_network_config=gcp_managed_network_config, gke_config=gke_config, - is_no_public_ip_enabled=is_no_public_ip_enabled, location=location, managed_services_customer_managed_key_id=managed_services_customer_managed_key_id, network_id=network_id, @@ -2637,42 
+2681,34 @@ def create_and_wait( workspace_name=workspace_name, ).result(timeout=timeout) - def delete(self, workspace_id: int): - """Terminates and deletes a Databricks workspace. From an API perspective, deletion is immediate. - However, it might take a few minutes for all workspaces resources to be deleted, depending on the size - and number of workspace resources. - - This operation is available only if your account is on the E2 version of the platform or on a select - custom plan that allows multiple workspaces per account. + def delete(self, workspace_id: int) -> Workspace: + """Deletes a Databricks workspace, both specified by ID. :param workspace_id: int - Workspace ID. - + :returns: :class:`Workspace` """ headers = { "Accept": "application/json", } - self._api.do("DELETE", f"/api/2.0/accounts/{self._api.account_id}/workspaces/{workspace_id}", headers=headers) + res = self._api.do( + "DELETE", f"/api/2.0/accounts/{self._api.account_id}/workspaces/{workspace_id}", headers=headers + ) + return Workspace.from_dict(res) def get(self, workspace_id: int) -> Workspace: """Gets information including status for a Databricks workspace, specified by ID. In the response, the `workspace_status` field indicates the current status. After initial workspace creation (which is asynchronous), make repeated `GET` requests with the workspace ID and check its status. The workspace - becomes available when the status changes to `RUNNING`. - - For information about how to create a new workspace with this API **including error handling**, see - [Create a new workspace using the Account API]. - - This operation is available only if your account is on the E2 version of the platform or on a select - custom plan that allows multiple workspaces per account. + becomes available when the status changes to `RUNNING`. For information about how to create a new + workspace with this API **including error handling**, see [Create a new workspace using the Account + API]. [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html :param workspace_id: int - Workspace ID. :returns: :class:`Workspace` """ @@ -2687,10 +2723,7 @@ def get(self, workspace_id: int) -> Workspace: return Workspace.from_dict(res) def list(self) -> Iterator[Workspace]: - """Gets a list of all workspaces associated with an account, specified by ID. - - This operation is available only if your account is on the E2 version of the platform or on a select - custom plan that allows multiple workspaces per account. + """Lists Databricks workspaces for an account. :returns: Iterator over :class:`Workspace` @@ -2704,202 +2737,58 @@ def list(self) -> Iterator[Workspace]: return [Workspace.from_dict(v) for v in res] def update( - self, - workspace_id: int, - *, - aws_region: Optional[str] = None, - credentials_id: Optional[str] = None, - custom_tags: Optional[Dict[str, str]] = None, - managed_services_customer_managed_key_id: Optional[str] = None, - network_connectivity_config_id: Optional[str] = None, - network_id: Optional[str] = None, - private_access_settings_id: Optional[str] = None, - storage_configuration_id: Optional[str] = None, - storage_customer_managed_key_id: Optional[str] = None, + self, workspace_id: int, customer_facing_workspace: Workspace, *, update_mask: Optional[str] = None ) -> Wait[Workspace]: - """Updates a workspace configuration for either a running workspace or a failed workspace. The elements - that can be updated varies between these two use cases. 
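# Illustrative sketch (not from the upstream commit): delete() now returns the deleted
# Workspace object instead of None, so callers can log or inspect the terminated workspace.
# The workspace ID is a placeholder.
from databricks.sdk import AccountClient

a = AccountClient()

deleted = a.workspaces.delete(workspace_id=1234567890)
print(f"deleted workspace {deleted.workspace_id} ({deleted.workspace_name})")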
- - ### Update a failed workspace You can update a Databricks workspace configuration for failed workspace - deployment for some fields, but not all fields. For a failed workspace, this request supports updates - to the following fields only: - Credential configuration ID - Storage configuration ID - Network - configuration ID. Used only to add or change a network configuration for a customer-managed VPC. For a - failed workspace only, you can convert a workspace with Databricks-managed VPC to use a - customer-managed VPC by adding this ID. You cannot downgrade a workspace with a customer-managed VPC - to be a Databricks-managed VPC. You can update the network configuration for a failed or running - workspace to add PrivateLink support, though you must also add a private access settings object. - Key - configuration ID for managed services (control plane storage, such as notebook source and Databricks - SQL queries). Used only if you use customer-managed keys for managed services. - Key configuration ID - for workspace storage (root S3 bucket and, optionally, EBS volumes). Used only if you use - customer-managed keys for workspace storage. **Important**: If the workspace was ever in the running - state, even if briefly before becoming a failed workspace, you cannot add a new key configuration ID - for workspace storage. - Private access settings ID to add PrivateLink support. You can add or update - the private access settings ID to upgrade a workspace to add support for front-end, back-end, or both - types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink - support on a workspace. - Custom tags. Given you provide an empty custom tags, the update would not be - applied. - Network connectivity configuration ID to add serverless stable IP support. You can add or - update the network connectivity configuration ID to ensure the workspace uses the same set of stable - IP CIDR blocks to access your resources. You cannot remove a network connectivity configuration from - the workspace once attached, you can only switch to another one. - - After calling the `PATCH` operation to update the workspace configuration, make repeated `GET` - requests with the workspace ID and check the workspace status. The workspace is successful if the - status changes to `RUNNING`. - - For information about how to create a new workspace with this API **including error handling**, see - [Create a new workspace using the Account API]. - - ### Update a running workspace You can update a Databricks workspace configuration for running - workspaces for some fields, but not all fields. For a running workspace, this request supports - updating the following fields only: - Credential configuration ID - Network configuration ID. Used - only if you already use a customer-managed VPC. You cannot convert a running workspace from a - Databricks-managed VPC to a customer-managed VPC. You can use a network configuration update in this - API for a failed or running workspace to add support for PrivateLink, although you also need to add a - private access settings object. - Key configuration ID for managed services (control plane storage, - such as notebook source and Databricks SQL queries). Databricks does not directly encrypt the data - with the customer-managed key (CMK). Databricks uses both the CMK and the Databricks managed key (DMK) - that is unique to your workspace to encrypt the Data Encryption Key (DEK). 
Databricks uses the DEK to - encrypt your workspace's managed services persisted data. If the workspace does not already have a CMK - for managed services, adding this ID enables managed services encryption for new or updated data. - Existing managed services data that existed before adding the key remains not encrypted with the DEK - until it is modified. If the workspace already has customer-managed keys for managed services, this - request rotates (changes) the CMK keys and the DEK is re-encrypted with the DMK and the new CMK. - Key - configuration ID for workspace storage (root S3 bucket and, optionally, EBS volumes). You can set this - only if the workspace does not already have a customer-managed key configuration for workspace - storage. - Private access settings ID to add PrivateLink support. You can add or update the private - access settings ID to upgrade a workspace to add support for front-end, back-end, or both types of - connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on - a workspace. - Custom tags. Given you provide an empty custom tags, the update would not be applied. - - Network connectivity configuration ID to add serverless stable IP support. You can add or update the - network connectivity configuration ID to ensure the workspace uses the same set of stable IP CIDR - blocks to access your resources. You cannot remove a network connectivity configuration from the - workspace once attached, you can only switch to another one. - - **Important**: To update a running workspace, your workspace must have no running compute resources - that run in your workspace's VPC in the Classic data plane. For example, stop all all-purpose - clusters, job clusters, pools with running clusters, and Classic SQL warehouses. If you do not - terminate all cluster instances in the workspace before calling this API, the request will fail. - - ### Wait until changes take effect. After calling the `PATCH` operation to update the workspace - configuration, make repeated `GET` requests with the workspace ID and check the workspace status and - the status of the fields. * For workspaces with a Databricks-managed VPC, the workspace status becomes - `PROVISIONING` temporarily (typically under 20 minutes). If the workspace update is successful, the - workspace status changes to `RUNNING`. Note that you can also check the workspace status in the - [Account Console]. However, you cannot use or create clusters for another 20 minutes after that status - change. This results in a total of up to 40 minutes in which you cannot create clusters. If you create - or use clusters before this time interval elapses, clusters do not launch successfully, fail, or could - cause other unexpected behavior. * For workspaces with a customer-managed VPC, the workspace status - stays at status `RUNNING` and the VPC change happens immediately. A change to the storage - customer-managed key configuration ID might take a few minutes to update, so continue to check the - workspace until you observe that it has been updated. If the update fails, the workspace might revert - silently to its original configuration. After the workspace has been updated, you cannot use or create - clusters for another 20 minutes. If you create or use clusters before this time interval elapses, - clusters do not launch successfully, fail, or could cause other unexpected behavior. 
- - If you update the _storage_ customer-managed key configurations, it takes 20 minutes for the changes - to fully take effect. During the 20 minute wait, it is important that you stop all REST API calls to - the DBFS API. If you are modifying _only the managed services key configuration_, you can omit the 20 - minute wait. - - **Important**: Customer-managed keys and customer-managed VPCs are supported by only some deployment - types and subscription types. If you have questions about availability, contact your Databricks - representative. - - This operation is available only if your account is on the E2 version of the platform or on a select - custom plan that allows multiple workspaces per account. - - [Account Console]: https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html - [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + """Updates a workspace. :param workspace_id: int - Workspace ID. - :param aws_region: str (optional) - The AWS region of the workspace's data plane (for example, `us-west-2`). This parameter is available - only for updating failed workspaces. - :param credentials_id: str (optional) - ID of the workspace's credential configuration object. This parameter is available for updating both - failed and running workspaces. - :param custom_tags: Dict[str,str] (optional) - The custom tags key-value pairing that is attached to this workspace. The key-value pair is a string - of utf-8 characters. The value can be an empty string, with maximum length of 255 characters. The - key can be of maximum length of 127 characters, and cannot be empty. - :param managed_services_customer_managed_key_id: str (optional) - The ID of the workspace's managed services encryption key configuration object. This parameter is - available only for updating failed workspaces. - :param network_connectivity_config_id: str (optional) - :param network_id: str (optional) - The ID of the workspace's network configuration object. Used only if you already use a - customer-managed VPC. For failed workspaces only, you can switch from a Databricks-managed VPC to a - customer-managed VPC by updating the workspace to add a network configuration ID. - :param private_access_settings_id: str (optional) - The ID of the workspace's private access settings configuration object. This parameter is available - only for updating failed workspaces. - :param storage_configuration_id: str (optional) - The ID of the workspace's storage configuration object. This parameter is available only for - updating failed workspaces. - :param storage_customer_managed_key_id: str (optional) - The ID of the key configuration object for workspace storage. This parameter is available for - updating both failed and running workspaces. + A unique integer ID for the workspace + :param customer_facing_workspace: :class:`Workspace` + :param update_mask: str (optional) + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. 
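# Illustrative sketch (not from the upstream commit) of the reworked update(): the mutable
# state now travels in a single `customer_facing_workspace` Workspace object, and
# `update_mask` selects which fields to apply using comma-separated field paths, as described
# above. The tag values are placeholders and `custom_tags` is assumed to be a valid mask path.
from databricks.sdk import AccountClient
from databricks.sdk.service import provisioning

a = AccountClient()

updated = a.workspaces.update_and_wait(
    workspace_id=1234567890,  # placeholder workspace ID
    customer_facing_workspace=provisioning.Workspace(custom_tags={"cost-center": "data-eng"}),
    update_mask="custom_tags",  # prefer explicit field paths over the "*" wildcard
)
print(updated.workspace_status)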
It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. :returns: Long-running operation waiter for :class:`Workspace`. See :method:wait_get_workspace_running for more details. """ - body = {} - if aws_region is not None: - body["aws_region"] = aws_region - if credentials_id is not None: - body["credentials_id"] = credentials_id - if custom_tags is not None: - body["custom_tags"] = custom_tags - if managed_services_customer_managed_key_id is not None: - body["managed_services_customer_managed_key_id"] = managed_services_customer_managed_key_id - if network_connectivity_config_id is not None: - body["network_connectivity_config_id"] = network_connectivity_config_id - if network_id is not None: - body["network_id"] = network_id - if private_access_settings_id is not None: - body["private_access_settings_id"] = private_access_settings_id - if storage_configuration_id is not None: - body["storage_configuration_id"] = storage_configuration_id - if storage_customer_managed_key_id is not None: - body["storage_customer_managed_key_id"] = storage_customer_managed_key_id + body = customer_facing_workspace.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask headers = { "Accept": "application/json", "Content-Type": "application/json", } op_response = self._api.do( - "PATCH", f"/api/2.0/accounts/{self._api.account_id}/workspaces/{workspace_id}", body=body, headers=headers + "PATCH", + f"/api/2.0/accounts/{self._api.account_id}/workspaces/{workspace_id}", + query=query, + body=body, + headers=headers, ) return Wait( - self.wait_get_workspace_running, response=UpdateResponse.from_dict(op_response), workspace_id=workspace_id + self.wait_get_workspace_running, + response=Workspace.from_dict(op_response), + workspace_id=op_response["workspace_id"], ) def update_and_wait( self, workspace_id: int, + customer_facing_workspace: Workspace, *, - aws_region: Optional[str] = None, - credentials_id: Optional[str] = None, - custom_tags: Optional[Dict[str, str]] = None, - managed_services_customer_managed_key_id: Optional[str] = None, - network_connectivity_config_id: Optional[str] = None, - network_id: Optional[str] = None, - private_access_settings_id: Optional[str] = None, - storage_configuration_id: Optional[str] = None, - storage_customer_managed_key_id: Optional[str] = None, + update_mask: Optional[str] = None, timeout=timedelta(minutes=20), ) -> Workspace: return self.update( - aws_region=aws_region, - credentials_id=credentials_id, - custom_tags=custom_tags, - managed_services_customer_managed_key_id=managed_services_customer_managed_key_id, - network_connectivity_config_id=network_connectivity_config_id, - network_id=network_id, - private_access_settings_id=private_access_settings_id, - storage_configuration_id=storage_configuration_id, - storage_customer_managed_key_id=storage_customer_managed_key_id, - workspace_id=workspace_id, + customer_facing_workspace=customer_facing_workspace, update_mask=update_mask, workspace_id=workspace_id ).result(timeout=timeout) diff --git a/databricks/sdk/service/qualitymonitorv2.py b/databricks/sdk/service/qualitymonitorv2.py index a6fab7023..337bb86a5 100755 --- a/databricks/sdk/service/qualitymonitorv2.py +++ b/databricks/sdk/service/qualitymonitorv2.py @@ -7,7 +7,7 @@ from enum import Enum from typing import Any, Dict, Iterator, List, Optional -from ._internal import _enum, _from_dict, _repeated_dict +from 
databricks.sdk.service._internal import _enum, _from_dict, _repeated_dict _LOG = logging.getLogger("databricks.sdk") diff --git a/databricks/sdk/service/serving.py b/databricks/sdk/service/serving.py index f707aadf7..d219f7495 100755 --- a/databricks/sdk/service/serving.py +++ b/databricks/sdk/service/serving.py @@ -13,8 +13,10 @@ import requests +from databricks.sdk.service._internal import (Wait, _enum, _from_dict, + _repeated_dict) + from ..errors import OperationFailed -from ._internal import Wait, _enum, _from_dict, _repeated_dict _LOG = logging.getLogger("databricks.sdk") diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py index 3004f17da..a37ace4f5 100755 --- a/databricks/sdk/service/settings.py +++ b/databricks/sdk/service/settings.py @@ -7,7 +7,8 @@ from enum import Enum from typing import Any, Dict, Iterator, List, Optional -from ._internal import _enum, _from_dict, _repeated_dict, _repeated_enum +from databricks.sdk.service._internal import (_enum, _from_dict, + _repeated_dict, _repeated_enum) _LOG = logging.getLogger("databricks.sdk") @@ -660,7 +661,8 @@ def from_dict(cls, d: Dict[str, Any]) -> ComplianceSecurityProfileSetting: class ComplianceStandard(Enum): - """Compliance stardard for SHIELD customers""" + """Compliance standard for SHIELD customers. See README.md for how instructions of how to add new + standards.""" CANADA_PROTECTED_B = "CANADA_PROTECTED_B" CYBER_ESSENTIAL_PLUS = "CYBER_ESSENTIAL_PLUS" @@ -668,6 +670,7 @@ class ComplianceStandard(Enum): FEDRAMP_IL5 = "FEDRAMP_IL5" FEDRAMP_MODERATE = "FEDRAMP_MODERATE" GERMANY_C5 = "GERMANY_C5" + GERMANY_TISAX = "GERMANY_TISAX" HIPAA = "HIPAA" HITRUST = "HITRUST" IRAP_PROTECTED = "IRAP_PROTECTED" diff --git a/databricks/sdk/service/settingsv2.py b/databricks/sdk/service/settingsv2.py index babfb1a09..322ffd47c 100755 --- a/databricks/sdk/service/settingsv2.py +++ b/databricks/sdk/service/settingsv2.py @@ -7,7 +7,7 @@ from enum import Enum from typing import Any, Dict, Iterator, List, Optional -from ._internal import _enum, _from_dict, _repeated_dict +from databricks.sdk.service._internal import _enum, _from_dict, _repeated_dict _LOG = logging.getLogger("databricks.sdk") diff --git a/databricks/sdk/service/sharing.py b/databricks/sdk/service/sharing.py index ad791cc15..172307d67 100755 --- a/databricks/sdk/service/sharing.py +++ b/databricks/sdk/service/sharing.py @@ -7,13 +7,13 @@ from enum import Enum from typing import Any, Dict, Iterator, List, Optional -from ._internal import _enum, _from_dict, _repeated_dict, _repeated_enum +from databricks.sdk.service import catalog +from databricks.sdk.service._internal import (_enum, _from_dict, + _repeated_dict, _repeated_enum) _LOG = logging.getLogger("databricks.sdk") -from databricks.sdk.service import catalog - # all definitions in this file are in alphabetical order @@ -2310,6 +2310,10 @@ class TableInternalAttributes: auxiliary_managed_location: Optional[str] = None """Managed Delta Metadata location for foreign iceberg tables.""" + dependency_storage_locations: Optional[List[str]] = None + """Storage locations of all table dependencies for shared views. 
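# Illustrative sketch (not from the upstream commit): round-tripping the new
# `dependency_storage_locations` field on sharing.TableInternalAttributes through the
# generated as_dict()/from_dict() helpers. The storage paths are placeholders.
from databricks.sdk.service import sharing

attrs = sharing.TableInternalAttributes(
    storage_location="s3://bucket/shared/view",
    dependency_storage_locations=[
        "s3://bucket/tables/orders",
        "s3://bucket/tables/customers",
    ],
)
payload = attrs.as_dict()
roundtrip = sharing.TableInternalAttributes.from_dict(payload)
assert roundtrip.dependency_storage_locations == attrs.dependency_storage_locations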
Used on the recipient side for SEG + (Secure Egress Gateway) whitelisting.""" + parent_storage_location: Optional[str] = None """Will be populated in the reconciliation response for VIEW and FOREIGN_TABLE, with the value of the parent UC entity's storage_location, following the same logic as getManagedEntityPath in @@ -2332,6 +2336,8 @@ def as_dict(self) -> dict: body = {} if self.auxiliary_managed_location is not None: body["auxiliary_managed_location"] = self.auxiliary_managed_location + if self.dependency_storage_locations: + body["dependency_storage_locations"] = [v for v in self.dependency_storage_locations] if self.parent_storage_location is not None: body["parent_storage_location"] = self.parent_storage_location if self.storage_location is not None: @@ -2347,6 +2353,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.auxiliary_managed_location is not None: body["auxiliary_managed_location"] = self.auxiliary_managed_location + if self.dependency_storage_locations: + body["dependency_storage_locations"] = self.dependency_storage_locations if self.parent_storage_location is not None: body["parent_storage_location"] = self.parent_storage_location if self.storage_location is not None: @@ -2362,6 +2370,7 @@ def from_dict(cls, d: Dict[str, Any]) -> TableInternalAttributes: """Deserializes the TableInternalAttributes from a dictionary.""" return cls( auxiliary_managed_location=d.get("auxiliary_managed_location", None), + dependency_storage_locations=d.get("dependency_storage_locations", None), parent_storage_location=d.get("parent_storage_location", None), storage_location=d.get("storage_location", None), type=_enum(d, "type", TableInternalAttributesSharedTableType), diff --git a/databricks/sdk/service/sql.py b/databricks/sdk/service/sql.py index 33b80c3c9..46d34e96b 100755 --- a/databricks/sdk/service/sql.py +++ b/databricks/sdk/service/sql.py @@ -10,8 +10,10 @@ from enum import Enum from typing import Any, Callable, Dict, Iterator, List, Optional +from databricks.sdk.service._internal import (Wait, _enum, _from_dict, + _repeated_dict, _repeated_enum) + from ..errors import OperationFailed -from ._internal import Wait, _enum, _from_dict, _repeated_dict, _repeated_enum _LOG = logging.getLogger("databricks.sdk") @@ -1080,9 +1082,6 @@ def from_dict(cls, d: Dict[str, Any]) -> AlertV2Subscription: @dataclass class BaseChunkInfo: - """Describes metadata for a particular chunk, within a result set; this structure is used both - within a manifest, and when fetching individual chunk data or links.""" - byte_count: Optional[int] = None """The number of bytes in the result chunk. This field is not available when using `INLINE` disposition.""" @@ -1686,8 +1685,6 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateVisualizationRequestVisualization class CreateWarehouseRequestWarehouseType(Enum): - """Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` - and also set the field `enable_serverless_compute` to `true`.""" CLASSIC = "CLASSIC" PRO = "PRO" @@ -2251,8 +2248,6 @@ class Disposition(Enum): class EditWarehouseRequestWarehouseType(Enum): - """Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` - and also set the field `enable_serverless_compute` to `true`.""" CLASSIC = "CLASSIC" PRO = "PRO" @@ -2341,6 +2336,7 @@ class EndpointHealth: """Deprecated. 
split into summary and details for security""" status: Optional[Status] = None + """Health status of the endpoint.""" summary: Optional[str] = None """A short summary of the health status in case of degraded/failed warehouses.""" @@ -2434,7 +2430,7 @@ class EndpointInfo: max_num_clusters: Optional[int] = None """Maximum number of clusters that the autoscaler will create to handle concurrent queries. - Supported values: - Must be >= min_num_clusters - Must be <= 30. + Supported values: - Must be >= min_num_clusters - Must be <= 40. Defaults to min_clusters if unset.""" @@ -2463,8 +2459,10 @@ class EndpointInfo: """ODBC parameters for the SQL warehouse""" spot_instance_policy: Optional[SpotInstancePolicy] = None + """Configurations whether the endpoint should use spot instances.""" state: Optional[State] = None + """state of the endpoint""" tags: Optional[EndpointTags] = None """A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS @@ -2594,8 +2592,6 @@ def from_dict(cls, d: Dict[str, Any]) -> EndpointInfo: class EndpointInfoWarehouseType(Enum): - """Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` - and also set the field `enable_serverless_compute` to `true`.""" CLASSIC = "CLASSIC" PRO = "PRO" @@ -2725,6 +2721,9 @@ class ExternalLink: which point a new `external_link` must be requested.""" external_link: Optional[str] = None + """A URL pointing to a chunk of result data, hosted by an external service, with a short expiration + time (<= 15 minutes). As this URL contains a temporary credential, it should be considered + sensitive and the client should not expose this URL in a log.""" http_headers: Optional[Dict[str, str]] = None """HTTP headers that must be included with a GET request to the `external_link`. Each header is @@ -2735,7 +2734,7 @@ class ExternalLink: next_chunk_index: Optional[int] = None """When fetching, provides the `chunk_index` for the _next_ chunk. If absent, indicates there are no more chunks. The next chunk can be fetched with a - :method:statementexecution/getStatementResultChunkN request.""" + :method:statementexecution/getstatementresultchunkn request.""" next_chunk_internal_link: Optional[str] = None """When fetching, provides a link to fetch the _next_ chunk. If absent, indicates there are no more @@ -3048,7 +3047,7 @@ class GetWarehouseResponse: max_num_clusters: Optional[int] = None """Maximum number of clusters that the autoscaler will create to handle concurrent queries. - Supported values: - Must be >= min_num_clusters - Must be <= 30. + Supported values: - Must be >= min_num_clusters - Must be <= 40. Defaults to min_clusters if unset.""" @@ -3077,8 +3076,10 @@ class GetWarehouseResponse: """ODBC parameters for the SQL warehouse""" spot_instance_policy: Optional[SpotInstancePolicy] = None + """Configurations whether the endpoint should use spot instances.""" state: Optional[State] = None + """state of the endpoint""" tags: Optional[EndpointTags] = None """A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS @@ -3087,6 +3088,8 @@ class GetWarehouseResponse: Supported values: - Number of tags < 45.""" warehouse_type: Optional[GetWarehouseResponseWarehouseType] = None + """Warehouse type: `PRO` or `CLASSIC`. 
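# Illustrative sketch (not from the upstream commit): creating a SQL warehouse with the fields
# documented above, where the documented autoscaling ceiling moves from 30 to 40 clusters.
# The warehouse name and sizing are placeholders, and create_and_wait() on
# WorkspaceClient.warehouses is assumed to accept these keyword arguments.
from databricks.sdk import WorkspaceClient
from databricks.sdk.service import sql

w = WorkspaceClient()

wh = w.warehouses.create_and_wait(
    name="etl-warehouse",
    cluster_size="Small",
    min_num_clusters=1,
    max_num_clusters=40,  # new documented upper bound
    warehouse_type=sql.CreateWarehouseRequestWarehouseType.PRO,
    enable_serverless_compute=True,  # serverless requires warehouse_type PRO
    spot_instance_policy=sql.SpotInstancePolicy.COST_OPTIMIZED,
)
print(wh.id, wh.state)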
If you want to use serverless compute, you must set to `PRO` + and also set the field `enable_serverless_compute` to `true`.""" def as_dict(self) -> dict: """Serializes the GetWarehouseResponse into a dictionary suitable for use as a JSON request body.""" @@ -3206,8 +3209,6 @@ def from_dict(cls, d: Dict[str, Any]) -> GetWarehouseResponse: class GetWarehouseResponseWarehouseType(Enum): - """Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` - and also set the field `enable_serverless_compute` to `true`.""" CLASSIC = "CLASSIC" PRO = "PRO" @@ -3226,6 +3227,9 @@ class GetWorkspaceWarehouseConfigResponse: """Spark confs for external hive metastore configuration JSON serialized size must be less than <= 512K""" + enable_serverless_compute: Optional[bool] = None + """Enable Serverless compute for SQL warehouses""" + enabled_warehouse_types: Optional[List[WarehouseTypePair]] = None """List of Warehouse Types allowed in this workspace (limits allowed value of the type field in CreateWarehouse and EditWarehouse). Note: Some types cannot be disabled, they don't need to be @@ -3240,7 +3244,8 @@ class GetWorkspaceWarehouseConfigResponse: """GCP only: Google Service Account used to pass to cluster to access Google Cloud Storage""" instance_profile_arn: Optional[str] = None - """AWS Only: Instance profile used to pass IAM role to the cluster""" + """AWS Only: The instance profile used to pass an IAM role to the SQL warehouses. This + configuration is also applied to the workspace's serverless compute for notebooks and jobs.""" security_policy: Optional[GetWorkspaceWarehouseConfigResponseSecurityPolicy] = None """Security policy for warehouses""" @@ -3257,6 +3262,8 @@ def as_dict(self) -> dict: body["config_param"] = self.config_param.as_dict() if self.data_access_config: body["data_access_config"] = [v.as_dict() for v in self.data_access_config] + if self.enable_serverless_compute is not None: + body["enable_serverless_compute"] = self.enable_serverless_compute if self.enabled_warehouse_types: body["enabled_warehouse_types"] = [v.as_dict() for v in self.enabled_warehouse_types] if self.global_param: @@ -3280,6 +3287,8 @@ def as_shallow_dict(self) -> dict: body["config_param"] = self.config_param if self.data_access_config: body["data_access_config"] = self.data_access_config + if self.enable_serverless_compute is not None: + body["enable_serverless_compute"] = self.enable_serverless_compute if self.enabled_warehouse_types: body["enabled_warehouse_types"] = self.enabled_warehouse_types if self.global_param: @@ -3301,6 +3310,7 @@ def from_dict(cls, d: Dict[str, Any]) -> GetWorkspaceWarehouseConfigResponse: channel=_from_dict(d, "channel", Channel), config_param=_from_dict(d, "config_param", RepeatedEndpointConfPairs), data_access_config=_repeated_dict(d, "data_access_config", EndpointConfPair), + enable_serverless_compute=d.get("enable_serverless_compute", None), enabled_warehouse_types=_repeated_dict(d, "enabled_warehouse_types", WarehouseTypePair), global_param=_from_dict(d, "global_param", RepeatedEndpointConfPairs), google_service_account=d.get("google_service_account", None), @@ -3311,7 +3321,7 @@ def from_dict(cls, d: Dict[str, Any]) -> GetWorkspaceWarehouseConfigResponse: class GetWorkspaceWarehouseConfigResponseSecurityPolicy(Enum): - """Security policy for warehouses""" + """Security policy to be used for warehouses""" DATA_ACCESS_CONTROL = "DATA_ACCESS_CONTROL" NONE = "NONE" @@ -4264,12 +4274,18 @@ def from_dict(cls, d: Dict[str, Any]) -> 
ListVisualizationsForQueryResponse: @dataclass class ListWarehousesResponse: + next_page_token: Optional[str] = None + """A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, + there are no subsequent pages.""" + warehouses: Optional[List[EndpointInfo]] = None """A list of warehouses and their configurations.""" def as_dict(self) -> dict: """Serializes the ListWarehousesResponse into a dictionary suitable for use as a JSON request body.""" body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token if self.warehouses: body["warehouses"] = [v.as_dict() for v in self.warehouses] return body @@ -4277,6 +4293,8 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the ListWarehousesResponse into a shallow dictionary of its immediate attributes.""" body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token if self.warehouses: body["warehouses"] = self.warehouses return body @@ -4284,7 +4302,9 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> ListWarehousesResponse: """Deserializes the ListWarehousesResponse from a dictionary.""" - return cls(warehouses=_repeated_dict(d, "warehouses", EndpointInfo)) + return cls( + next_page_token=d.get("next_page_token", None), warehouses=_repeated_dict(d, "warehouses", EndpointInfo) + ) @dataclass @@ -5551,6 +5571,12 @@ def from_dict(cls, d: Dict[str, Any]) -> RestoreResponse: @dataclass class ResultData: + """Contains the result data of a single chunk when using `INLINE` disposition. When using + `EXTERNAL_LINKS` disposition, the array `external_links` is used instead to provide URLs to the + result data in cloud storage. Exactly one of these alternatives is used. (While the + `external_links` array prepares the API to return multiple links in a single response. Currently + only a single link is returned.)""" + byte_count: Optional[int] = None """The number of bytes in the result chunk. This field is not available when using `INLINE` disposition.""" @@ -5567,7 +5593,7 @@ class ResultData: next_chunk_index: Optional[int] = None """When fetching, provides the `chunk_index` for the _next_ chunk. If absent, indicates there are no more chunks. The next chunk can be fetched with a - :method:statementexecution/getStatementResultChunkN request.""" + :method:statementexecution/getstatementresultchunkn request.""" next_chunk_internal_link: Optional[str] = None """When fetching, provides a link to fetch the _next_ chunk. If absent, indicates there are no more @@ -5855,7 +5881,7 @@ def from_dict(cls, d: Dict[str, Any]) -> SetResponse: class SetWorkspaceWarehouseConfigRequestSecurityPolicy(Enum): - """Security policy for warehouses""" + """Security policy to be used for warehouses""" DATA_ACCESS_CONTROL = "DATA_ACCESS_CONTROL" NONE = "NONE" @@ -5881,7 +5907,20 @@ def from_dict(cls, d: Dict[str, Any]) -> SetWorkspaceWarehouseConfigResponse: class SpotInstancePolicy(Enum): - """Configurations whether the warehouse should use spot instances.""" + """EndpointSpotInstancePolicy configures whether the endpoint should use spot instances. 
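# Illustrative sketch (not from the upstream commit): reading the new
# `enable_serverless_compute` flag from the workspace warehouse config and iterating
# warehouses. ListWarehousesResponse now carries `next_page_token`; the generated list()
# iterator is assumed to consume the token transparently, so callers simply iterate.
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

cfg = w.warehouses.get_workspace_warehouse_config()
print("serverless enabled:", cfg.enable_serverless_compute)

for warehouse in w.warehouses.list():
    print(warehouse.name, warehouse.state, warehouse.warehouse_type)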
+ + The breakdown of how the EndpointSpotInstancePolicy converts to per cloud configurations is: + + +-------+--------------------------------------+--------------------------------+ | Cloud | + COST_OPTIMIZED | RELIABILITY_OPTIMIZED | + +-------+--------------------------------------+--------------------------------+ | AWS | On + Demand Driver with Spot Executors | On Demand Driver and Executors | | AZURE | On Demand Driver + and Executors | On Demand Driver and Executors | + +-------+--------------------------------------+--------------------------------+ + + While including "spot" in the enum name may limit the the future extensibility of this field + because it limits this enum to denoting "spot or not", this is the field that PM recommends + after discussion with customers per SC-48783.""" COST_OPTIMIZED = "COST_OPTIMIZED" POLICY_UNSPECIFIED = "POLICY_UNSPECIFIED" @@ -5907,7 +5946,7 @@ def from_dict(cls, d: Dict[str, Any]) -> StartWarehouseResponse: class State(Enum): - """State of the warehouse""" + """* State of a warehouse.""" DELETED = "DELETED" DELETING = "DELETING" @@ -6011,11 +6050,6 @@ def from_dict(cls, d: Dict[str, Any]) -> StatementResponse: class StatementState(Enum): - """Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: running - - `SUCCEEDED`: execution was successful, result data available for fetch - `FAILED`: execution - failed; reason for failure described in accomanying error message - `CANCELED`: user canceled; - can come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`: - execution successful, and statement closed; result no longer available for fetch""" CANCELED = "CANCELED" CLOSED = "CLOSED" @@ -6032,6 +6066,11 @@ class StatementStatus: error: Optional[ServiceError] = None state: Optional[StatementState] = None + """Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: running - + `SUCCEEDED`: execution was successful, result data available for fetch - `FAILED`: execution + failed; reason for failure described in accompanying error message - `CANCELED`: user canceled; + can come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`: + execution successful, and statement closed; result no longer available for fetch""" def as_dict(self) -> dict: """Serializes the StatementStatus into a dictionary suitable for use as a JSON request body.""" @@ -6058,12 +6097,10 @@ def from_dict(cls, d: Dict[str, Any]) -> StatementStatus: class Status(Enum): - """Health status of the warehouse.""" DEGRADED = "DEGRADED" FAILED = "FAILED" HEALTHY = "HEALTHY" - STATUS_UNSPECIFIED = "STATUS_UNSPECIFIED" @dataclass @@ -6214,20 +6251,35 @@ def from_dict(cls, d: Dict[str, Any]) -> TerminationReason: class TerminationReasonCode(Enum): - """status code indicating why the cluster was terminated""" + """The status code indicating why the cluster was terminated""" ABUSE_DETECTED = "ABUSE_DETECTED" + ACCESS_TOKEN_FAILURE = "ACCESS_TOKEN_FAILURE" + ALLOCATION_TIMEOUT = "ALLOCATION_TIMEOUT" + ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY = "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY" + ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS = "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS" + ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS = "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS" + ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS = "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS" + ALLOCATION_TIMEOUT_NO_READY_CLUSTERS = "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS" + ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS = 
"ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS" + ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS = "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS" ATTACH_PROJECT_FAILURE = "ATTACH_PROJECT_FAILURE" AWS_AUTHORIZATION_FAILURE = "AWS_AUTHORIZATION_FAILURE" + AWS_INACCESSIBLE_KMS_KEY_FAILURE = "AWS_INACCESSIBLE_KMS_KEY_FAILURE" + AWS_INSTANCE_PROFILE_UPDATE_FAILURE = "AWS_INSTANCE_PROFILE_UPDATE_FAILURE" AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE = "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE" AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE = "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE" + AWS_INVALID_KEY_PAIR = "AWS_INVALID_KEY_PAIR" + AWS_INVALID_KMS_KEY_STATE = "AWS_INVALID_KMS_KEY_STATE" AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE = "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE" AWS_REQUEST_LIMIT_EXCEEDED = "AWS_REQUEST_LIMIT_EXCEEDED" + AWS_RESOURCE_QUOTA_EXCEEDED = "AWS_RESOURCE_QUOTA_EXCEEDED" AWS_UNSUPPORTED_FAILURE = "AWS_UNSUPPORTED_FAILURE" AZURE_BYOK_KEY_PERMISSION_FAILURE = "AZURE_BYOK_KEY_PERMISSION_FAILURE" AZURE_EPHEMERAL_DISK_FAILURE = "AZURE_EPHEMERAL_DISK_FAILURE" AZURE_INVALID_DEPLOYMENT_TEMPLATE = "AZURE_INVALID_DEPLOYMENT_TEMPLATE" AZURE_OPERATION_NOT_ALLOWED_EXCEPTION = "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION" + AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE = "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE" AZURE_QUOTA_EXCEEDED_EXCEPTION = "AZURE_QUOTA_EXCEEDED_EXCEPTION" AZURE_RESOURCE_MANAGER_THROTTLING = "AZURE_RESOURCE_MANAGER_THROTTLING" AZURE_RESOURCE_PROVIDER_THROTTLING = "AZURE_RESOURCE_PROVIDER_THROTTLING" @@ -6236,65 +6288,150 @@ class TerminationReasonCode(Enum): AZURE_VNET_CONFIGURATION_FAILURE = "AZURE_VNET_CONFIGURATION_FAILURE" BOOTSTRAP_TIMEOUT = "BOOTSTRAP_TIMEOUT" BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION = "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION" + BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG = "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG" + BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED = "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED" + BUDGET_POLICY_RESOLUTION_FAILURE = "BUDGET_POLICY_RESOLUTION_FAILURE" + CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED = "CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED" + CLOUD_ACCOUNT_SETUP_FAILURE = "CLOUD_ACCOUNT_SETUP_FAILURE" + CLOUD_OPERATION_CANCELLED = "CLOUD_OPERATION_CANCELLED" CLOUD_PROVIDER_DISK_SETUP_FAILURE = "CLOUD_PROVIDER_DISK_SETUP_FAILURE" + CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED = "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED" CLOUD_PROVIDER_LAUNCH_FAILURE = "CLOUD_PROVIDER_LAUNCH_FAILURE" + CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG = "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG" CLOUD_PROVIDER_RESOURCE_STOCKOUT = "CLOUD_PROVIDER_RESOURCE_STOCKOUT" + CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG = "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG" CLOUD_PROVIDER_SHUTDOWN = "CLOUD_PROVIDER_SHUTDOWN" + CLUSTER_OPERATION_THROTTLED = "CLUSTER_OPERATION_THROTTLED" + CLUSTER_OPERATION_TIMEOUT = "CLUSTER_OPERATION_TIMEOUT" COMMUNICATION_LOST = "COMMUNICATION_LOST" CONTAINER_LAUNCH_FAILURE = "CONTAINER_LAUNCH_FAILURE" CONTROL_PLANE_REQUEST_FAILURE = "CONTROL_PLANE_REQUEST_FAILURE" + CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG = "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG" DATABASE_CONNECTION_FAILURE = "DATABASE_CONNECTION_FAILURE" + DATA_ACCESS_CONFIG_CHANGED = "DATA_ACCESS_CONFIG_CHANGED" DBFS_COMPONENT_UNHEALTHY = "DBFS_COMPONENT_UNHEALTHY" + DISASTER_RECOVERY_REPLICATION = "DISASTER_RECOVERY_REPLICATION" + DNS_RESOLUTION_ERROR = "DNS_RESOLUTION_ERROR" + DOCKER_CONTAINER_CREATION_EXCEPTION = "DOCKER_CONTAINER_CREATION_EXCEPTION" DOCKER_IMAGE_PULL_FAILURE = 
"DOCKER_IMAGE_PULL_FAILURE" + DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION = "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION" + DOCKER_INVALID_OS_EXCEPTION = "DOCKER_INVALID_OS_EXCEPTION" + DRIVER_DNS_RESOLUTION_FAILURE = "DRIVER_DNS_RESOLUTION_FAILURE" + DRIVER_EVICTION = "DRIVER_EVICTION" + DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT" + DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE" + DRIVER_OUT_OF_DISK = "DRIVER_OUT_OF_DISK" + DRIVER_OUT_OF_MEMORY = "DRIVER_OUT_OF_MEMORY" + DRIVER_POD_CREATION_FAILURE = "DRIVER_POD_CREATION_FAILURE" + DRIVER_UNEXPECTED_FAILURE = "DRIVER_UNEXPECTED_FAILURE" + DRIVER_UNHEALTHY = "DRIVER_UNHEALTHY" DRIVER_UNREACHABLE = "DRIVER_UNREACHABLE" DRIVER_UNRESPONSIVE = "DRIVER_UNRESPONSIVE" + DYNAMIC_SPARK_CONF_SIZE_EXCEEDED = "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED" + EOS_SPARK_IMAGE = "EOS_SPARK_IMAGE" EXECUTION_COMPONENT_UNHEALTHY = "EXECUTION_COMPONENT_UNHEALTHY" + EXECUTOR_POD_UNSCHEDULED = "EXECUTOR_POD_UNSCHEDULED" + GCP_API_RATE_QUOTA_EXCEEDED = "GCP_API_RATE_QUOTA_EXCEEDED" + GCP_DENIED_BY_ORG_POLICY = "GCP_DENIED_BY_ORG_POLICY" + GCP_FORBIDDEN = "GCP_FORBIDDEN" + GCP_IAM_TIMEOUT = "GCP_IAM_TIMEOUT" + GCP_INACCESSIBLE_KMS_KEY_FAILURE = "GCP_INACCESSIBLE_KMS_KEY_FAILURE" + GCP_INSUFFICIENT_CAPACITY = "GCP_INSUFFICIENT_CAPACITY" + GCP_IP_SPACE_EXHAUSTED = "GCP_IP_SPACE_EXHAUSTED" + GCP_KMS_KEY_PERMISSION_DENIED = "GCP_KMS_KEY_PERMISSION_DENIED" + GCP_NOT_FOUND = "GCP_NOT_FOUND" GCP_QUOTA_EXCEEDED = "GCP_QUOTA_EXCEEDED" + GCP_RESOURCE_QUOTA_EXCEEDED = "GCP_RESOURCE_QUOTA_EXCEEDED" + GCP_SERVICE_ACCOUNT_ACCESS_DENIED = "GCP_SERVICE_ACCOUNT_ACCESS_DENIED" GCP_SERVICE_ACCOUNT_DELETED = "GCP_SERVICE_ACCOUNT_DELETED" + GCP_SERVICE_ACCOUNT_NOT_FOUND = "GCP_SERVICE_ACCOUNT_NOT_FOUND" + GCP_SUBNET_NOT_READY = "GCP_SUBNET_NOT_READY" + GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED = "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED" + GKE_BASED_CLUSTER_TERMINATION = "GKE_BASED_CLUSTER_TERMINATION" GLOBAL_INIT_SCRIPT_FAILURE = "GLOBAL_INIT_SCRIPT_FAILURE" HIVE_METASTORE_PROVISIONING_FAILURE = "HIVE_METASTORE_PROVISIONING_FAILURE" IMAGE_PULL_PERMISSION_DENIED = "IMAGE_PULL_PERMISSION_DENIED" INACTIVITY = "INACTIVITY" + INIT_CONTAINER_NOT_FINISHED = "INIT_CONTAINER_NOT_FINISHED" INIT_SCRIPT_FAILURE = "INIT_SCRIPT_FAILURE" INSTANCE_POOL_CLUSTER_FAILURE = "INSTANCE_POOL_CLUSTER_FAILURE" + INSTANCE_POOL_MAX_CAPACITY_REACHED = "INSTANCE_POOL_MAX_CAPACITY_REACHED" + INSTANCE_POOL_NOT_FOUND = "INSTANCE_POOL_NOT_FOUND" INSTANCE_UNREACHABLE = "INSTANCE_UNREACHABLE" + INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG = "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG" + INTERNAL_CAPACITY_FAILURE = "INTERNAL_CAPACITY_FAILURE" INTERNAL_ERROR = "INTERNAL_ERROR" INVALID_ARGUMENT = "INVALID_ARGUMENT" + INVALID_AWS_PARAMETER = "INVALID_AWS_PARAMETER" + INVALID_INSTANCE_PLACEMENT_PROTOCOL = "INVALID_INSTANCE_PLACEMENT_PROTOCOL" INVALID_SPARK_IMAGE = "INVALID_SPARK_IMAGE" + INVALID_WORKER_IMAGE_FAILURE = "INVALID_WORKER_IMAGE_FAILURE" + IN_PENALTY_BOX = "IN_PENALTY_BOX" IP_EXHAUSTION_FAILURE = "IP_EXHAUSTION_FAILURE" JOB_FINISHED = "JOB_FINISHED" + K8S_ACTIVE_POD_QUOTA_EXCEEDED = "K8S_ACTIVE_POD_QUOTA_EXCEEDED" K8S_AUTOSCALING_FAILURE = "K8S_AUTOSCALING_FAILURE" K8S_DBR_CLUSTER_LAUNCH_TIMEOUT = "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT" + LAZY_ALLOCATION_TIMEOUT = "LAZY_ALLOCATION_TIMEOUT" + MAINTENANCE_MODE = "MAINTENANCE_MODE" METASTORE_COMPONENT_UNHEALTHY = "METASTORE_COMPONENT_UNHEALTHY" NEPHOS_RESOURCE_MANAGEMENT = "NEPHOS_RESOURCE_MANAGEMENT" + NETVISOR_SETUP_TIMEOUT = "NETVISOR_SETUP_TIMEOUT" + NETWORK_CHECK_CONTROL_PLANE_FAILURE = 
"NETWORK_CHECK_CONTROL_PLANE_FAILURE" + NETWORK_CHECK_DNS_SERVER_FAILURE = "NETWORK_CHECK_DNS_SERVER_FAILURE" + NETWORK_CHECK_METADATA_ENDPOINT_FAILURE = "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE" + NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE = "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE" + NETWORK_CHECK_NIC_FAILURE = "NETWORK_CHECK_NIC_FAILURE" + NETWORK_CHECK_STORAGE_FAILURE = "NETWORK_CHECK_STORAGE_FAILURE" NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE" NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE" + NO_ACTIVATED_K8S = "NO_ACTIVATED_K8S" + NO_ACTIVATED_K8S_TESTING_TAG = "NO_ACTIVATED_K8S_TESTING_TAG" + NO_MATCHED_K8S = "NO_MATCHED_K8S" + NO_MATCHED_K8S_TESTING_TAG = "NO_MATCHED_K8S_TESTING_TAG" NPIP_TUNNEL_SETUP_FAILURE = "NPIP_TUNNEL_SETUP_FAILURE" NPIP_TUNNEL_TOKEN_FAILURE = "NPIP_TUNNEL_TOKEN_FAILURE" + POD_ASSIGNMENT_FAILURE = "POD_ASSIGNMENT_FAILURE" + POD_SCHEDULING_FAILURE = "POD_SCHEDULING_FAILURE" REQUEST_REJECTED = "REQUEST_REJECTED" REQUEST_THROTTLED = "REQUEST_THROTTLED" + RESOURCE_USAGE_BLOCKED = "RESOURCE_USAGE_BLOCKED" + SECRET_CREATION_FAILURE = "SECRET_CREATION_FAILURE" + SECRET_PERMISSION_DENIED = "SECRET_PERMISSION_DENIED" SECRET_RESOLUTION_ERROR = "SECRET_RESOLUTION_ERROR" + SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION = "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION" SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION" SELF_BOOTSTRAP_FAILURE = "SELF_BOOTSTRAP_FAILURE" + SERVERLESS_LONG_RUNNING_TERMINATED = "SERVERLESS_LONG_RUNNING_TERMINATED" SKIPPED_SLOW_NODES = "SKIPPED_SLOW_NODES" SLOW_IMAGE_DOWNLOAD = "SLOW_IMAGE_DOWNLOAD" SPARK_ERROR = "SPARK_ERROR" SPARK_IMAGE_DOWNLOAD_FAILURE = "SPARK_IMAGE_DOWNLOAD_FAILURE" + SPARK_IMAGE_DOWNLOAD_THROTTLED = "SPARK_IMAGE_DOWNLOAD_THROTTLED" + SPARK_IMAGE_NOT_FOUND = "SPARK_IMAGE_NOT_FOUND" SPARK_STARTUP_FAILURE = "SPARK_STARTUP_FAILURE" SPOT_INSTANCE_TERMINATION = "SPOT_INSTANCE_TERMINATION" + SSH_BOOTSTRAP_FAILURE = "SSH_BOOTSTRAP_FAILURE" STORAGE_DOWNLOAD_FAILURE = "STORAGE_DOWNLOAD_FAILURE" + STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG = "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG" + STORAGE_DOWNLOAD_FAILURE_SLOW = "STORAGE_DOWNLOAD_FAILURE_SLOW" + STORAGE_DOWNLOAD_FAILURE_THROTTLED = "STORAGE_DOWNLOAD_FAILURE_THROTTLED" STS_CLIENT_SETUP_FAILURE = "STS_CLIENT_SETUP_FAILURE" SUBNET_EXHAUSTED_FAILURE = "SUBNET_EXHAUSTED_FAILURE" TEMPORARILY_UNAVAILABLE = "TEMPORARILY_UNAVAILABLE" TRIAL_EXPIRED = "TRIAL_EXPIRED" UNEXPECTED_LAUNCH_FAILURE = "UNEXPECTED_LAUNCH_FAILURE" + UNEXPECTED_POD_RECREATION = "UNEXPECTED_POD_RECREATION" UNKNOWN = "UNKNOWN" UNSUPPORTED_INSTANCE_TYPE = "UNSUPPORTED_INSTANCE_TYPE" UPDATE_INSTANCE_PROFILE_FAILURE = "UPDATE_INSTANCE_PROFILE_FAILURE" + USAGE_POLICY_ENTITLEMENT_DENIED = "USAGE_POLICY_ENTITLEMENT_DENIED" + USER_INITIATED_VM_TERMINATION = "USER_INITIATED_VM_TERMINATION" USER_REQUEST = "USER_REQUEST" WORKER_SETUP_FAILURE = "WORKER_SETUP_FAILURE" WORKSPACE_CANCELLED_ERROR = "WORKSPACE_CANCELLED_ERROR" WORKSPACE_CONFIGURATION_ERROR = "WORKSPACE_CONFIGURATION_ERROR" + WORKSPACE_UPDATE = "WORKSPACE_UPDATE" class TerminationReasonType(Enum): @@ -7008,12 +7145,14 @@ def from_dict(cls, d: Dict[str, Any]) -> WarehousePermissionsDescription: @dataclass class WarehouseTypePair: + """* Configuration values to enable or disable the access to specific warehouse types in the + workspace.""" + enabled: Optional[bool] = None """If set to false the specific warehouse type will not be be allowed as a value for warehouse_type in CreateWarehouse and EditWarehouse""" warehouse_type: 
Optional[WarehouseTypePairWarehouseType] = None - """Warehouse type: `PRO` or `CLASSIC`.""" def as_dict(self) -> dict: """Serializes the WarehouseTypePair into a dictionary suitable for use as a JSON request body.""" @@ -7042,7 +7181,6 @@ def from_dict(cls, d: Dict[str, Any]) -> WarehouseTypePair: class WarehouseTypePairWarehouseType(Enum): - """Warehouse type: `PRO` or `CLASSIC`.""" CLASSIC = "CLASSIC" PRO = "PRO" @@ -8819,17 +8957,17 @@ class StatementExecutionAPI: the statement execution has not yet finished. This can be set to either `CONTINUE`, to fallback to asynchronous mode, or it can be set to `CANCEL`, which cancels the statement. - In summary: - Synchronous mode - `wait_timeout=30s` and `on_wait_timeout=CANCEL` - The call waits up to 30 - seconds; if the statement execution finishes within this time, the result data is returned directly in the - response. If the execution takes longer than 30 seconds, the execution is canceled and the call returns - with a `CANCELED` state. - Asynchronous mode - `wait_timeout=0s` (`on_wait_timeout` is ignored) - The call - doesn't wait for the statement to finish but returns directly with a statement ID. The status of the - statement execution can be polled by issuing :method:statementexecution/getStatement with the statement - ID. Once the execution has succeeded, this call also returns the result and metadata in the response. - - Hybrid mode (default) - `wait_timeout=10s` and `on_wait_timeout=CONTINUE` - The call waits for up to 10 - seconds; if the statement execution finishes within this time, the result data is returned directly in the - response. If the execution takes longer than 10 seconds, a statement ID is returned. The statement ID can - be used to fetch status and results in the same way as in the asynchronous mode. + In summary: - **Synchronous mode** (`wait_timeout=30s` and `on_wait_timeout=CANCEL`): The call waits up to + 30 seconds; if the statement execution finishes within this time, the result data is returned directly in + the response. If the execution takes longer than 30 seconds, the execution is canceled and the call + returns with a `CANCELED` state. - **Asynchronous mode** (`wait_timeout=0s` and `on_wait_timeout` is + ignored): The call doesn't wait for the statement to finish but returns directly with a statement ID. The + status of the statement execution can be polled by issuing :method:statementexecution/getStatement with + the statement ID. Once the execution has succeeded, this call also returns the result and metadata in the + response. - **[Default] Hybrid mode** (`wait_timeout=10s` and `on_wait_timeout=CONTINUE`): The call waits + for up to 10 seconds; if the statement execution finishes within this time, the result data is returned + directly in the response. If the execution takes longer than 10 seconds, a statement ID is returned. The + statement ID can be used to fetch status and results in the same way as in the asynchronous mode. Depending on the size, the result can be split into multiple chunks. If the statement execution is successful, the statement response contains a manifest and the first chunk of the result. The manifest @@ -8884,7 +9022,7 @@ def __init__(self, api_client): def cancel_execution(self, statement_id: str): """Requests that an executing statement be canceled. Callers must poll for status to see the terminal - state. + state. Cancel response is empty; receiving response indicates successful receipt. 
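As a quick illustration of the synchronous/asynchronous/hybrid behavior described above, here is a minimal sketch using this SDK; the environment variable and the query are placeholders for illustration, not part of this change:

```
import os
import time

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.sql import StatementState

w = WorkspaceClient()

# Hybrid mode (the default): wait up to 10 seconds, then fall back to a statement ID.
resp = w.statement_execution.execute_statement(
    statement="SELECT 1 AS n",
    warehouse_id=os.environ["DATABRICKS_WAREHOUSE_ID"],  # placeholder environment variable
    wait_timeout="10s",
)

# If the statement did not finish inside the wait window, poll for a terminal state.
while resp.status.state in (StatementState.PENDING, StatementState.RUNNING):
    time.sleep(5)
    resp = w.statement_execution.get_statement(resp.statement_id)

if resp.status.state == StatementState.SUCCEEDED:
    print(resp.result.data_array)
else:
    # A statement that is still needed no longer can also be abandoned explicitly:
    # w.statement_execution.cancel_execution(resp.statement_id)
    print(resp.status.state, resp.status.error)
```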
:param statement_id: str The statement ID is returned upon successfully submitting a SQL statement, and is a required @@ -8912,7 +9050,52 @@ def execute_statement( schema: Optional[str] = None, wait_timeout: Optional[str] = None, ) -> StatementResponse: - """Execute a SQL statement + """Execute a SQL statement and optionally await its results for a specified time. + + **Use case: small result sets with INLINE + JSON_ARRAY** + + For flows that generate small and predictable result sets (<= 25 MiB), `INLINE` responses of + `JSON_ARRAY` result data are typically the simplest way to execute and fetch result data. + + **Use case: large result sets with EXTERNAL_LINKS** + + Using `EXTERNAL_LINKS` to fetch result data allows you to fetch large result sets efficiently. The + main differences from using `INLINE` disposition are that the result data is accessed with URLs, and + that there are 3 supported formats: `JSON_ARRAY`, `ARROW_STREAM` and `CSV` compared to only + `JSON_ARRAY` with `INLINE`. + + **URLs** + + External links point to data stored within your workspace's internal storage, in the form of a URL. + The URLs are valid for only a short period, <= 15 minutes. Alongside each `external_link` is an + expiration field indicating the time at which the URL is no longer valid. In `EXTERNAL_LINKS` mode, + chunks can be resolved and fetched multiple times and in parallel. + + ---- + + ### **Warning: Databricks strongly recommends that you protect the URLs that are returned by the + `EXTERNAL_LINKS` disposition.** + + When you use the `EXTERNAL_LINKS` disposition, a short-lived URL is generated, which can be used to + download the results directly from cloud storage. As a short-lived credential is embedded in this URL, you should protect the + URL. + + Because URLs are already generated with embedded temporary credentials, you must not set an `Authorization` + header in the download requests. + + The `EXTERNAL_LINKS` disposition can be disabled upon request by creating a support case. + + See also [Security best practices]. + + ---- + + StatementResponse contains `statement_id` and `status`; other fields might be absent or present + depending on context. If the SQL warehouse fails to execute the provided statement, a 200 response is + returned with `status.state` set to `FAILED` (in contrast to a failure when accepting the request, + which results in a non-200 response). Details of the error can be found at `status.error` in case of + execution failures. + + [Security best practices]: https://docs.databricks.com/sql/admin/sql-execution-tutorial.html#security-best-practices :param statement: str The SQL statement to execute. The statement can optionally be parameterized, see `parameters`. The @@ -8926,12 +9109,32 @@ def execute_statement( representations and might not match the final size in the requested `format`. If the result was truncated due to the byte limit, then `truncated` in the response is set to `true`. When using `EXTERNAL_LINKS` disposition, a default `byte_limit` of 100 GiB is applied if `byte_limit` is not - explcitly set. + explicitly set. :param catalog: str (optional) Sets default catalog for statement execution, similar to [`USE CATALOG`] in SQL. [`USE CATALOG`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html :param disposition: :class:`Disposition` (optional) + The fetch disposition provides two modes of fetching results: `INLINE` and `EXTERNAL_LINKS`. 
+ + Statements executed with `INLINE` disposition will return result data inline, in `JSON_ARRAY` + format, in a series of chunks. If a given statement produces a result set with a size larger than 25 + MiB, that statement execution is aborted, and no result set will be available. + + **NOTE** Byte limits are computed based upon internal representations of the result set data, and + might not match the sizes visible in JSON responses. + + Statements executed with `EXTERNAL_LINKS` disposition will return result data as external links: + URLs that point to cloud storage internal to the workspace. Using `EXTERNAL_LINKS` disposition + allows statements to generate arbitrarily sized result sets for fetching up to 100 GiB. The + resulting links have two important properties: + + 1. They point to resources _external_ to the Databricks compute; therefore any associated + authentication information (typically a personal access token, OAuth token, or similar) _must be + removed_ when fetching from these links. + + 2. These are URLs with a specific expiration, indicated in the response. The behavior when + attempting to use an expired link is cloud specific. :param format: :class:`Format` (optional) Statement execution supports three result formats: `JSON_ARRAY` (default), `ARROW_STREAM`, and `CSV`. @@ -8982,13 +9185,13 @@ def execute_statement( For example, the following statement contains two parameters, `my_name` and `my_date`: - SELECT * FROM my_table WHERE name = :my_name AND date = :my_date + ``` SELECT * FROM my_table WHERE name = :my_name AND date = :my_date ``` The parameters can be passed in the request body as follows: - { ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND date = :my_date", + ` { ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND date = :my_date", "parameters": [ { "name": "my_name", "value": "the name" }, { "name": "my_date", "value": - "2020-01-01", "type": "DATE" } ] } + "2020-01-01", "type": "DATE" } ] } ` Currently, positional parameters denoted by a `?` marker are not supported by the Databricks SQL Statement Execution API. @@ -9049,15 +9252,16 @@ def execute_statement( "Content-Type": "application/json", } - res = self._api.do("POST", "/api/2.0/sql/statements/", body=body, headers=headers) + res = self._api.do("POST", "/api/2.0/sql/statements", body=body, headers=headers) return StatementResponse.from_dict(res) def get_statement(self, statement_id: str) -> StatementResponse: - """This request can be used to poll for the statement's status. When the `status.state` field is - `SUCCEEDED` it will also return the result manifest and the first chunk of the result data. When the - statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 with the - state set. After at least 12 hours in terminal state, the statement is removed from the warehouse and - further calls will receive an HTTP 404 response. + """This request can be used to poll for the statement's status. StatementResponse contains `statement_id` + and `status`; other fields might be absent or present depending on context. When the `status.state` + field is `SUCCEEDED` it will also return the result manifest and the first chunk of the result data. + When the statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 + with the state set. After at least 12 hours in terminal state, the statement is removed from the + warehouse and further calls will receive an HTTP 404 response. 
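To make the `EXTERNAL_LINKS` flow above concrete, here is a minimal sketch that fetches a large result as CSV chunks. It assumes the statement finishes within the wait window (otherwise poll `get_statement` first); the query and environment variable are placeholders:

```
import os
import urllib.request

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.sql import Disposition, Format

w = WorkspaceClient()

resp = w.statement_execution.execute_statement(
    statement="SELECT * FROM samples.nyctaxi.trips",     # placeholder query
    warehouse_id=os.environ["DATABRICKS_WAREHOUSE_ID"],  # placeholder environment variable
    disposition=Disposition.EXTERNAL_LINKS,
    format=Format.CSV,
    wait_timeout="30s",
)

chunk = resp.result
while chunk is not None:
    for link in chunk.external_links or []:
        # The URL embeds its own temporary credential: do NOT attach an Authorization header.
        with urllib.request.urlopen(link.external_link) as r:
            print(f"chunk {link.chunk_index}: {len(r.read())} bytes")
    if chunk.next_chunk_index is None:
        break
    chunk = w.statement_execution.get_statement_result_chunk_n(resp.statement_id, chunk.next_chunk_index)
```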
**NOTE** This call currently might take up to 5 seconds to get the latest status and result. @@ -9082,6 +9286,7 @@ def get_statement_result_chunk_n(self, statement_id: str, chunk_index: int) -> R can be used to fetch subsequent chunks. The response structure is identical to the nested `result` element described in the :method:statementexecution/getStatement request, and similarly includes the `next_chunk_index` and `next_chunk_internal_link` fields for simple iteration through the result set. + Depending on `disposition`, the response returns chunks of data either inline, or as links. :param statement_id: str The statement ID is returned upon successfully submitting a SQL statement, and is a required @@ -9192,8 +9397,7 @@ def create( The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped. - Supported values: - Must be >= 0 mins for serverless warehouses - Must be == 0 or >= 10 mins for - non-serverless warehouses - 0 indicates no autostop. + Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop. Defaults to 120 mins :param channel: :class:`Channel` (optional) @@ -9218,7 +9422,7 @@ def create( :param max_num_clusters: int (optional) Maximum number of clusters that the autoscaler will create to handle concurrent queries. - Supported values: - Must be >= min_num_clusters - Must be <= 30. + Supported values: - Must be >= min_num_clusters - Must be <= 40. Defaults to min_clusters if unset. :param min_num_clusters: int (optional) @@ -9234,12 +9438,15 @@ def create( Supported values: - Must be unique within an org. - Must be less than 100 characters. :param spot_instance_policy: :class:`SpotInstancePolicy` (optional) + Configurations whether the endpoint should use spot instances. :param tags: :class:`EndpointTags` (optional) A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. Supported values: - Number of tags < 45. :param warehouse_type: :class:`CreateWarehouseRequestWarehouseType` (optional) + Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and + also set the field `enable_serverless_compute` to `true`. :returns: Long-running operation waiter for :class:`GetWarehouseResponse`. @@ -9378,13 +9585,13 @@ def edit( Defaults to false. :param enable_serverless_compute: bool (optional) - Configures whether the warehouse should use serverless compute. + Configures whether the warehouse should use serverless compute :param instance_profile_arn: str (optional) Deprecated. Instance profile used to pass IAM role to the cluster :param max_num_clusters: int (optional) Maximum number of clusters that the autoscaler will create to handle concurrent queries. - Supported values: - Must be >= min_num_clusters - Must be <= 30. + Supported values: - Must be >= min_num_clusters - Must be <= 40. Defaults to min_clusters if unset. :param min_num_clusters: int (optional) @@ -9400,12 +9607,15 @@ def edit( Supported values: - Must be unique within an org. - Must be less than 100 characters. :param spot_instance_policy: :class:`SpotInstancePolicy` (optional) + Configurations whether the endpoint should use spot instances. :param tags: :class:`EndpointTags` (optional) A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. Supported values: - Number of tags < 45. 
:param warehouse_type: :class:`EditWarehouseRequestWarehouseType` (optional) + Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and + also set the field `enable_serverless_compute` to `true`. :returns: Long-running operation waiter for :class:`GetWarehouseResponse`. @@ -9444,7 +9654,7 @@ def edit( } op_response = self._api.do("POST", f"/api/2.0/sql/warehouses/{id}/edit", body=body, headers=headers) - return Wait(self.wait_get_warehouse_running, response=EditWarehouseResponse.from_dict(op_response), id=id) + return Wait(self.wait_get_warehouse_running, id=id) def edit_and_wait( self, @@ -9545,26 +9755,45 @@ def get_workspace_warehouse_config(self) -> GetWorkspaceWarehouseConfigResponse: res = self._api.do("GET", "/api/2.0/sql/config/warehouses", headers=headers) return GetWorkspaceWarehouseConfigResponse.from_dict(res) - def list(self, *, run_as_user_id: Optional[int] = None) -> Iterator[EndpointInfo]: + def list( + self, *, page_size: Optional[int] = None, page_token: Optional[str] = None, run_as_user_id: Optional[int] = None + ) -> Iterator[EndpointInfo]: """Lists all SQL warehouses that a user has access to. + :param page_size: int (optional) + The max number of warehouses to return. + :param page_token: str (optional) + A page token, received from a previous `ListWarehouses` call. Provide this to retrieve the + subsequent page; otherwise the first will be retrieved. + + When paginating, all other parameters provided to `ListWarehouses` must match the call that provided + the page token. :param run_as_user_id: int (optional) - Service Principal which will be used to fetch the list of warehouses. If not specified, the user - from the session header is used. + Service Principal which will be used to fetch the list of endpoints. If not specified, SQL Gateway + will use the user from the session header. 
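For reference, the paginated `list()` documented above can be consumed directly as an iterator; the SDK follows `next_page_token` internally, so `page_size` only affects page granularity. A minimal sketch:

```
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Iterate all SQL warehouses the caller can see, fetching 50 per page under the hood.
for wh in w.warehouses.list(page_size=50):
    print(wh.id, wh.name, wh.state)
```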
:returns: Iterator over :class:`EndpointInfo` """ query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token if run_as_user_id is not None: query["run_as_user_id"] = run_as_user_id headers = { "Accept": "application/json", } - json = self._api.do("GET", "/api/2.0/sql/warehouses", query=query, headers=headers) - parsed = ListWarehousesResponse.from_dict(json).warehouses - return parsed if parsed is not None else [] + while True: + json = self._api.do("GET", "/api/2.0/sql/warehouses", query=query, headers=headers) + if "warehouses" in json: + for v in json["warehouses"]: + yield EndpointInfo.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] def set_permissions( self, warehouse_id: str, *, access_control_list: Optional[List[WarehouseAccessControlRequest]] = None @@ -9595,6 +9824,7 @@ def set_workspace_warehouse_config( channel: Optional[Channel] = None, config_param: Optional[RepeatedEndpointConfPairs] = None, data_access_config: Optional[List[EndpointConfPair]] = None, + enable_serverless_compute: Optional[bool] = None, enabled_warehouse_types: Optional[List[WarehouseTypePair]] = None, global_param: Optional[RepeatedEndpointConfPairs] = None, google_service_account: Optional[str] = None, @@ -9610,6 +9840,8 @@ def set_workspace_warehouse_config( Deprecated: Use sql_configuration_parameters :param data_access_config: List[:class:`EndpointConfPair`] (optional) Spark confs for external hive metastore configuration JSON serialized size must be less than <= 512K + :param enable_serverless_compute: bool (optional) + Enable Serverless compute for SQL warehouses :param enabled_warehouse_types: List[:class:`WarehouseTypePair`] (optional) List of Warehouse Types allowed in this workspace (limits allowed value of the type field in CreateWarehouse and EditWarehouse). Note: Some types cannot be disabled, they don't need to be @@ -9621,7 +9853,8 @@ def set_workspace_warehouse_config( :param google_service_account: str (optional) GCP only: Google Service Account used to pass to cluster to access Google Cloud Storage :param instance_profile_arn: str (optional) - AWS Only: Instance profile used to pass IAM role to the cluster + AWS Only: The instance profile used to pass an IAM role to the SQL warehouses. This configuration is + also applied to the workspace's serverless compute for notebooks and jobs. 
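Tying together the new `enable_serverless_compute` setting and the `warehouse_type` guidance above, a serverless warehouse is requested by combining `PRO` with the serverless flag. A minimal sketch with illustrative name and sizing:

```
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.sql import CreateWarehouseRequestWarehouseType

w = WorkspaceClient()

# create() returns a long-running operation waiter; .result() blocks until the warehouse is RUNNING.
wh = w.warehouses.create(
    name="sdk-example-serverless",  # illustrative name
    cluster_size="2X-Small",
    warehouse_type=CreateWarehouseRequestWarehouseType.PRO,
    enable_serverless_compute=True,
    auto_stop_mins=10,
    max_num_clusters=1,
).result()

print(wh.id, wh.state)
```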
:param security_policy: :class:`SetWorkspaceWarehouseConfigRequestSecurityPolicy` (optional) Security policy for warehouses :param sql_configuration_parameters: :class:`RepeatedEndpointConfPairs` (optional) @@ -9636,6 +9869,8 @@ def set_workspace_warehouse_config( body["config_param"] = config_param.as_dict() if data_access_config is not None: body["data_access_config"] = [v.as_dict() for v in data_access_config] + if enable_serverless_compute is not None: + body["enable_serverless_compute"] = enable_serverless_compute if enabled_warehouse_types is not None: body["enabled_warehouse_types"] = [v.as_dict() for v in enabled_warehouse_types] if global_param is not None: @@ -9671,7 +9906,7 @@ def start(self, id: str) -> Wait[GetWarehouseResponse]: } op_response = self._api.do("POST", f"/api/2.0/sql/warehouses/{id}/start", headers=headers) - return Wait(self.wait_get_warehouse_running, response=StartWarehouseResponse.from_dict(op_response), id=id) + return Wait(self.wait_get_warehouse_running, id=id) def start_and_wait(self, id: str, timeout=timedelta(minutes=20)) -> GetWarehouseResponse: return self.start(id=id).result(timeout=timeout) @@ -9692,7 +9927,7 @@ def stop(self, id: str) -> Wait[GetWarehouseResponse]: } op_response = self._api.do("POST", f"/api/2.0/sql/warehouses/{id}/stop", headers=headers) - return Wait(self.wait_get_warehouse_stopped, response=StopWarehouseResponse.from_dict(op_response), id=id) + return Wait(self.wait_get_warehouse_stopped, id=id) def stop_and_wait(self, id: str, timeout=timedelta(minutes=20)) -> GetWarehouseResponse: return self.stop(id=id).result(timeout=timeout) diff --git a/databricks/sdk/service/tags.py b/databricks/sdk/service/tags.py index 9fa90681a..f643984ed 100755 --- a/databricks/sdk/service/tags.py +++ b/databricks/sdk/service/tags.py @@ -6,7 +6,7 @@ from dataclasses import dataclass from typing import Any, Dict, Iterator, List, Optional -from ._internal import _repeated_dict +from databricks.sdk.service._internal import _repeated_dict _LOG = logging.getLogger("databricks.sdk") diff --git a/databricks/sdk/service/vectorsearch.py b/databricks/sdk/service/vectorsearch.py index 8e706ccd6..4b75889f1 100755 --- a/databricks/sdk/service/vectorsearch.py +++ b/databricks/sdk/service/vectorsearch.py @@ -10,8 +10,10 @@ from enum import Enum from typing import Any, Callable, Dict, Iterator, List, Optional +from databricks.sdk.service._internal import (Wait, _enum, _from_dict, + _repeated_dict) + from ..errors import OperationFailed -from ._internal import Wait, _enum, _from_dict, _repeated_dict _LOG = logging.getLogger("databricks.sdk") diff --git a/databricks/sdk/service/workspace.py b/databricks/sdk/service/workspace.py index cab860d9c..1a6f39588 100755 --- a/databricks/sdk/service/workspace.py +++ b/databricks/sdk/service/workspace.py @@ -7,7 +7,7 @@ from enum import Enum from typing import Any, Dict, Iterator, List, Optional -from ._internal import _enum, _from_dict, _repeated_dict +from databricks.sdk.service._internal import _enum, _from_dict, _repeated_dict _LOG = logging.getLogger("databricks.sdk") @@ -100,9 +100,17 @@ class CreateCredentialsResponse: git_provider: str """The Git provider associated with the credential.""" + git_email: Optional[str] = None + """The authenticating email associated with your Git provider user account. Used for authentication + with the remote repository and also sets the author & committer identity for commits. Required + for most Git providers except AWS CodeCommit. 
Learn more at + https://docs.databricks.com/aws/en/repos/get-access-tokens-from-git-provider""" + git_username: Optional[str] = None - """The username or email provided with your Git provider account and associated with the - credential.""" + """The username provided with your Git provider account and associated with the credential. For + most Git providers it is only used to set the Git committer & author names for commits, however + it may be required for authentication depending on your Git provider / token requirements. + Required for AWS CodeCommit.""" is_default_for_provider: Optional[bool] = None """if the credential is the default for the given provider""" @@ -115,6 +123,8 @@ def as_dict(self) -> dict: body = {} if self.credential_id is not None: body["credential_id"] = self.credential_id + if self.git_email is not None: + body["git_email"] = self.git_email if self.git_provider is not None: body["git_provider"] = self.git_provider if self.git_username is not None: @@ -130,6 +140,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.credential_id is not None: body["credential_id"] = self.credential_id + if self.git_email is not None: + body["git_email"] = self.git_email if self.git_provider is not None: body["git_provider"] = self.git_provider if self.git_username is not None: @@ -145,6 +157,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateCredentialsResponse: """Deserializes the CreateCredentialsResponse from a dictionary.""" return cls( credential_id=d.get("credential_id", None), + git_email=d.get("git_email", None), git_provider=d.get("git_provider", None), git_username=d.get("git_username", None), is_default_for_provider=d.get("is_default_for_provider", None), @@ -250,12 +263,20 @@ class CredentialInfo: credential_id: int """ID of the credential object in the workspace.""" + git_email: Optional[str] = None + """The authenticating email associated with your Git provider user account. Used for authentication + with the remote repository and also sets the author & committer identity for commits. Required + for most Git providers except AWS CodeCommit. Learn more at + https://docs.databricks.com/aws/en/repos/get-access-tokens-from-git-provider""" + git_provider: Optional[str] = None """The Git provider associated with the credential.""" git_username: Optional[str] = None - """The username or email provided with your Git provider account and associated with the - credential.""" + """The username provided with your Git provider account and associated with the credential. For + most Git providers it is only used to set the Git committer & author names for commits, however + it may be required for authentication depending on your Git provider / token requirements. 
+ Required for AWS CodeCommit.""" is_default_for_provider: Optional[bool] = None """if the credential is the default for the given provider""" @@ -268,6 +289,8 @@ def as_dict(self) -> dict: body = {} if self.credential_id is not None: body["credential_id"] = self.credential_id + if self.git_email is not None: + body["git_email"] = self.git_email if self.git_provider is not None: body["git_provider"] = self.git_provider if self.git_username is not None: @@ -283,6 +306,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.credential_id is not None: body["credential_id"] = self.credential_id + if self.git_email is not None: + body["git_email"] = self.git_email if self.git_provider is not None: body["git_provider"] = self.git_provider if self.git_username is not None: @@ -298,6 +323,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CredentialInfo: """Deserializes the CredentialInfo from a dictionary.""" return cls( credential_id=d.get("credential_id", None), + git_email=d.get("git_email", None), git_provider=d.get("git_provider", None), git_username=d.get("git_username", None), is_default_for_provider=d.get("is_default_for_provider", None), @@ -466,12 +492,20 @@ class GetCredentialsResponse: credential_id: int """ID of the credential object in the workspace.""" + git_email: Optional[str] = None + """The authenticating email associated with your Git provider user account. Used for authentication + with the remote repository and also sets the author & committer identity for commits. Required + for most Git providers except AWS CodeCommit. Learn more at + https://docs.databricks.com/aws/en/repos/get-access-tokens-from-git-provider""" + git_provider: Optional[str] = None """The Git provider associated with the credential.""" git_username: Optional[str] = None - """The username or email provided with your Git provider account and associated with the - credential.""" + """The username provided with your Git provider account and associated with the credential. For + most Git providers it is only used to set the Git committer & author names for commits, however + it may be required for authentication depending on your Git provider / token requirements. 
+ Required for AWS CodeCommit.""" is_default_for_provider: Optional[bool] = None """if the credential is the default for the given provider""" @@ -484,6 +518,8 @@ def as_dict(self) -> dict: body = {} if self.credential_id is not None: body["credential_id"] = self.credential_id + if self.git_email is not None: + body["git_email"] = self.git_email if self.git_provider is not None: body["git_provider"] = self.git_provider if self.git_username is not None: @@ -499,6 +535,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.credential_id is not None: body["credential_id"] = self.credential_id + if self.git_email is not None: + body["git_email"] = self.git_email if self.git_provider is not None: body["git_provider"] = self.git_provider if self.git_username is not None: @@ -514,6 +552,7 @@ def from_dict(cls, d: Dict[str, Any]) -> GetCredentialsResponse: """Deserializes the GetCredentialsResponse from a dictionary.""" return cls( credential_id=d.get("credential_id", None), + git_email=d.get("git_email", None), git_provider=d.get("git_provider", None), git_username=d.get("git_username", None), is_default_for_provider=d.get("is_default_for_provider", None), @@ -1765,6 +1804,7 @@ def create( self, git_provider: str, *, + git_email: Optional[str] = None, git_username: Optional[str] = None, is_default_for_provider: Optional[bool] = None, name: Optional[str] = None, @@ -1778,12 +1818,16 @@ def create( Git provider. This field is case-insensitive. The available Git providers are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, `gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and `awsCodeCommit`. + :param git_email: str (optional) + The authenticating email associated with your Git provider user account. Used for authentication + with the remote repository and also sets the author & committer identity for commits. Required for + most Git providers except AWS CodeCommit. Learn more at + https://docs.databricks.com/aws/en/repos/get-access-tokens-from-git-provider :param git_username: str (optional) - The username or email provided with your Git provider account, depending on which provider you are - using. For GitHub, GitHub Enterprise Server, or Azure DevOps Services, either email or username may - be used. For GitLab, GitLab Enterprise Edition, email must be used. For AWS CodeCommit, BitBucket or - BitBucket Server, username must be used. For all other providers please see your provider's Personal - Access Token authentication documentation to see what is supported. + The username provided with your Git provider account and associated with the credential. For most + Git providers it is only used to set the Git committer & author names for commits, however it may be + required for authentication depending on your Git provider / token requirements. Required for AWS + CodeCommit. :param is_default_for_provider: bool (optional) if the credential is the default for the given provider :param name: str (optional) @@ -1797,6 +1841,8 @@ def create( :returns: :class:`CreateCredentialsResponse` """ body = {} + if git_email is not None: + body["git_email"] = git_email if git_provider is not None: body["git_provider"] = git_provider if git_username is not None: @@ -1866,6 +1912,7 @@ def update( credential_id: int, git_provider: str, *, + git_email: Optional[str] = None, git_username: Optional[str] = None, is_default_for_provider: Optional[bool] = None, name: Optional[str] = None, @@ -1879,12 +1926,16 @@ def update( Git provider. This field is case-insensitive. 
The available Git providers are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, `gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and `awsCodeCommit`. + :param git_email: str (optional) + The authenticating email associated with your Git provider user account. Used for authentication + with the remote repository and also sets the author & committer identity for commits. Required for + most Git providers except AWS CodeCommit. Learn more at + https://docs.databricks.com/aws/en/repos/get-access-tokens-from-git-provider :param git_username: str (optional) - The username or email provided with your Git provider account, depending on which provider you are - using. For GitHub, GitHub Enterprise Server, or Azure DevOps Services, either email or username may - be used. For GitLab, GitLab Enterprise Edition, email must be used. For AWS CodeCommit, BitBucket or - BitBucket Server, username must be used. For all other providers please see your provider's Personal - Access Token authentication documentation to see what is supported. + The username provided with your Git provider account and associated with the credential. For most + Git providers it is only used to set the Git committer & author names for commits, however it may be + required for authentication depending on your Git provider / token requirements. Required for AWS + CodeCommit. :param is_default_for_provider: bool (optional) if the credential is the default for the given provider :param name: str (optional) @@ -1898,6 +1949,8 @@ def update( """ body = {} + if git_email is not None: + body["git_email"] = git_email if git_provider is not None: body["git_provider"] = git_provider if git_username is not None: diff --git a/docs/account/catalog/metastore_assignments.rst b/docs/account/catalog/metastore_assignments.rst index 4463ef712..db3913d3f 100644 --- a/docs/account/catalog/metastore_assignments.rst +++ b/docs/account/catalog/metastore_assignments.rst @@ -6,7 +6,7 @@ These APIs manage metastore assignments to a workspace. - .. py:method:: create(workspace_id: int, metastore_id: str [, metastore_assignment: Optional[CreateMetastoreAssignment]]) + .. py:method:: create(workspace_id: int, metastore_id: str [, metastore_assignment: Optional[CreateMetastoreAssignment]]) -> AccountsCreateMetastoreAssignmentResponse Creates an assignment to a metastore for a workspace @@ -16,10 +16,10 @@ Unity Catalog metastore ID :param metastore_assignment: :class:`CreateMetastoreAssignment` (optional) - + :returns: :class:`AccountsCreateMetastoreAssignmentResponse` - .. py:method:: delete(workspace_id: int, metastore_id: str) + .. py:method:: delete(workspace_id: int, metastore_id: str) -> AccountsDeleteMetastoreAssignmentResponse Deletes a metastore assignment to a workspace, leaving the workspace with no metastore. @@ -28,14 +28,14 @@ :param metastore_id: str Unity Catalog metastore ID - + :returns: :class:`AccountsDeleteMetastoreAssignmentResponse` .. py:method:: get(workspace_id: int) -> AccountsMetastoreAssignment Gets the metastore assignment, if any, for the workspace specified by ID. If the workspace is assigned - a metastore, the mappig will be returned. If no metastore is assigned to the workspace, the assignment - will not be found and a 404 returned. + a metastore, the mapping will be returned. If no metastore is assigned to the workspace, the + assignment will not be found and a 404 returned. :param workspace_id: int Workspace ID. @@ -66,7 +66,7 @@ :returns: Iterator over int - .. 
py:method:: update(workspace_id: int, metastore_id: str [, metastore_assignment: Optional[UpdateMetastoreAssignment]]) + .. py:method:: update(workspace_id: int, metastore_id: str [, metastore_assignment: Optional[UpdateMetastoreAssignment]]) -> AccountsUpdateMetastoreAssignmentResponse Updates an assignment to a metastore for a workspace. Currently, only the default catalog may be updated. @@ -77,5 +77,5 @@ Unity Catalog metastore ID :param metastore_assignment: :class:`UpdateMetastoreAssignment` (optional) - + :returns: :class:`AccountsUpdateMetastoreAssignmentResponse` \ No newline at end of file diff --git a/docs/account/catalog/metastores.rst b/docs/account/catalog/metastores.rst index bb34cf6eb..0d1c01e07 100644 --- a/docs/account/catalog/metastores.rst +++ b/docs/account/catalog/metastores.rst @@ -7,16 +7,16 @@ These APIs manage Unity Catalog metastores for an account. A metastore contains catalogs that can be associated with workspaces - .. py:method:: create( [, metastore_info: Optional[CreateMetastore]]) -> AccountsMetastoreInfo + .. py:method:: create( [, metastore_info: Optional[CreateAccountsMetastore]]) -> AccountsCreateMetastoreResponse Creates a Unity Catalog metastore. - :param metastore_info: :class:`CreateMetastore` (optional) + :param metastore_info: :class:`CreateAccountsMetastore` (optional) - :returns: :class:`AccountsMetastoreInfo` + :returns: :class:`AccountsCreateMetastoreResponse` - .. py:method:: delete(metastore_id: str [, force: Optional[bool]]) + .. py:method:: delete(metastore_id: str [, force: Optional[bool]]) -> AccountsDeleteMetastoreResponse Deletes a Unity Catalog metastore for an account, both specified by ID. @@ -25,17 +25,17 @@ :param force: bool (optional) Force deletion even if the metastore is not empty. Default is false. - + :returns: :class:`AccountsDeleteMetastoreResponse` - .. py:method:: get(metastore_id: str) -> AccountsMetastoreInfo + .. py:method:: get(metastore_id: str) -> AccountsGetMetastoreResponse Gets a Unity Catalog metastore from an account, both specified by ID. :param metastore_id: str Unity Catalog metastore ID - :returns: :class:`AccountsMetastoreInfo` + :returns: :class:`AccountsGetMetastoreResponse` .. py:method:: list() -> Iterator[MetastoreInfo] @@ -46,13 +46,14 @@ :returns: Iterator over :class:`MetastoreInfo` - .. py:method:: update(metastore_id: str [, metastore_info: Optional[UpdateMetastore]]) -> AccountsMetastoreInfo + .. py:method:: update(metastore_id: str [, metastore_info: Optional[UpdateAccountsMetastore]]) -> AccountsUpdateMetastoreResponse Updates an existing Unity Catalog metastore. :param metastore_id: str Unity Catalog metastore ID - :param metastore_info: :class:`UpdateMetastore` (optional) + :param metastore_info: :class:`UpdateAccountsMetastore` (optional) + Properties of the metastore to change. - :returns: :class:`AccountsMetastoreInfo` + :returns: :class:`AccountsUpdateMetastoreResponse` \ No newline at end of file diff --git a/docs/account/catalog/storage_credentials.rst b/docs/account/catalog/storage_credentials.rst index f62632e8f..21ef2fdd8 100644 --- a/docs/account/catalog/storage_credentials.rst +++ b/docs/account/catalog/storage_credentials.rst @@ -6,24 +6,26 @@ These APIs manage storage credentials for a particular metastore. - .. py:method:: create(metastore_id: str [, credential_info: Optional[CreateStorageCredential]]) -> AccountsStorageCredentialInfo + .. 
py:method:: create(metastore_id: str [, credential_info: Optional[CreateAccountsStorageCredential], skip_validation: Optional[bool]]) -> AccountsCreateStorageCredentialInfo - Creates a new storage credential. The request object is specific to the cloud: + Creates a new storage credential. The request object is specific to the cloud: - **AwsIamRole** for + AWS credentials - **AzureServicePrincipal** for Azure credentials - **GcpServiceAccountKey** for GCP + credentials - * **AwsIamRole** for AWS credentials * **AzureServicePrincipal** for Azure credentials * - **GcpServiceAcountKey** for GCP credentials. - - The caller must be a metastore admin and have the **CREATE_STORAGE_CREDENTIAL** privilege on the + The caller must be a metastore admin and have the `CREATE_STORAGE_CREDENTIAL` privilege on the metastore. :param metastore_id: str Unity Catalog metastore ID - :param credential_info: :class:`CreateStorageCredential` (optional) + :param credential_info: :class:`CreateAccountsStorageCredential` (optional) + :param skip_validation: bool (optional) + Optional, default false. Supplying true to this argument skips validation of the created set of + credentials. - :returns: :class:`AccountsStorageCredentialInfo` + :returns: :class:`AccountsCreateStorageCredentialInfo` - .. py:method:: delete(metastore_id: str, storage_credential_name: str [, force: Optional[bool]]) + .. py:method:: delete(metastore_id: str, storage_credential_name: str [, force: Optional[bool]]) -> AccountsDeleteStorageCredentialResponse Deletes a storage credential from the metastore. The caller must be an owner of the storage credential. @@ -35,7 +37,7 @@ :param force: bool (optional) Force deletion even if the Storage Credential is not empty. Default is false. - + :returns: :class:`AccountsDeleteStorageCredentialResponse` .. py:method:: get(metastore_id: str, storage_credential_name: str) -> AccountsStorageCredentialInfo @@ -46,7 +48,7 @@ :param metastore_id: str Unity Catalog metastore ID :param storage_credential_name: str - Name of the storage credential. + Required. Name of the storage credential. :returns: :class:`AccountsStorageCredentialInfo` @@ -61,16 +63,18 @@ :returns: Iterator over :class:`StorageCredentialInfo` - .. py:method:: update(metastore_id: str, storage_credential_name: str [, credential_info: Optional[UpdateStorageCredential]]) -> AccountsStorageCredentialInfo + .. py:method:: update(metastore_id: str, storage_credential_name: str [, credential_info: Optional[UpdateAccountsStorageCredential], skip_validation: Optional[bool]]) -> AccountsUpdateStorageCredentialResponse Updates a storage credential on the metastore. The caller must be the owner of the storage credential. - If the caller is a metastore admin, only the __owner__ credential can be changed. + If the caller is a metastore admin, only the **owner** credential can be changed. :param metastore_id: str Unity Catalog metastore ID :param storage_credential_name: str Name of the storage credential. - :param credential_info: :class:`UpdateStorageCredential` (optional) + :param credential_info: :class:`UpdateAccountsStorageCredential` (optional) + :param skip_validation: bool (optional) + Optional. Supplying true to this argument skips validation of the updated set of credentials. 
- :returns: :class:`AccountsStorageCredentialInfo` + :returns: :class:`AccountsUpdateStorageCredentialResponse` \ No newline at end of file diff --git a/docs/account/iam/access_control.rst b/docs/account/iam/access_control.rst index 0e271f62e..3dc570ba1 100644 --- a/docs/account/iam/access_control.rst +++ b/docs/account/iam/access_control.rst @@ -19,7 +19,8 @@ Examples | Summary :--- | :--- `resource=accounts/` | A resource name for the account. `resource=accounts//groups/` | A resource name for the group. `resource=accounts//servicePrincipals/` | A resource name for the service - principal. + principal. `resource=accounts//tagPolicies/` | A resource name for the + tag policy. :returns: :class:`GetAssignableRolesForResourceResponse` @@ -37,6 +38,8 @@ set on the group. `name=accounts//servicePrincipals//ruleSets/default` | A name for a rule set on the service principal. + `name=accounts//tagPolicies//ruleSets/default` | A name for a rule set on + the tag policy. :param etag: str Etag used for versioning. The response is at least as fresh as the eTag provided. Etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a rule set from diff --git a/docs/account/iam/groups_v2.rst b/docs/account/iam/groups_v2.rst new file mode 100644 index 000000000..9a38fb63d --- /dev/null +++ b/docs/account/iam/groups_v2.rst @@ -0,0 +1,110 @@ +``a.groups_v2``: Account Groups +=============================== +.. currentmodule:: databricks.sdk.service.iam + +.. py:class:: AccountGroupsV2API + + Groups simplify identity management, making it easier to assign access to Databricks account, data, and + other securable objects. + + It is best practice to assign access to workspaces and access-control policies in Unity Catalog to groups, + instead of to users individually. All Databricks account identities can be assigned as members of groups, + and members inherit permissions that are assigned to their group. + + .. py:method:: create( [, display_name: Optional[str], external_id: Optional[str], id: Optional[str], members: Optional[List[ComplexValue]], meta: Optional[ResourceMeta], roles: Optional[List[ComplexValue]]]) -> AccountGroup + + Creates a group in the Databricks account with a unique name, using the supplied group details. + + :param display_name: str (optional) + String that represents a human-readable group name + :param external_id: str (optional) + :param id: str (optional) + Databricks group ID + :param members: List[:class:`ComplexValue`] (optional) + :param meta: :class:`ResourceMeta` (optional) + Container for the group identifier. Workspace local versus account. + :param roles: List[:class:`ComplexValue`] (optional) + Indicates if the group has the admin role. + + :returns: :class:`AccountGroup` + + + .. py:method:: delete(id: str) + + Deletes a group from the Databricks account. + + :param id: str + Unique ID for a group in the Databricks account. + + + + + .. py:method:: get(id: str) -> AccountGroup + + Gets the information for a specific group in the Databricks account. + + :param id: str + Unique ID for a group in the Databricks account. + + :returns: :class:`AccountGroup` + + + .. py:method:: list( [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[ListSortOrder], start_index: Optional[int]]) -> Iterator[AccountGroup] + + Gets all details of the groups associated with the Databricks account. As of 08/22/2025, this endpoint + will not return members. 
Instead, members should be retrieved by iterating through `Get group + details`. + + :param attributes: str (optional) + Comma-separated list of attributes to return in response. + :param count: int (optional) + Desired number of results per page. Default is 10000. + :param excluded_attributes: str (optional) + Comma-separated list of attributes to exclude in response. + :param filter: str (optional) + Query by which the results have to be filtered. Supported operators are equals(`eq`), + contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be + formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently + only support simple expressions. + + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + :param sort_by: str (optional) + Attribute to sort the results. + :param sort_order: :class:`ListSortOrder` (optional) + The order to sort the results. + :param start_index: int (optional) + Specifies the index of the first result. First item is number 1. + + :returns: Iterator over :class:`AccountGroup` + + + .. py:method:: patch(id: str [, operations: Optional[List[Patch]], schemas: Optional[List[PatchSchema]]]) + + Partially updates the details of a group. + + :param id: str + Unique ID in the Databricks workspace. + :param operations: List[:class:`Patch`] (optional) + :param schemas: List[:class:`PatchSchema`] (optional) + The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. + + + + + .. py:method:: update(id: str [, display_name: Optional[str], external_id: Optional[str], members: Optional[List[ComplexValue]], meta: Optional[ResourceMeta], roles: Optional[List[ComplexValue]]]) + + Updates the details of a group by replacing the entire group entity. + + :param id: str + Databricks group ID + :param display_name: str (optional) + String that represents a human-readable group name + :param external_id: str (optional) + :param members: List[:class:`ComplexValue`] (optional) + :param meta: :class:`ResourceMeta` (optional) + Container for the group identifier. Workspace local versus account. + :param roles: List[:class:`ComplexValue`] (optional) + Indicates if the group has the admin role. + + + \ No newline at end of file diff --git a/docs/account/iam/index.rst b/docs/account/iam/index.rst index 1939a1a1e..8e84b5878 100644 --- a/docs/account/iam/index.rst +++ b/docs/account/iam/index.rst @@ -8,7 +8,7 @@ Manage users, service principals, groups and their permissions in Accounts and W :maxdepth: 1 access_control - groups - service_principals - users + groups_v2 + service_principals_v2 + users_v2 workspace_assignment \ No newline at end of file diff --git a/docs/account/iam/service_principals_v2.rst b/docs/account/iam/service_principals_v2.rst new file mode 100644 index 000000000..34695eff9 --- /dev/null +++ b/docs/account/iam/service_principals_v2.rst @@ -0,0 +1,111 @@ +``a.service_principals_v2``: Account Service Principals +======================================================= +.. currentmodule:: databricks.sdk.service.iam + +.. py:class:: AccountServicePrincipalsV2API + + Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. + Databricks recommends creating service principals to run production jobs or modify production data. If all + processes that act on production data run with service principals, interactive users do not need any + write, delete, or modify privileges in production. 
This eliminates the risk of a user overwriting + production data by accident. + + .. py:method:: create( [, active: Optional[bool], application_id: Optional[str], display_name: Optional[str], external_id: Optional[str], id: Optional[str], roles: Optional[List[ComplexValue]]]) -> AccountServicePrincipal + + Creates a new service principal in the Databricks account. + + :param active: bool (optional) + If this user is active + :param application_id: str (optional) + UUID relating to the service principal + :param display_name: str (optional) + String that represents a concatenation of given and family names. + :param external_id: str (optional) + :param id: str (optional) + Databricks service principal ID. + :param roles: List[:class:`ComplexValue`] (optional) + Indicates if the group has the admin role. + + :returns: :class:`AccountServicePrincipal` + + + .. py:method:: delete(id: str) + + Delete a single service principal in the Databricks account. + + :param id: str + Unique ID for a service principal in the Databricks account. + + + + + .. py:method:: get(id: str) -> AccountServicePrincipal + + Gets the details for a single service principal define in the Databricks account. + + :param id: str + Unique ID for a service principal in the Databricks account. + + :returns: :class:`AccountServicePrincipal` + + + .. py:method:: list( [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[ListSortOrder], start_index: Optional[int]]) -> Iterator[AccountServicePrincipal] + + Gets the set of service principals associated with a Databricks account. + + :param attributes: str (optional) + Comma-separated list of attributes to return in response. + :param count: int (optional) + Desired number of results per page. Default is 10000. + :param excluded_attributes: str (optional) + Comma-separated list of attributes to exclude in response. + :param filter: str (optional) + Query by which the results have to be filtered. Supported operators are equals(`eq`), + contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be + formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently + only support simple expressions. + + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + :param sort_by: str (optional) + Attribute to sort the results. + :param sort_order: :class:`ListSortOrder` (optional) + The order to sort the results. + :param start_index: int (optional) + Specifies the index of the first result. First item is number 1. + + :returns: Iterator over :class:`AccountServicePrincipal` + + + .. py:method:: patch(id: str [, operations: Optional[List[Patch]], schemas: Optional[List[PatchSchema]]]) + + Partially updates the details of a single service principal in the Databricks account. + + :param id: str + Unique ID in the Databricks workspace. + :param operations: List[:class:`Patch`] (optional) + :param schemas: List[:class:`PatchSchema`] (optional) + The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. + + + + + .. py:method:: update(id: str [, active: Optional[bool], application_id: Optional[str], display_name: Optional[str], external_id: Optional[str], roles: Optional[List[ComplexValue]]]) + + Updates the details of a single service principal. + + This action replaces the existing service principal with the same name. + + :param id: str + Databricks service principal ID. 
+ :param active: bool (optional) + If this user is active + :param application_id: str (optional) + UUID relating to the service principal + :param display_name: str (optional) + String that represents a concatenation of given and family names. + :param external_id: str (optional) + :param roles: List[:class:`ComplexValue`] (optional) + Indicates if the group has the admin role. + + + \ No newline at end of file diff --git a/docs/account/iam/users_v2.rst b/docs/account/iam/users_v2.rst new file mode 100644 index 000000000..7ed80e744 --- /dev/null +++ b/docs/account/iam/users_v2.rst @@ -0,0 +1,144 @@ +``a.users_v2``: Account Users +============================= +.. currentmodule:: databricks.sdk.service.iam + +.. py:class:: AccountUsersV2API + + User identities recognized by Databricks and represented by email addresses. + + Databricks recommends using SCIM provisioning to sync users and groups automatically from your identity + provider to your Databricks account. SCIM streamlines onboarding a new employee or team by using your + identity provider to create users and groups in Databricks account and give them the proper level of + access. When a user leaves your organization or no longer needs access to Databricks account, admins can + terminate the user in your identity provider and that user’s account will also be removed from + Databricks account. This ensures a consistent offboarding process and prevents unauthorized users from + accessing sensitive data. + + .. py:method:: create( [, active: Optional[bool], display_name: Optional[str], emails: Optional[List[ComplexValue]], external_id: Optional[str], id: Optional[str], name: Optional[Name], roles: Optional[List[ComplexValue]], user_name: Optional[str]]) -> AccountUser + + Creates a new user in the Databricks account. This new user will also be added to the Databricks + account. + + :param active: bool (optional) + If this user is active + :param display_name: str (optional) + String that represents a concatenation of given and family names. For example `John Smith`. + :param emails: List[:class:`ComplexValue`] (optional) + All the emails associated with the Databricks user. + :param external_id: str (optional) + External ID is not currently supported. It is reserved for future use. + :param id: str (optional) + Databricks user ID. + :param name: :class:`Name` (optional) + :param roles: List[:class:`ComplexValue`] (optional) + Indicates if the group has the admin role. + :param user_name: str (optional) + Email address of the Databricks user. + + :returns: :class:`AccountUser` + + + .. py:method:: delete(id: str) + + Deletes a user. Deleting a user from a Databricks account also removes objects associated with the + user. + + :param id: str + Unique ID for a user in the Databricks account. + + + + + .. py:method:: get(id: str [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[GetSortOrder], start_index: Optional[int]]) -> AccountUser + + Gets information for a specific user in Databricks account. + + :param id: str + Unique ID for a user in the Databricks account. + :param attributes: str (optional) + Comma-separated list of attributes to return in response. + :param count: int (optional) + Desired number of results per page. Default is 10000. + :param excluded_attributes: str (optional) + Comma-separated list of attributes to exclude in response. + :param filter: str (optional) + Query by which the results have to be filtered. 
Supported operators are equals(`eq`), + contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be + formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently + only support simple expressions. + + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + :param sort_by: str (optional) + Attribute to sort the results. Multi-part paths are supported. For example, `userName`, + `name.givenName`, and `emails`. + :param sort_order: :class:`GetSortOrder` (optional) + The order to sort the results. + :param start_index: int (optional) + Specifies the index of the first result. First item is number 1. + + :returns: :class:`AccountUser` + + + .. py:method:: list( [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[ListSortOrder], start_index: Optional[int]]) -> Iterator[AccountUser] + + Gets details for all the users associated with a Databricks account. + + :param attributes: str (optional) + Comma-separated list of attributes to return in response. + :param count: int (optional) + Desired number of results per page. Default is 10000. + :param excluded_attributes: str (optional) + Comma-separated list of attributes to exclude in response. + :param filter: str (optional) + Query by which the results have to be filtered. Supported operators are equals(`eq`), + contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be + formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently + only support simple expressions. + + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + :param sort_by: str (optional) + Attribute to sort the results. Multi-part paths are supported. For example, `userName`, + `name.givenName`, and `emails`. + :param sort_order: :class:`ListSortOrder` (optional) + The order to sort the results. + :param start_index: int (optional) + Specifies the index of the first result. First item is number 1. + + :returns: Iterator over :class:`AccountUser` + + + .. py:method:: patch(id: str [, operations: Optional[List[Patch]], schemas: Optional[List[PatchSchema]]]) + + Partially updates a user resource by applying the supplied operations on specific user attributes. + + :param id: str + Unique ID in the Databricks workspace. + :param operations: List[:class:`Patch`] (optional) + :param schemas: List[:class:`PatchSchema`] (optional) + The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. + + + + + .. py:method:: update(id: str [, active: Optional[bool], display_name: Optional[str], emails: Optional[List[ComplexValue]], external_id: Optional[str], name: Optional[Name], roles: Optional[List[ComplexValue]], user_name: Optional[str]]) + + Replaces a user's information with the data supplied in request. + + :param id: str + Databricks user ID. + :param active: bool (optional) + If this user is active + :param display_name: str (optional) + String that represents a concatenation of given and family names. For example `John Smith`. + :param emails: List[:class:`ComplexValue`] (optional) + All the emails associated with the Databricks user. + :param external_id: str (optional) + External ID is not currently supported. It is reserved for future use. + :param name: :class:`Name` (optional) + :param roles: List[:class:`ComplexValue`] (optional) + Indicates if the group has the admin role. 
+ :param user_name: str (optional) + Email address of the Databricks user. + + + \ No newline at end of file diff --git a/docs/account/iam/workspace_assignment.rst b/docs/account/iam/workspace_assignment.rst index 133b16f3d..ca78b86df 100644 --- a/docs/account/iam/workspace_assignment.rst +++ b/docs/account/iam/workspace_assignment.rst @@ -74,9 +74,9 @@ spn_id = spn.id - workspace_id = os.environ["DUMMY_WORKSPACE_ID"] + workspace_id = os.environ["TEST_WORKSPACE_ID"] - _ = a.workspace_assignment.update( + a.workspace_assignment.update( workspace_id=workspace_id, principal_id=spn_id, permissions=[iam.WorkspacePermission.USER], diff --git a/docs/account/iamv2/iam_v2.rst b/docs/account/iamv2/iam_v2.rst new file mode 100644 index 000000000..ea9c48287 --- /dev/null +++ b/docs/account/iamv2/iam_v2.rst @@ -0,0 +1,62 @@ +``a.iam_v2``: account_iam.v2 +============================ +.. currentmodule:: databricks.sdk.service.iamv2 + +.. py:class:: AccountIamV2API + + These APIs are used to manage identities and the workspace access of these identities in . + + .. py:method:: get_workspace_access_detail(workspace_id: int, principal_id: int [, view: Optional[WorkspaceAccessDetailView]]) -> WorkspaceAccessDetail + + Returns the access details for a principal in a workspace. Allows for checking access details for any + provisioned principal (user, service principal, or group) in a workspace. * Provisioned principal here + refers to one that has been synced into Databricks from the customer's IdP or added explicitly to + Databricks via SCIM/UI. Allows for passing in a "view" parameter to control what fields are returned + (BASIC by default or FULL). + + :param workspace_id: int + Required. The workspace ID for which the access details are being requested. + :param principal_id: int + Required. The internal ID of the principal (user/sp/group) for which the access details are being + requested. + :param view: :class:`WorkspaceAccessDetailView` (optional) + Controls what fields are returned. + + :returns: :class:`WorkspaceAccessDetail` + + + .. py:method:: resolve_group(external_id: str) -> ResolveGroupResponse + + Resolves a group with the given external ID from the customer's IdP. If the group does not exist, it + will be created in the account. If the customer is not onboarded onto Automatic Identity Management + (AIM), this will return an error. + + :param external_id: str + Required. The external ID of the group in the customer's IdP. + + :returns: :class:`ResolveGroupResponse` + + + .. py:method:: resolve_service_principal(external_id: str) -> ResolveServicePrincipalResponse + + Resolves an SP with the given external ID from the customer's IdP. If the SP does not exist, it will + be created. If the customer is not onboarded onto Automatic Identity Management (AIM), this will + return an error. + + :param external_id: str + Required. The external ID of the service principal in the customer's IdP. + + :returns: :class:`ResolveServicePrincipalResponse` + + + .. py:method:: resolve_user(external_id: str) -> ResolveUserResponse + + Resolves a user with the given external ID from the customer's IdP. If the user does not exist, it + will be created. If the customer is not onboarded onto Automatic Identity Management (AIM), this will + return an error. + + :param external_id: str + Required. The external ID of the user in the customer's IdP. 
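A minimal sketch of how the resolve methods and ``get_workspace_access_detail`` might be combined, assuming the account is onboarded to Automatic Identity Management; the external IDs and numeric IDs below are placeholders:

.. code-block:: python

    from databricks.sdk import AccountClient

    a = AccountClient()

    # Resolve (and create, if missing) identities synced from the customer's IdP.
    group = a.iam_v2.resolve_group(external_id="idp-group-1234")
    user = a.iam_v2.resolve_user(external_id="idp-user-5678")

    # Check what access a provisioned principal has in a specific workspace
    # (BASIC view is returned by default; pass `view` for FULL details).
    detail = a.iam_v2.get_workspace_access_detail(workspace_id=1234567890, principal_id=42)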
+ + :returns: :class:`ResolveUserResponse` + \ No newline at end of file diff --git a/docs/account/iamv2/index.rst b/docs/account/iamv2/index.rst new file mode 100644 index 000000000..bab3fe464 --- /dev/null +++ b/docs/account/iamv2/index.rst @@ -0,0 +1,10 @@ + +Identity and Access Management +============================== + +Manage identities and workspace access. + +.. toctree:: + :maxdepth: 1 + + iam_v2 \ No newline at end of file diff --git a/docs/account/index.rst b/docs/account/index.rst index c355016d1..2f1c55edd 100644 --- a/docs/account/index.rst +++ b/docs/account/index.rst @@ -10,6 +10,7 @@ These APIs are available from AccountClient billing/index catalog/index iam/index + iamv2/index oauth2/index provisioning/index settings/index diff --git a/docs/account/provisioning/credentials.rst b/docs/account/provisioning/credentials.rst index acb958c8c..d63648d58 100644 --- a/docs/account/provisioning/credentials.rst +++ b/docs/account/provisioning/credentials.rst @@ -54,7 +54,7 @@ :returns: :class:`Credential` - .. py:method:: delete(credentials_id: str) + .. py:method:: delete(credentials_id: str) -> Credential Deletes a Databricks credential configuration object for an account, both specified by ID. You cannot delete a credential that is associated with any workspace. @@ -62,7 +62,7 @@ :param credentials_id: str Databricks Account API credential configuration ID - + :returns: :class:`Credential` .. py:method:: get(credentials_id: str) -> Credential @@ -95,7 +95,7 @@ Gets a Databricks credential configuration object for an account, both specified by ID. :param credentials_id: str - Databricks Account API credential configuration ID + Credential configuration ID :returns: :class:`Credential` @@ -113,7 +113,7 @@ configs = a.credentials.list() - Gets all Databricks credential configurations associated with an account specified by ID. + List Databricks credential configuration objects for an account, specified by ID. :returns: Iterator over :class:`Credential` diff --git a/docs/account/provisioning/encryption_keys.rst b/docs/account/provisioning/encryption_keys.rst index 3e190c250..fc053d71c 100644 --- a/docs/account/provisioning/encryption_keys.rst +++ b/docs/account/provisioning/encryption_keys.rst @@ -65,7 +65,7 @@ :returns: :class:`CustomerManagedKey` - .. py:method:: delete(customer_managed_key_id: str) + .. py:method:: delete(customer_managed_key_id: str) -> CustomerManagedKey Deletes a customer-managed key configuration object for an account. You cannot delete a configuration that is associated with a running workspace. @@ -73,7 +73,7 @@ :param customer_managed_key_id: str Databricks encryption key configuration ID. - + :returns: :class:`CustomerManagedKey` .. py:method:: get(customer_managed_key_id: str) -> CustomerManagedKey @@ -135,16 +135,7 @@ all = a.encryption_keys.list() - Gets all customer-managed key configuration objects for an account. If the key is specified as a - workspace's managed services customer-managed key, Databricks uses the key to encrypt the workspace's - notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. - If the key is specified as a workspace's storage customer-managed key, the key is used to encrypt the - workspace's root S3 bucket and optionally can encrypt cluster EBS volumes data in the data plane. - - **Important**: Customer-managed keys are supported only for some deployment types, subscription types, - and AWS regions. 
- - This operation is available only if your account is on the E2 version of the platform. + Lists Databricks customer-managed key configurations for an account. :returns: Iterator over :class:`CustomerManagedKey` diff --git a/docs/account/provisioning/networks.rst b/docs/account/provisioning/networks.rst index d558cdcf5..6fca3675d 100644 --- a/docs/account/provisioning/networks.rst +++ b/docs/account/provisioning/networks.rst @@ -7,7 +7,7 @@ These APIs manage network configurations for customer-managed VPCs (optional). Its ID is used when creating a new workspace if you use customer-managed VPCs. - .. py:method:: create(network_name: str [, gcp_network_info: Optional[GcpNetworkInfo], security_group_ids: Optional[List[str]], subnet_ids: Optional[List[str]], vpc_endpoints: Optional[NetworkVpcEndpoints], vpc_id: Optional[str]]) -> Network + .. py:method:: create( [, gcp_network_info: Optional[GcpNetworkInfo], network_name: Optional[str], security_group_ids: Optional[List[str]], subnet_ids: Optional[List[str]], vpc_endpoints: Optional[NetworkVpcEndpoints], vpc_id: Optional[str]]) -> Network Usage: @@ -30,9 +30,9 @@ Creates a Databricks network configuration that represents an VPC and its resources. The VPC will be used for new Databricks clusters. This requires a pre-existing VPC and subnets. - :param network_name: str - The human-readable name of the network configuration. :param gcp_network_info: :class:`GcpNetworkInfo` (optional) + :param network_name: str (optional) + The human-readable name of the network configuration. :param security_group_ids: List[str] (optional) IDs of one to five security groups associated with this network. Security group IDs **cannot** be used in multiple network configurations. @@ -41,13 +41,13 @@ network configurations. :param vpc_endpoints: :class:`NetworkVpcEndpoints` (optional) :param vpc_id: str (optional) - The ID of the VPC associated with this network. VPC IDs can be used in multiple network - configurations. + The ID of the VPC associated with this network configuration. VPC IDs can be used in multiple + networks. :returns: :class:`Network` - .. py:method:: delete(network_id: str) + .. py:method:: delete(network_id: str) -> Network Deletes a Databricks network configuration, which represents a cloud VPC and its resources. You cannot delete a network that is associated with a workspace. @@ -57,7 +57,7 @@ :param network_id: str Databricks Account API network configuration ID. - + :returns: :class:`Network` .. py:method:: get(network_id: str) -> Network @@ -103,9 +103,7 @@ configs = a.networks.list() - Gets a list of all Databricks network configurations for an account, specified by ID. - - This operation is available only if your account is on the E2 version of the platform. + Lists Databricks network configurations for an account. :returns: Iterator over :class:`Network` diff --git a/docs/account/provisioning/private_access.rst b/docs/account/provisioning/private_access.rst index 8b28bcecb..70b127dd5 100644 --- a/docs/account/provisioning/private_access.rst +++ b/docs/account/provisioning/private_access.rst @@ -6,7 +6,7 @@ These APIs manage private access settings for this account. - .. py:method:: create(private_access_settings_name: str, region: str [, allowed_vpc_endpoint_ids: Optional[List[str]], private_access_level: Optional[PrivateAccessLevel], public_access_enabled: Optional[bool]]) -> PrivateAccessSettings + .. 
py:method:: create( [, allowed_vpc_endpoint_ids: Optional[List[str]], private_access_level: Optional[PrivateAccessLevel], private_access_settings_name: Optional[str], public_access_enabled: Optional[bool], region: Optional[str]]) -> PrivateAccessSettings Usage: @@ -28,59 +28,43 @@ # cleanup a.private_access.delete(private_access_settings_id=created.private_access_settings_id) - Creates a private access settings object, which specifies how your workspace is accessed over [AWS - PrivateLink]. To use AWS PrivateLink, a workspace must have a private access settings object - referenced by ID in the workspace's `private_access_settings_id` property. + Creates a private access settings configuration, which represents network access restrictions for + workspace resources. Private access settings configure whether workspaces can be accessed from the + public internet or only from private endpoints. - You can share one private access settings with multiple workspaces in a single account. However, - private access settings are specific to AWS regions, so only workspaces in the same AWS region can use - a given private access settings object. - - Before configuring PrivateLink, read the [Databricks article about PrivateLink]. - - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html - - :param private_access_settings_name: str - The human-readable name of the private access settings object. - :param region: str - The cloud region for workspaces associated with this private access settings object. :param allowed_vpc_endpoint_ids: List[str] (optional) - An array of Databricks VPC endpoint IDs. This is the Databricks ID that is returned when registering - the VPC endpoint configuration in your Databricks account. This is not the ID of the VPC endpoint in - AWS. - - Only used when `private_access_level` is set to `ENDPOINT`. This is an allow list of VPC endpoints - that in your account that can connect to your workspace over AWS PrivateLink. - - If hybrid access to your workspace is enabled by setting `public_access_enabled` to `true`, this - control only works for PrivateLink connections. To control how your workspace is accessed via public - internet, see [IP access lists]. - - [IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html + An array of Databricks VPC endpoint IDs. This is the Databricks ID returned when registering the VPC + endpoint configuration in your Databricks account. This is not the ID of the VPC endpoint in AWS. + Only used when private_access_level is set to ENDPOINT. This is an allow list of VPC endpoints + registered in your Databricks account that can connect to your workspace over AWS PrivateLink. Note: + If hybrid access to your workspace is enabled by setting public_access_enabled to true, this control + only works for PrivateLink connections. To control how your workspace is accessed via public + internet, see IP access lists. :param private_access_level: :class:`PrivateAccessLevel` (optional) + The private access level controls which VPC endpoints can connect to the UI or API of any workspace + that attaches this private access settings object. `ACCOUNT` level access (the default) allows only + VPC endpoints that are registered in your Databricks account connect to your workspace. `ENDPOINT` + level access allows only specified VPC endpoints connect to your workspace. For details, see + allowed_vpc_endpoint_ids. 
+ :param private_access_settings_name: str (optional) + The human-readable name of the private access settings object. :param public_access_enabled: bool (optional) Determines if the workspace can be accessed over public internet. For fully private workspaces, you - can optionally specify `false`, but only if you implement both the front-end and the back-end - PrivateLink connections. Otherwise, specify `true`, which means that public access is enabled. + can optionally specify false, but only if you implement both the front-end and the back-end + PrivateLink connections. Otherwise, specify true, which means that public access is enabled. + :param region: str (optional) + The AWS region for workspaces attached to this private access settings object. :returns: :class:`PrivateAccessSettings` - .. py:method:: delete(private_access_settings_id: str) - - Deletes a private access settings object, which determines how your workspace is accessed over [AWS - PrivateLink]. + .. py:method:: delete(private_access_settings_id: str) -> PrivateAccessSettings - Before configuring PrivateLink, read the [Databricks article about PrivateLink].", - - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + Deletes a Databricks private access settings configuration, both specified by ID. :param private_access_settings_id: str - Databricks Account API private access settings ID. - + :returns: :class:`PrivateAccessSettings` .. py:method:: get(private_access_settings_id: str) -> PrivateAccessSettings @@ -107,16 +91,9 @@ # cleanup a.private_access.delete(private_access_settings_id=created.private_access_settings_id) - Gets a private access settings object, which specifies how your workspace is accessed over [AWS - PrivateLink]. - - Before configuring PrivateLink, read the [Databricks article about PrivateLink].", - - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + Gets a Databricks private access settings configuration, both specified by ID. :param private_access_settings_id: str - Databricks Account API private access settings ID. :returns: :class:`PrivateAccessSettings` @@ -134,13 +111,13 @@ all = a.private_access.list() - Gets a list of all private access settings objects for an account, specified by ID. + Lists Databricks private access settings for an account. :returns: Iterator over :class:`PrivateAccessSettings` - .. py:method:: replace(private_access_settings_id: str, private_access_settings_name: str, region: str [, allowed_vpc_endpoint_ids: Optional[List[str]], private_access_level: Optional[PrivateAccessLevel], public_access_enabled: Optional[bool]]) + .. py:method:: replace(private_access_settings_id: str, customer_facing_private_access_settings: PrivateAccessSettings) -> PrivateAccessSettings Usage: @@ -169,47 +146,20 @@ a.private_access.delete(private_access_settings_id=created.private_access_settings_id) Updates an existing private access settings object, which specifies how your workspace is accessed - over [AWS PrivateLink]. To use AWS PrivateLink, a workspace must have a private access settings object - referenced by ID in the workspace's `private_access_settings_id` property. - - This operation completely overwrites your existing private access settings object attached to your - workspaces. 
All workspaces attached to the private access settings are affected by any change. If - `public_access_enabled`, `private_access_level`, or `allowed_vpc_endpoint_ids` are updated, effects of - these changes might take several minutes to propagate to the workspace API. - - You can share one private access settings object with multiple workspaces in a single account. - However, private access settings are specific to AWS regions, so only workspaces in the same AWS - region can use a given private access settings object. - - Before configuring PrivateLink, read the [Databricks article about PrivateLink]. - - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + over AWS PrivateLink. To use AWS PrivateLink, a workspace must have a private access settings object + referenced by ID in the workspace's private_access_settings_id property. This operation completely + overwrites your existing private access settings object attached to your workspaces. All workspaces + attached to the private access settings are affected by any change. If public_access_enabled, + private_access_level, or allowed_vpc_endpoint_ids are updated, effects of these changes might take + several minutes to propagate to the workspace API. You can share one private access settings object + with multiple workspaces in a single account. However, private access settings are specific to AWS + regions, so only workspaces in the same AWS region can use a given private access settings object. + Before configuring PrivateLink, read the Databricks article about PrivateLink. :param private_access_settings_id: str - Databricks Account API private access settings ID. - :param private_access_settings_name: str - The human-readable name of the private access settings object. - :param region: str - The cloud region for workspaces associated with this private access settings object. - :param allowed_vpc_endpoint_ids: List[str] (optional) - An array of Databricks VPC endpoint IDs. This is the Databricks ID that is returned when registering - the VPC endpoint configuration in your Databricks account. This is not the ID of the VPC endpoint in - AWS. - - Only used when `private_access_level` is set to `ENDPOINT`. This is an allow list of VPC endpoints - that in your account that can connect to your workspace over AWS PrivateLink. - - If hybrid access to your workspace is enabled by setting `public_access_enabled` to `true`, this - control only works for PrivateLink connections. To control how your workspace is accessed via public - internet, see [IP access lists]. - - [IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html - :param private_access_level: :class:`PrivateAccessLevel` (optional) - :param public_access_enabled: bool (optional) - Determines if the workspace can be accessed over public internet. For fully private workspaces, you - can optionally specify `false`, but only if you implement both the front-end and the back-end - PrivateLink connections. Otherwise, specify `true`, which means that public access is enabled. - + Databricks private access settings ID. + :param customer_facing_private_access_settings: :class:`PrivateAccessSettings` + Properties of the new private access settings object. 
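Because ``replace`` now accepts a single ``PrivateAccessSettings`` object instead of individual fields, a call might look like the sketch below; the field names are assumed to mirror the ``create`` parameters documented above, and the settings ID is a placeholder:

.. code-block:: python

    from databricks.sdk import AccountClient
    from databricks.sdk.service import provisioning

    a = AccountClient()

    replaced = a.private_access.replace(
        private_access_settings_id="<existing-settings-id>",
        customer_facing_private_access_settings=provisioning.PrivateAccessSettings(
            private_access_settings_name="pas-renamed",  # field names assumed from create()
            region="us-west-2",
            public_access_enabled=True,
        ),
    )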
+ :returns: :class:`PrivateAccessSettings` \ No newline at end of file diff --git a/docs/account/provisioning/storage.rst b/docs/account/provisioning/storage.rst index a72721a6d..25ee5abaa 100644 --- a/docs/account/provisioning/storage.rst +++ b/docs/account/provisioning/storage.rst @@ -9,14 +9,13 @@ root storage S3 bucket for storage of non-production DBFS data. A storage configuration encapsulates this bucket information, and its ID is used when creating a new workspace. - .. py:method:: create(storage_configuration_name: str, root_bucket_info: RootBucketInfo) -> StorageConfiguration + .. py:method:: create(storage_configuration_name: str, root_bucket_info: RootBucketInfo [, role_arn: Optional[str]]) -> StorageConfiguration Usage: .. code-block:: - import os import time from databricks.sdk import AccountClient @@ -26,38 +25,33 @@ storage = a.storage.create( storage_configuration_name=f"sdk-{time.time_ns()}", - root_bucket_info=provisioning.RootBucketInfo(bucket_name=os.environ["TEST_ROOT_BUCKET"]), + root_bucket_info=provisioning.RootBucketInfo(bucket_name=f"sdk-{time.time_ns()}"), ) - - # cleanup - a.storage.delete(storage_configuration_id=storage.storage_configuration_id) - - Creates new storage configuration for an account, specified by ID. Uploads a storage configuration - object that represents the root AWS S3 bucket in your account. Databricks stores related workspace - assets including DBFS, cluster logs, and job results. For the AWS S3 bucket, you need to configure the - required bucket policy. - For information about how to create a new workspace with this API, see [Create a new workspace using - the Account API] - - [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + Creates a Databricks storage configuration for an account. :param storage_configuration_name: str The human-readable name of the storage configuration. :param root_bucket_info: :class:`RootBucketInfo` + Root S3 bucket information. + :param role_arn: str (optional) + Optional IAM role that is used to access the workspace catalog which is created during workspace + creation for UC by Default. If a storage configuration with this field populated is used to create a + workspace, then a workspace catalog is created together with the workspace. The workspace catalog + shares the root bucket with internal workspace storage (including DBFS root) but uses a dedicated + bucket path prefix. :returns: :class:`StorageConfiguration` - .. py:method:: delete(storage_configuration_id: str) + .. py:method:: delete(storage_configuration_id: str) -> StorageConfiguration Deletes a Databricks storage configuration. You cannot delete a storage configuration that is associated with any workspace. :param storage_configuration_id: str - Databricks Account API storage configuration ID. - + :returns: :class:`StorageConfiguration` .. py:method:: get(storage_configuration_id: str) -> StorageConfiguration @@ -84,7 +78,6 @@ Gets a Databricks storage configuration for an account, both specified by ID. :param storage_configuration_id: str - Databricks Account API storage configuration ID. :returns: :class:`StorageConfiguration` @@ -102,7 +95,7 @@ configs = a.storage.list() - Gets a list of all Databricks storage configurations for your account, specified by ID. + Lists Databricks storage configurations for an account, specified by ID. 
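A short sketch of passing the new optional ``role_arn`` when creating a storage configuration; the bucket name and role ARN are placeholders:

.. code-block:: python

    import time

    from databricks.sdk import AccountClient
    from databricks.sdk.service import provisioning

    a = AccountClient()

    storage = a.storage.create(
        storage_configuration_name=f"sdk-{time.time_ns()}",
        root_bucket_info=provisioning.RootBucketInfo(bucket_name="my-root-bucket"),
        # Optional IAM role used for the workspace catalog created for UC by Default.
        role_arn="arn:aws:iam::123456789012:role/uc-default-catalog",
    )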
:returns: Iterator over :class:`StorageConfiguration` diff --git a/docs/account/provisioning/vpc_endpoints.rst b/docs/account/provisioning/vpc_endpoints.rst index b639f3e1b..30ab766c1 100644 --- a/docs/account/provisioning/vpc_endpoints.rst +++ b/docs/account/provisioning/vpc_endpoints.rst @@ -6,7 +6,7 @@ These APIs manage VPC endpoint configurations for this account. - .. py:method:: create(vpc_endpoint_name: str [, aws_vpc_endpoint_id: Optional[str], gcp_vpc_endpoint_info: Optional[GcpVpcEndpointInfo], region: Optional[str]]) -> VpcEndpoint + .. py:method:: create( [, aws_vpc_endpoint_id: Optional[str], gcp_vpc_endpoint_info: Optional[GcpVpcEndpointInfo], region: Optional[str], vpc_endpoint_name: Optional[str]]) -> VpcEndpoint Usage: @@ -42,32 +42,26 @@ [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html - :param vpc_endpoint_name: str - The human-readable name of the storage configuration. :param aws_vpc_endpoint_id: str (optional) The ID of the VPC endpoint object in AWS. :param gcp_vpc_endpoint_info: :class:`GcpVpcEndpointInfo` (optional) + The cloud info of this vpc endpoint. :param region: str (optional) - The AWS region in which this VPC endpoint object exists. + The region in which this VPC endpoint object exists. + :param vpc_endpoint_name: str (optional) + The human-readable name of the storage configuration. :returns: :class:`VpcEndpoint` - .. py:method:: delete(vpc_endpoint_id: str) - - Deletes a VPC endpoint configuration, which represents an [AWS VPC endpoint] that can communicate - privately with Databricks over [AWS PrivateLink]. - - Before configuring PrivateLink, read the [Databricks article about PrivateLink]. + .. py:method:: delete(vpc_endpoint_id: str) -> VpcEndpoint - [AWS PrivateLink]: https://aws.amazon.com/privatelink - [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + Deletes a Databricks VPC endpoint configuration. You cannot delete a VPC endpoint configuration that + is associated with any workspace. :param vpc_endpoint_id: str - Databricks VPC endpoint ID. - + :returns: :class:`VpcEndpoint` .. py:method:: get(vpc_endpoint_id: str) -> VpcEndpoint @@ -120,11 +114,7 @@ all = a.vpc_endpoints.list() - Gets a list of all VPC endpoints for an account, specified by ID. - - Before configuring PrivateLink, read the [Databricks article about PrivateLink]. - - [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + Lists Databricks VPC endpoint configurations for an account. :returns: Iterator over :class:`VpcEndpoint` diff --git a/docs/account/provisioning/workspaces.rst b/docs/account/provisioning/workspaces.rst index f217f57eb..e004e9cc0 100644 --- a/docs/account/provisioning/workspaces.rst +++ b/docs/account/provisioning/workspaces.rst @@ -11,7 +11,7 @@ These endpoints are available if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - .. 
py:method:: create(workspace_name: str [, aws_region: Optional[str], cloud: Optional[str], cloud_resource_container: Optional[CloudResourceContainer], credentials_id: Optional[str], custom_tags: Optional[Dict[str, str]], deployment_name: Optional[str], gcp_managed_network_config: Optional[GcpManagedNetworkConfig], gke_config: Optional[GkeConfig], is_no_public_ip_enabled: Optional[bool], location: Optional[str], managed_services_customer_managed_key_id: Optional[str], network_id: Optional[str], pricing_tier: Optional[PricingTier], private_access_settings_id: Optional[str], storage_configuration_id: Optional[str], storage_customer_managed_key_id: Optional[str]]) -> Wait[Workspace] + .. py:method:: create( [, aws_region: Optional[str], cloud: Optional[str], cloud_resource_container: Optional[CloudResourceContainer], compute_mode: Optional[CustomerFacingComputeMode], credentials_id: Optional[str], custom_tags: Optional[Dict[str, str]], deployment_name: Optional[str], gcp_managed_network_config: Optional[GcpManagedNetworkConfig], gke_config: Optional[GkeConfig], location: Optional[str], managed_services_customer_managed_key_id: Optional[str], network_id: Optional[str], pricing_tier: Optional[PricingTier], private_access_settings_id: Optional[str], storage_configuration_id: Optional[str], storage_customer_managed_key_id: Optional[str], workspace_name: Optional[str]]) -> Wait[Workspace] Usage: @@ -50,23 +50,46 @@ a.credentials.delete(credentials_id=role.credentials_id) a.workspaces.delete(workspace_id=waiter.workspace_id) - Creates a new workspace. + Creates a new workspace using a credential configuration and a storage configuration, an optional + network configuration (if using a customer-managed VPC), an optional managed services key + configuration (if using customer-managed keys for managed services), and an optional storage key + configuration (if using customer-managed keys for storage). The key configurations used for managed + services and storage encryption can be the same or different. + + Important: This operation is asynchronous. A response with HTTP status code 200 means the request has + been accepted and is in progress, but does not mean that the workspace deployed successfully and is + running. The initial workspace status is typically PROVISIONING. Use the workspace ID (workspace_id) + field in the response to identify the new workspace and make repeated GET requests with the workspace + ID and check its status. The workspace becomes available when the status changes to RUNNING. + + You can share one customer-managed VPC with multiple workspaces in a single account. It is not + required to create a new VPC for each workspace. However, you cannot reuse subnets or Security Groups + between workspaces. If you plan to share one VPC with multiple workspaces, make sure you size your VPC + and subnets accordingly. Because a Databricks Account API network configuration encapsulates this + information, you cannot reuse a Databricks Account API network configuration across workspaces. + + For information about how to create a new workspace with this API including error handling, see + [Create a new workspace using the Account API]. + + Important: Customer-managed VPCs, PrivateLink, and customer-managed keys are supported on a limited + set of deployment and subscription types. If you have questions about availability, contact your + Databricks representative. 
+ + This operation is available only if your account is on the E2 version of the platform or on a select + custom plan that allows multiple workspaces per account. - **Important**: This operation is asynchronous. A response with HTTP status code 200 means the request - has been accepted and is in progress, but does not mean that the workspace deployed successfully and - is running. The initial workspace status is typically `PROVISIONING`. Use the workspace ID - (`workspace_id`) field in the response to identify the new workspace and make repeated `GET` requests - with the workspace ID and check its status. The workspace becomes available when the status changes to - `RUNNING`. + [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html - :param workspace_name: str - The workspace's human-readable name. :param aws_region: str (optional) - The AWS region of the workspace's data plane. :param cloud: str (optional) - The cloud provider which the workspace uses. For Google Cloud workspaces, always set this field to - `gcp`. + The cloud name. This field always has the value `gcp`. :param cloud_resource_container: :class:`CloudResourceContainer` (optional) + :param compute_mode: :class:`CustomerFacingComputeMode` (optional) + If the compute mode is `SERVERLESS`, a serverless workspace is created that comes pre-configured + with serverless compute and default storage, providing a fully-managed, enterprise-ready SaaS + experience. This means you don't need to provide any resources managed by you, such as credentials, + storage, or network. If the compute mode is `HYBRID` (which is the default option), a classic + workspace is created that uses customer-managed resources. :param credentials_id: str (optional) ID of the workspace's credential configuration object. :param custom_tags: Dict[str,str] (optional) @@ -75,77 +98,65 @@ key can be of maximum length of 127 characters, and cannot be empty. :param deployment_name: str (optional) The deployment name defines part of the subdomain for the workspace. The workspace URL for the web - application and REST APIs is `.cloud.databricks.com`. For example, if the - deployment name is `abcsales`, your workspace URL will be `https://abcsales.cloud.databricks.com`. + application and REST APIs is .cloud.databricks.com. For example, if the + deployment name is abcsales, your workspace URL will be https://abcsales.cloud.databricks.com. Hyphens are allowed. This property supports only the set of characters that are allowed in a - subdomain. - - To set this value, you must have a deployment name prefix. Contact your Databricks account team to - add an account deployment name prefix to your account. - - Workspace deployment names follow the account prefix and a hyphen. For example, if your account's - deployment prefix is `acme` and the workspace deployment name is `workspace-1`, the JSON response - for the `deployment_name` field becomes `acme-workspace-1`. The workspace URL would be - `acme-workspace-1.cloud.databricks.com`. - - You can also set the `deployment_name` to the reserved keyword `EMPTY` if you want the deployment - name to only include the deployment prefix. For example, if your account's deployment prefix is - `acme` and the workspace deployment name is `EMPTY`, the `deployment_name` becomes `acme` only and - the workspace URL is `acme.cloud.databricks.com`. - - This value must be unique across all non-deleted deployments across all AWS regions. 
- - If a new workspace omits this property, the server generates a unique deployment name for you with - the pattern `dbc-xxxxxxxx-xxxx`. + subdomain. To set this value, you must have a deployment name prefix. Contact your Databricks + account team to add an account deployment name prefix to your account. Workspace deployment names + follow the account prefix and a hyphen. For example, if your account's deployment prefix is acme and + the workspace deployment name is workspace-1, the JSON response for the deployment_name field + becomes acme-workspace-1. The workspace URL would be acme-workspace-1.cloud.databricks.com. You can + also set the deployment_name to the reserved keyword EMPTY if you want the deployment name to only + include the deployment prefix. For example, if your account's deployment prefix is acme and the + workspace deployment name is EMPTY, the deployment_name becomes acme only and the workspace URL is + acme.cloud.databricks.com. This value must be unique across all non-deleted deployments across all + AWS regions. If a new workspace omits this property, the server generates a unique deployment name + for you with the pattern dbc-xxxxxxxx-xxxx. :param gcp_managed_network_config: :class:`GcpManagedNetworkConfig` (optional) :param gke_config: :class:`GkeConfig` (optional) - :param is_no_public_ip_enabled: bool (optional) - Whether no public IP is enabled for the workspace. :param location: str (optional) - The Google Cloud region of the workspace data plane in your Google account. For example, `us-east4`. + The Google Cloud region of the workspace data plane in your Google account (for example, + `us-east4`). :param managed_services_customer_managed_key_id: str (optional) The ID of the workspace's managed services encryption key configuration object. This is used to help protect and control access to the workspace's notebooks, secrets, Databricks SQL queries, and query - history. The provided key configuration object property `use_cases` must contain `MANAGED_SERVICES`. + history. The provided key configuration object property use_cases must contain MANAGED_SERVICES. :param network_id: str (optional) + The ID of the workspace's network configuration object. To use AWS PrivateLink, this field is + required. :param pricing_tier: :class:`PricingTier` (optional) :param private_access_settings_id: str (optional) - ID of the workspace's private access settings object. Only used for PrivateLink. This ID must be - specified for customers using [AWS PrivateLink] for either front-end (user-to-workspace connection), - back-end (data plane to control plane connection), or both connection types. - - Before configuring PrivateLink, read the [Databricks article about PrivateLink].", + ID of the workspace's private access settings object. Only used for PrivateLink. You must specify + this ID if you are using [AWS PrivateLink] for either front-end (user-to-workspace connection), + back-end (data plane to control plane connection), or both connection types. Before configuring + PrivateLink, read the [Databricks article about PrivateLink].", [AWS PrivateLink]: https://aws.amazon.com/privatelink/ [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html :param storage_configuration_id: str (optional) - The ID of the workspace's storage configuration object. + ID of the workspace's storage configuration object. 
:param storage_customer_managed_key_id: str (optional) The ID of the workspace's storage encryption key configuration object. This is used to encrypt the workspace's root S3 bucket (root DBFS and system data) and, optionally, cluster EBS volumes. The - provided key configuration object property `use_cases` must contain `STORAGE`. + provided key configuration object property use_cases must contain STORAGE. + :param workspace_name: str (optional) + The human-readable name of the workspace. :returns: Long-running operation waiter for :class:`Workspace`. See :method:wait_get_workspace_running for more details. - .. py:method:: create_and_wait(workspace_name: str [, aws_region: Optional[str], cloud: Optional[str], cloud_resource_container: Optional[CloudResourceContainer], credentials_id: Optional[str], custom_tags: Optional[Dict[str, str]], deployment_name: Optional[str], gcp_managed_network_config: Optional[GcpManagedNetworkConfig], gke_config: Optional[GkeConfig], is_no_public_ip_enabled: Optional[bool], location: Optional[str], managed_services_customer_managed_key_id: Optional[str], network_id: Optional[str], pricing_tier: Optional[PricingTier], private_access_settings_id: Optional[str], storage_configuration_id: Optional[str], storage_customer_managed_key_id: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> Workspace - + .. py:method:: create_and_wait( [, aws_region: Optional[str], cloud: Optional[str], cloud_resource_container: Optional[CloudResourceContainer], compute_mode: Optional[CustomerFacingComputeMode], credentials_id: Optional[str], custom_tags: Optional[Dict[str, str]], deployment_name: Optional[str], gcp_managed_network_config: Optional[GcpManagedNetworkConfig], gke_config: Optional[GkeConfig], location: Optional[str], managed_services_customer_managed_key_id: Optional[str], network_id: Optional[str], pricing_tier: Optional[PricingTier], private_access_settings_id: Optional[str], storage_configuration_id: Optional[str], storage_customer_managed_key_id: Optional[str], workspace_name: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> Workspace - .. py:method:: delete(workspace_id: int) - Terminates and deletes a Databricks workspace. From an API perspective, deletion is immediate. - However, it might take a few minutes for all workspaces resources to be deleted, depending on the size - and number of workspace resources. + .. py:method:: delete(workspace_id: int) -> Workspace - This operation is available only if your account is on the E2 version of the platform or on a select - custom plan that allows multiple workspaces per account. + Deletes a Databricks workspace, both specified by ID. :param workspace_id: int - Workspace ID. - + :returns: :class:`Workspace` .. py:method:: get(workspace_id: int) -> Workspace @@ -166,18 +177,13 @@ Gets information including status for a Databricks workspace, specified by ID. In the response, the `workspace_status` field indicates the current status. After initial workspace creation (which is asynchronous), make repeated `GET` requests with the workspace ID and check its status. The workspace - becomes available when the status changes to `RUNNING`. - - For information about how to create a new workspace with this API **including error handling**, see - [Create a new workspace using the Account API]. - - This operation is available only if your account is on the E2 version of the platform or on a select - custom plan that allows multiple workspaces per account. + becomes available when the status changes to `RUNNING`. 
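Per the ``compute_mode`` description above, a serverless workspace should not need customer-managed credentials, storage, or network configurations. A hedged sketch, assuming the enum member is named ``SERVERLESS``; the workspace name is a placeholder:

.. code-block:: python

    from databricks.sdk import AccountClient
    from databricks.sdk.service import provisioning

    a = AccountClient()

    # Serverless workspaces come pre-configured with serverless compute and default
    # storage, so credentials_id / storage_configuration_id / network_id are omitted.
    ws = a.workspaces.create_and_wait(
        workspace_name="sdk-serverless-demo",
        compute_mode=provisioning.CustomerFacingComputeMode.SERVERLESS,  # member name assumed
    )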
For information about how to create a new + workspace with this API **including error handling**, see [Create a new workspace using the Account + API]. [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html :param workspace_id: int - Workspace ID. :returns: :class:`Workspace` @@ -195,16 +201,13 @@ all = a.workspaces.list() - Gets a list of all workspaces associated with an account, specified by ID. - - This operation is available only if your account is on the E2 version of the platform or on a select - custom plan that allows multiple workspaces per account. + Lists Databricks workspaces for an account. :returns: Iterator over :class:`Workspace` - .. py:method:: update(workspace_id: int [, aws_region: Optional[str], credentials_id: Optional[str], custom_tags: Optional[Dict[str, str]], managed_services_customer_managed_key_id: Optional[str], network_connectivity_config_id: Optional[str], network_id: Optional[str], private_access_settings_id: Optional[str], storage_configuration_id: Optional[str], storage_customer_managed_key_id: Optional[str]]) -> Wait[Workspace] + .. py:method:: update(workspace_id: int, customer_facing_workspace: Workspace [, update_mask: Optional[str]]) -> Wait[Workspace] Usage: @@ -236,136 +239,28 @@ # cleanup a.credentials.delete(credentials_id=update_role.credentials_id) - Updates a workspace configuration for either a running workspace or a failed workspace. The elements - that can be updated varies between these two use cases. - - ### Update a failed workspace You can update a Databricks workspace configuration for failed workspace - deployment for some fields, but not all fields. For a failed workspace, this request supports updates - to the following fields only: - Credential configuration ID - Storage configuration ID - Network - configuration ID. Used only to add or change a network configuration for a customer-managed VPC. For a - failed workspace only, you can convert a workspace with Databricks-managed VPC to use a - customer-managed VPC by adding this ID. You cannot downgrade a workspace with a customer-managed VPC - to be a Databricks-managed VPC. You can update the network configuration for a failed or running - workspace to add PrivateLink support, though you must also add a private access settings object. - Key - configuration ID for managed services (control plane storage, such as notebook source and Databricks - SQL queries). Used only if you use customer-managed keys for managed services. - Key configuration ID - for workspace storage (root S3 bucket and, optionally, EBS volumes). Used only if you use - customer-managed keys for workspace storage. **Important**: If the workspace was ever in the running - state, even if briefly before becoming a failed workspace, you cannot add a new key configuration ID - for workspace storage. - Private access settings ID to add PrivateLink support. You can add or update - the private access settings ID to upgrade a workspace to add support for front-end, back-end, or both - types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink - support on a workspace. - Custom tags. Given you provide an empty custom tags, the update would not be - applied. - Network connectivity configuration ID to add serverless stable IP support. You can add or - update the network connectivity configuration ID to ensure the workspace uses the same set of stable - IP CIDR blocks to access your resources. 
You cannot remove a network connectivity configuration from - the workspace once attached, you can only switch to another one. - - After calling the `PATCH` operation to update the workspace configuration, make repeated `GET` - requests with the workspace ID and check the workspace status. The workspace is successful if the - status changes to `RUNNING`. - - For information about how to create a new workspace with this API **including error handling**, see - [Create a new workspace using the Account API]. - - ### Update a running workspace You can update a Databricks workspace configuration for running - workspaces for some fields, but not all fields. For a running workspace, this request supports - updating the following fields only: - Credential configuration ID - Network configuration ID. Used - only if you already use a customer-managed VPC. You cannot convert a running workspace from a - Databricks-managed VPC to a customer-managed VPC. You can use a network configuration update in this - API for a failed or running workspace to add support for PrivateLink, although you also need to add a - private access settings object. - Key configuration ID for managed services (control plane storage, - such as notebook source and Databricks SQL queries). Databricks does not directly encrypt the data - with the customer-managed key (CMK). Databricks uses both the CMK and the Databricks managed key (DMK) - that is unique to your workspace to encrypt the Data Encryption Key (DEK). Databricks uses the DEK to - encrypt your workspace's managed services persisted data. If the workspace does not already have a CMK - for managed services, adding this ID enables managed services encryption for new or updated data. - Existing managed services data that existed before adding the key remains not encrypted with the DEK - until it is modified. If the workspace already has customer-managed keys for managed services, this - request rotates (changes) the CMK keys and the DEK is re-encrypted with the DMK and the new CMK. - Key - configuration ID for workspace storage (root S3 bucket and, optionally, EBS volumes). You can set this - only if the workspace does not already have a customer-managed key configuration for workspace - storage. - Private access settings ID to add PrivateLink support. You can add or update the private - access settings ID to upgrade a workspace to add support for front-end, back-end, or both types of - connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on - a workspace. - Custom tags. Given you provide an empty custom tags, the update would not be applied. - - Network connectivity configuration ID to add serverless stable IP support. You can add or update the - network connectivity configuration ID to ensure the workspace uses the same set of stable IP CIDR - blocks to access your resources. You cannot remove a network connectivity configuration from the - workspace once attached, you can only switch to another one. - - **Important**: To update a running workspace, your workspace must have no running compute resources - that run in your workspace's VPC in the Classic data plane. For example, stop all all-purpose - clusters, job clusters, pools with running clusters, and Classic SQL warehouses. If you do not - terminate all cluster instances in the workspace before calling this API, the request will fail. - - ### Wait until changes take effect. 
After calling the `PATCH` operation to update the workspace - configuration, make repeated `GET` requests with the workspace ID and check the workspace status and - the status of the fields. * For workspaces with a Databricks-managed VPC, the workspace status becomes - `PROVISIONING` temporarily (typically under 20 minutes). If the workspace update is successful, the - workspace status changes to `RUNNING`. Note that you can also check the workspace status in the - [Account Console]. However, you cannot use or create clusters for another 20 minutes after that status - change. This results in a total of up to 40 minutes in which you cannot create clusters. If you create - or use clusters before this time interval elapses, clusters do not launch successfully, fail, or could - cause other unexpected behavior. * For workspaces with a customer-managed VPC, the workspace status - stays at status `RUNNING` and the VPC change happens immediately. A change to the storage - customer-managed key configuration ID might take a few minutes to update, so continue to check the - workspace until you observe that it has been updated. If the update fails, the workspace might revert - silently to its original configuration. After the workspace has been updated, you cannot use or create - clusters for another 20 minutes. If you create or use clusters before this time interval elapses, - clusters do not launch successfully, fail, or could cause other unexpected behavior. - - If you update the _storage_ customer-managed key configurations, it takes 20 minutes for the changes - to fully take effect. During the 20 minute wait, it is important that you stop all REST API calls to - the DBFS API. If you are modifying _only the managed services key configuration_, you can omit the 20 - minute wait. - - **Important**: Customer-managed keys and customer-managed VPCs are supported by only some deployment - types and subscription types. If you have questions about availability, contact your Databricks - representative. - - This operation is available only if your account is on the E2 version of the platform or on a select - custom plan that allows multiple workspaces per account. - - [Account Console]: https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html - [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + Updates a workspace. :param workspace_id: int - Workspace ID. - :param aws_region: str (optional) - The AWS region of the workspace's data plane (for example, `us-west-2`). This parameter is available - only for updating failed workspaces. - :param credentials_id: str (optional) - ID of the workspace's credential configuration object. This parameter is available for updating both - failed and running workspaces. - :param custom_tags: Dict[str,str] (optional) - The custom tags key-value pairing that is attached to this workspace. The key-value pair is a string - of utf-8 characters. The value can be an empty string, with maximum length of 255 characters. The - key can be of maximum length of 127 characters, and cannot be empty. - :param managed_services_customer_managed_key_id: str (optional) - The ID of the workspace's managed services encryption key configuration object. This parameter is - available only for updating failed workspaces. - :param network_connectivity_config_id: str (optional) - :param network_id: str (optional) - The ID of the workspace's network configuration object. 
Used only if you already use a - customer-managed VPC. For failed workspaces only, you can switch from a Databricks-managed VPC to a - customer-managed VPC by updating the workspace to add a network configuration ID. - :param private_access_settings_id: str (optional) - The ID of the workspace's private access settings configuration object. This parameter is available - only for updating failed workspaces. - :param storage_configuration_id: str (optional) - The ID of the workspace's storage configuration object. This parameter is available only for - updating failed workspaces. - :param storage_customer_managed_key_id: str (optional) - The ID of the key configuration object for workspace storage. This parameter is available for - updating both failed and running workspaces. + A unique integer ID for the workspace + :param customer_facing_workspace: :class:`Workspace` + :param update_mask: str (optional) + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. :returns: Long-running operation waiter for :class:`Workspace`. See :method:wait_get_workspace_running for more details. - .. py:method:: update_and_wait(workspace_id: int [, aws_region: Optional[str], credentials_id: Optional[str], custom_tags: Optional[Dict[str, str]], managed_services_customer_managed_key_id: Optional[str], network_connectivity_config_id: Optional[str], network_id: Optional[str], private_access_settings_id: Optional[str], storage_configuration_id: Optional[str], storage_customer_managed_key_id: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> Workspace + .. py:method:: update_and_wait(workspace_id: int, customer_facing_workspace: Workspace [, update_mask: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> Workspace .. py:method:: wait_get_workspace_running(workspace_id: int, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[Workspace], None]]) -> Workspace diff --git a/docs/account/settingsv2/settings_v2.rst b/docs/account/settingsv2/settings_v2.rst index 87cdbd619..03224db02 100644 --- a/docs/account/settingsv2/settings_v2.rst +++ b/docs/account/settingsv2/settings_v2.rst @@ -8,7 +8,8 @@ .. py:method:: get_public_account_setting(name: str) -> Setting - Get a setting value at account level + Get a setting value at account level. See :method:settingsv2/listaccountsettingsmetadata for list of + setting available via public APIs at account level. :param name: str @@ -17,9 +18,8 @@ .. py:method:: list_account_settings_metadata( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[SettingsMetadata] - List valid setting keys and metadata. These settings are available to referenced via [GET - /api/2.1/settings/{name}](#~1api~1account~1settingsv2~1getpublicaccountsetting) and [PATCH - /api/2.1/settings/{name}](#~1api~1account~1settingsv2~patchpublicaccountsetting) APIs + List valid setting keys and metadata. 
These settings are available to be referenced via GET + :method:settingsv2/getpublicaccountsetting and PATCH :method:settingsv2/patchpublicaccountsetting APIs :param page_size: int (optional) The maximum number of settings to return. The service may return fewer than this value. If @@ -37,7 +37,8 @@ .. py:method:: patch_public_account_setting(name: str, setting: Setting) -> Setting - Patch a setting value at account level + Patch a setting value at account level. See :method:settingsv2/listaccountsettingsmetadata for list of + setting available via public APIs at account level. :param name: str :param setting: :class:`Setting` diff --git a/docs/dbdataclasses/apps.rst b/docs/dbdataclasses/apps.rst index 3a562163a..a79286baf 100644 --- a/docs/dbdataclasses/apps.rst +++ b/docs/dbdataclasses/apps.rst @@ -178,6 +178,24 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CAN_CONNECT_AND_CREATE :value: "CAN_CONNECT_AND_CREATE" +.. autoclass:: AppResourceGenieSpace + :members: + :undoc-members: + +.. py:class:: AppResourceGenieSpaceGenieSpacePermission + + .. py:attribute:: CAN_EDIT + :value: "CAN_EDIT" + + .. py:attribute:: CAN_MANAGE + :value: "CAN_MANAGE" + + .. py:attribute:: CAN_RUN + :value: "CAN_RUN" + + .. py:attribute:: CAN_VIEW + :value: "CAN_VIEW" + .. autoclass:: AppResourceJob :members: :undoc-members: @@ -260,6 +278,28 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: VOLUME :value: "VOLUME" +.. autoclass:: AppUpdate + :members: + :undoc-members: + +.. autoclass:: AppUpdateUpdateStatus + :members: + :undoc-members: + +.. py:class:: AppUpdateUpdateStatusUpdateState + + .. py:attribute:: FAILED + :value: "FAILED" + + .. py:attribute:: IN_PROGRESS + :value: "IN_PROGRESS" + + .. py:attribute:: NOT_UPDATED + :value: "NOT_UPDATED" + + .. py:attribute:: SUCCEEDED + :value: "SUCCEEDED" + .. py:class:: ApplicationState .. py:attribute:: CRASHED @@ -278,6 +318,17 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: ComputeSize + + .. py:attribute:: LARGE + :value: "LARGE" + + .. py:attribute:: LIQUID + :value: "LIQUID" + + .. py:attribute:: MEDIUM + :value: "MEDIUM" + .. py:class:: ComputeState .. py:attribute:: ACTIVE diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index ec7a54980..428ca2204 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -8,11 +8,39 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: AccountsMetastoreAssignment +.. autoclass:: AccountsCreateMetastoreAssignmentResponse + :members: + :undoc-members: + +.. autoclass:: AccountsCreateMetastoreResponse + :members: + :undoc-members: + +.. autoclass:: AccountsCreateStorageCredentialInfo + :members: + :undoc-members: + +.. autoclass:: AccountsDeleteMetastoreAssignmentResponse :members: :undoc-members: -.. autoclass:: AccountsMetastoreInfo +.. autoclass:: AccountsDeleteMetastoreResponse + :members: + :undoc-members: + +.. autoclass:: AccountsDeleteStorageCredentialResponse + :members: + :undoc-members: + +.. autoclass:: AccountsGetMetastoreResponse + :members: + :undoc-members: + +.. autoclass:: AccountsListMetastoresResponse + :members: + :undoc-members: + +.. 
autoclass:: AccountsMetastoreAssignment :members: :undoc-members: @@ -20,6 +48,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: AccountsUpdateMetastoreAssignmentResponse + :members: + :undoc-members: + +.. autoclass:: AccountsUpdateMetastoreResponse + :members: + :undoc-members: + +.. autoclass:: AccountsUpdateStorageCredentialResponse + :members: + :undoc-members: + .. autoclass:: ArtifactAllowlistInfo :members: :undoc-members: @@ -239,7 +279,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: ConnectionType - Next Id: 37 + Next Id: 38 .. py:attribute:: BIGQUERY :value: "BIGQUERY" @@ -265,6 +305,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: ORACLE :value: "ORACLE" + .. py:attribute:: PALANTIR + :value: "PALANTIR" + .. py:attribute:: POSTGRESQL :value: "POSTGRESQL" @@ -313,21 +356,25 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: CreateAccountsMetastore + :members: + :undoc-members: + +.. autoclass:: CreateAccountsStorageCredential + :members: + :undoc-members: + .. autoclass:: CreateFunction :members: :undoc-members: .. py:class:: CreateFunctionParameterStyle - Function parameter style. **S** is the value for SQL. - .. py:attribute:: S :value: "S" .. py:class:: CreateFunctionRoutineBody - Function language. When **EXTERNAL** is used, the language of the routine function should be specified in the __external_language__ field, and the __return_params__ of the function cannot be used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be **NO_SQL**. - .. py:attribute:: EXTERNAL :value: "EXTERNAL" @@ -336,15 +383,11 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: CreateFunctionSecurityType - The security type of the function. - .. py:attribute:: DEFINER :value: "DEFINER" .. py:class:: CreateFunctionSqlDataAccess - Function SQL data access. - .. py:attribute:: CONTAINS_SQL :value: "CONTAINS_SQL" @@ -354,10 +397,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: READS_SQL_DATA :value: "READS_SQL_DATA" -.. autoclass:: CreateMetastore - :members: - :undoc-members: - .. autoclass:: CreateMetastoreAssignment :members: :undoc-members: @@ -366,14 +405,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CreateResponse - :members: - :undoc-members: - -.. autoclass:: CreateStorageCredential - :members: - :undoc-members: - .. autoclass:: CredentialDependency :members: :undoc-members: @@ -622,8 +653,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: EffectivePredictiveOptimizationFlagInheritedFromType - The type of the object from which the flag was inherited. If there was no inheritance, this field is left blank. - .. py:attribute:: CATALOG :value: "CATALOG" @@ -743,15 +772,11 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: FunctionInfoParameterStyle - Function parameter style. **S** is the value for SQL. - .. py:attribute:: S :value: "S" .. py:class:: FunctionInfoRoutineBody - Function language. 
When **EXTERNAL** is used, the language of the routine function should be specified in the __external_language__ field, and the __return_params__ of the function cannot be used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be **NO_SQL**. - .. py:attribute:: EXTERNAL :value: "EXTERNAL" @@ -760,15 +785,11 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: FunctionInfoSecurityType - The security type of the function. - .. py:attribute:: DEFINER :value: "DEFINER" .. py:class:: FunctionInfoSqlDataAccess - Function SQL data access. - .. py:attribute:: CONTAINS_SQL :value: "CONTAINS_SQL" @@ -788,15 +809,11 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: FunctionParameterMode - The mode of the function parameter. - .. py:attribute:: IN :value: "IN" .. py:class:: FunctionParameterType - The type of function parameter. - .. py:attribute:: COLUMN :value: "COLUMN" @@ -972,11 +989,12 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: ModelVersionInfoStatus - Current status of the model version. Newly created model versions start in PENDING_REGISTRATION status, then move to READY status once the model version files are uploaded and the model version is finalized. Only model versions in READY status can be loaded for inference or served. - .. py:attribute:: FAILED_REGISTRATION :value: "FAILED_REGISTRATION" + .. py:attribute:: MODEL_VERSION_STATUS_UNKNOWN + :value: "MODEL_VERSION_STATUS_UNKNOWN" + .. py:attribute:: PENDING_REGISTRATION :value: "PENDING_REGISTRATION" @@ -1351,6 +1369,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: EXECUTE_CLEAN_ROOM_TASK :value: "EXECUTE_CLEAN_ROOM_TASK" + .. py:attribute:: EXTERNAL_USE_SCHEMA + :value: "EXTERNAL_USE_SCHEMA" + .. py:attribute:: MANAGE :value: "MANAGE" @@ -1480,6 +1501,8 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: SecurableKind + Latest kind: CONNECTION_REDSHIFT_IAM = 265; Next id:266 + .. py:attribute:: TABLE_DB_STORAGE :value: "TABLE_DB_STORAGE" @@ -1507,6 +1530,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: TABLE_DELTA_UNIFORM_ICEBERG_EXTERNAL :value: "TABLE_DELTA_UNIFORM_ICEBERG_EXTERNAL" + .. py:attribute:: TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_DELTASHARING + :value: "TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_DELTASHARING" + .. py:attribute:: TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_HIVE_METASTORE_EXTERNAL :value: "TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_HIVE_METASTORE_EXTERNAL" @@ -1579,6 +1605,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: TABLE_FOREIGN_ORACLE :value: "TABLE_FOREIGN_ORACLE" + .. py:attribute:: TABLE_FOREIGN_PALANTIR + :value: "TABLE_FOREIGN_PALANTIR" + .. py:attribute:: TABLE_FOREIGN_POSTGRESQL :value: "TABLE_FOREIGN_POSTGRESQL" @@ -1630,6 +1659,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: TABLE_METRIC_VIEW :value: "TABLE_METRIC_VIEW" + .. py:attribute:: TABLE_METRIC_VIEW_DELTASHARING + :value: "TABLE_METRIC_VIEW_DELTASHARING" + .. py:attribute:: TABLE_ONLINE_VECTOR_INDEX_DIRECT :value: "TABLE_ONLINE_VECTOR_INDEX_DIRECT" @@ -1819,6 +1851,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SNOWFLAKE :value: "SNOWFLAKE" + .. 
py:attribute:: STREAM_NATIVE + :value: "STREAM_NATIVE" + .. py:attribute:: TABLEAU :value: "TABLEAU" @@ -1905,15 +1940,19 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: UpdateAssignmentResponse +.. autoclass:: UpdateAccountsMetastore :members: :undoc-members: -.. autoclass:: UpdateCatalogWorkspaceBindingsResponse +.. autoclass:: UpdateAccountsStorageCredential :members: :undoc-members: -.. autoclass:: UpdateMetastore +.. autoclass:: UpdateAssignmentResponse + :members: + :undoc-members: + +.. autoclass:: UpdateCatalogWorkspaceBindingsResponse :members: :undoc-members: @@ -1933,10 +1972,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: UpdateStorageCredential - :members: - :undoc-members: - .. autoclass:: UpdateWorkspaceBindingsResponse :members: :undoc-members: @@ -2004,9 +2039,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: VolumeType - The type of the volume. An external volume is located in the specified external location. A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. [Learn more] - [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external - .. py:attribute:: EXTERNAL :value: "EXTERNAL" diff --git a/docs/dbdataclasses/compute.rst b/docs/dbdataclasses/compute.rst index 22b9cf41a..1af334270 100644 --- a/docs/dbdataclasses/compute.rst +++ b/docs/dbdataclasses/compute.rst @@ -1129,6 +1129,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: BUDGET_POLICY_RESOLUTION_FAILURE :value: "BUDGET_POLICY_RESOLUTION_FAILURE" + .. py:attribute:: CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED + :value: "CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED" + .. py:attribute:: CLOUD_ACCOUNT_SETUP_FAILURE :value: "CLOUD_ACCOUNT_SETUP_FAILURE" @@ -1360,6 +1363,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: JOB_FINISHED :value: "JOB_FINISHED" + .. py:attribute:: K8S_ACTIVE_POD_QUOTA_EXCEEDED + :value: "K8S_ACTIVE_POD_QUOTA_EXCEEDED" + .. py:attribute:: K8S_AUTOSCALING_FAILURE :value: "K8S_AUTOSCALING_FAILURE" @@ -1405,6 +1411,12 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: NFS_MOUNT_FAILURE :value: "NFS_MOUNT_FAILURE" + .. py:attribute:: NO_ACTIVATED_K8S + :value: "NO_ACTIVATED_K8S" + + .. py:attribute:: NO_ACTIVATED_K8S_TESTING_TAG + :value: "NO_ACTIVATED_K8S_TESTING_TAG" + .. py:attribute:: NO_MATCHED_K8S :value: "NO_MATCHED_K8S" @@ -1519,6 +1531,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: UPDATE_INSTANCE_PROFILE_FAILURE :value: "UPDATE_INSTANCE_PROFILE_FAILURE" + .. py:attribute:: USAGE_POLICY_ENTITLEMENT_DENIED + :value: "USAGE_POLICY_ENTITLEMENT_DENIED" + .. py:attribute:: USER_INITIATED_VM_TERMINATION :value: "USER_INITIATED_VM_TERMINATION" diff --git a/docs/dbdataclasses/dashboards.rst b/docs/dbdataclasses/dashboards.rst index 559da8424..394036f3a 100644 --- a/docs/dbdataclasses/dashboards.rst +++ b/docs/dbdataclasses/dashboards.rst @@ -37,6 +37,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GenieFeedback + :members: + :undoc-members: + .. 
py:class:: GenieFeedbackRating Feedback rating for Genie messages @@ -86,6 +90,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GenieSuggestedQuestionsAttachment + :members: + :undoc-members: + .. autoclass:: GetPublishedDashboardTokenInfoResponse :members: :undoc-members: @@ -152,6 +160,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DESCRIBE_QUERY_UNEXPECTED_FAILURE :value: "DESCRIBE_QUERY_UNEXPECTED_FAILURE" + .. py:attribute:: EXCEEDED_MAX_TOKEN_LENGTH_EXCEPTION + :value: "EXCEEDED_MAX_TOKEN_LENGTH_EXCEPTION" + .. py:attribute:: FUNCTIONS_NOT_AVAILABLE_EXCEPTION :value: "FUNCTIONS_NOT_AVAILABLE_EXCEPTION" @@ -182,6 +193,21 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: ILLEGAL_PARAMETER_DEFINITION_EXCEPTION :value: "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION" + .. py:attribute:: INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION + :value: "INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION" + + .. py:attribute:: INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION + :value: "INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION" + + .. py:attribute:: INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION + :value: "INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION" + + .. py:attribute:: INTERNAL_CATALOG_MISSING_UC_PATH_EXCEPTION + :value: "INTERNAL_CATALOG_MISSING_UC_PATH_EXCEPTION" + + .. py:attribute:: INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION + :value: "INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION" + .. py:attribute:: INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION :value: "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION" @@ -316,6 +342,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: QueryAttachmentParameter + :members: + :undoc-members: + .. autoclass:: Result :members: :undoc-members: diff --git a/docs/dbdataclasses/database.rst b/docs/dbdataclasses/database.rst index bdf7a2161..dbb238f00 100644 --- a/docs/dbdataclasses/database.rst +++ b/docs/dbdataclasses/database.rst @@ -4,6 +4,10 @@ Database Instances These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.database`` module. .. py:currentmodule:: databricks.sdk.service.database +.. autoclass:: CustomTag + :members: + :undoc-members: + .. autoclass:: DatabaseCatalog :members: :undoc-members: diff --git a/docs/dbdataclasses/dataquality.rst b/docs/dbdataclasses/dataquality.rst new file mode 100644 index 000000000..da8a02369 --- /dev/null +++ b/docs/dbdataclasses/dataquality.rst @@ -0,0 +1,185 @@ +Data Quality +============ + +These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.dataquality`` module. + +.. py:currentmodule:: databricks.sdk.service.dataquality +.. py:class:: AggregationGranularity + + The granularity for aggregating data into time windows based on their timestamp. + + .. py:attribute:: AGGREGATION_GRANULARITY_1_DAY + :value: "AGGREGATION_GRANULARITY_1_DAY" + + .. py:attribute:: AGGREGATION_GRANULARITY_1_HOUR + :value: "AGGREGATION_GRANULARITY_1_HOUR" + + .. py:attribute:: AGGREGATION_GRANULARITY_1_MONTH + :value: "AGGREGATION_GRANULARITY_1_MONTH" + + .. py:attribute:: AGGREGATION_GRANULARITY_1_WEEK + :value: "AGGREGATION_GRANULARITY_1_WEEK" + + .. 
py:attribute:: AGGREGATION_GRANULARITY_1_YEAR + :value: "AGGREGATION_GRANULARITY_1_YEAR" + + .. py:attribute:: AGGREGATION_GRANULARITY_2_WEEKS + :value: "AGGREGATION_GRANULARITY_2_WEEKS" + + .. py:attribute:: AGGREGATION_GRANULARITY_30_MINUTES + :value: "AGGREGATION_GRANULARITY_30_MINUTES" + + .. py:attribute:: AGGREGATION_GRANULARITY_3_WEEKS + :value: "AGGREGATION_GRANULARITY_3_WEEKS" + + .. py:attribute:: AGGREGATION_GRANULARITY_4_WEEKS + :value: "AGGREGATION_GRANULARITY_4_WEEKS" + + .. py:attribute:: AGGREGATION_GRANULARITY_5_MINUTES + :value: "AGGREGATION_GRANULARITY_5_MINUTES" + +.. autoclass:: AnomalyDetectionConfig + :members: + :undoc-members: + +.. autoclass:: CancelRefreshResponse + :members: + :undoc-members: + +.. autoclass:: CronSchedule + :members: + :undoc-members: + +.. py:class:: CronSchedulePauseStatus + + The data quality monitoring workflow cron schedule pause status. + + .. py:attribute:: CRON_SCHEDULE_PAUSE_STATUS_PAUSED + :value: "CRON_SCHEDULE_PAUSE_STATUS_PAUSED" + + .. py:attribute:: CRON_SCHEDULE_PAUSE_STATUS_UNPAUSED + :value: "CRON_SCHEDULE_PAUSE_STATUS_UNPAUSED" + +.. autoclass:: DataProfilingConfig + :members: + :undoc-members: + +.. autoclass:: DataProfilingCustomMetric + :members: + :undoc-members: + +.. py:class:: DataProfilingCustomMetricType + + The custom metric type. + + .. py:attribute:: DATA_PROFILING_CUSTOM_METRIC_TYPE_AGGREGATE + :value: "DATA_PROFILING_CUSTOM_METRIC_TYPE_AGGREGATE" + + .. py:attribute:: DATA_PROFILING_CUSTOM_METRIC_TYPE_DERIVED + :value: "DATA_PROFILING_CUSTOM_METRIC_TYPE_DERIVED" + + .. py:attribute:: DATA_PROFILING_CUSTOM_METRIC_TYPE_DRIFT + :value: "DATA_PROFILING_CUSTOM_METRIC_TYPE_DRIFT" + +.. py:class:: DataProfilingStatus + + The status of the data profiling monitor. + + .. py:attribute:: DATA_PROFILING_STATUS_ACTIVE + :value: "DATA_PROFILING_STATUS_ACTIVE" + + .. py:attribute:: DATA_PROFILING_STATUS_DELETE_PENDING + :value: "DATA_PROFILING_STATUS_DELETE_PENDING" + + .. py:attribute:: DATA_PROFILING_STATUS_ERROR + :value: "DATA_PROFILING_STATUS_ERROR" + + .. py:attribute:: DATA_PROFILING_STATUS_FAILED + :value: "DATA_PROFILING_STATUS_FAILED" + + .. py:attribute:: DATA_PROFILING_STATUS_PENDING + :value: "DATA_PROFILING_STATUS_PENDING" + +.. autoclass:: InferenceLogConfig + :members: + :undoc-members: + +.. py:class:: InferenceProblemType + + Inference problem type the model aims to solve. + + .. py:attribute:: INFERENCE_PROBLEM_TYPE_CLASSIFICATION + :value: "INFERENCE_PROBLEM_TYPE_CLASSIFICATION" + + .. py:attribute:: INFERENCE_PROBLEM_TYPE_REGRESSION + :value: "INFERENCE_PROBLEM_TYPE_REGRESSION" + +.. autoclass:: ListMonitorResponse + :members: + :undoc-members: + +.. autoclass:: ListRefreshResponse + :members: + :undoc-members: + +.. autoclass:: Monitor + :members: + :undoc-members: + +.. autoclass:: NotificationDestination + :members: + :undoc-members: + +.. autoclass:: NotificationSettings + :members: + :undoc-members: + +.. autoclass:: Refresh + :members: + :undoc-members: + +.. py:class:: RefreshState + + The state of the refresh. + + .. py:attribute:: MONITOR_REFRESH_STATE_CANCELED + :value: "MONITOR_REFRESH_STATE_CANCELED" + + .. py:attribute:: MONITOR_REFRESH_STATE_FAILED + :value: "MONITOR_REFRESH_STATE_FAILED" + + .. py:attribute:: MONITOR_REFRESH_STATE_PENDING + :value: "MONITOR_REFRESH_STATE_PENDING" + + .. py:attribute:: MONITOR_REFRESH_STATE_RUNNING + :value: "MONITOR_REFRESH_STATE_RUNNING" + + .. py:attribute:: MONITOR_REFRESH_STATE_SUCCESS + :value: "MONITOR_REFRESH_STATE_SUCCESS" + + .. 
py:attribute:: MONITOR_REFRESH_STATE_UNKNOWN + :value: "MONITOR_REFRESH_STATE_UNKNOWN" + +.. py:class:: RefreshTrigger + + The trigger of the refresh. + + .. py:attribute:: MONITOR_REFRESH_TRIGGER_DATA_CHANGE + :value: "MONITOR_REFRESH_TRIGGER_DATA_CHANGE" + + .. py:attribute:: MONITOR_REFRESH_TRIGGER_MANUAL + :value: "MONITOR_REFRESH_TRIGGER_MANUAL" + + .. py:attribute:: MONITOR_REFRESH_TRIGGER_SCHEDULE + :value: "MONITOR_REFRESH_TRIGGER_SCHEDULE" + + .. py:attribute:: MONITOR_REFRESH_TRIGGER_UNKNOWN + :value: "MONITOR_REFRESH_TRIGGER_UNKNOWN" + +.. autoclass:: SnapshotConfig + :members: + :undoc-members: + +.. autoclass:: TimeSeriesConfig + :members: + :undoc-members: diff --git a/docs/dbdataclasses/iam.rst b/docs/dbdataclasses/iam.rst index 96abd3743..142ec986d 100644 --- a/docs/dbdataclasses/iam.rst +++ b/docs/dbdataclasses/iam.rst @@ -12,6 +12,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: AccountGroup + :members: + :undoc-members: + +.. autoclass:: AccountServicePrincipal + :members: + :undoc-members: + +.. autoclass:: AccountUser + :members: + :undoc-members: + .. autoclass:: Actor :members: :undoc-members: @@ -69,6 +81,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: URN_IETF_PARAMS_SCIM_SCHEMAS_CORE_2_0_GROUP :value: "URN_IETF_PARAMS_SCIM_SCHEMAS_CORE_2_0_GROUP" +.. autoclass:: ListAccountGroupsResponse + :members: + :undoc-members: + +.. autoclass:: ListAccountServicePrincipalsResponse + :members: + :undoc-members: + +.. autoclass:: ListAccountUsersResponse + :members: + :undoc-members: + .. autoclass:: ListGroupsResponse :members: :undoc-members: @@ -283,10 +307,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: URN_IETF_PARAMS_SCIM_SCHEMAS_CORE_2_0_SERVICE_PRINCIPAL :value: "URN_IETF_PARAMS_SCIM_SCHEMAS_CORE_2_0_SERVICE_PRINCIPAL" -.. autoclass:: UpdateResponse - :members: - :undoc-members: - .. autoclass:: User :members: :undoc-members: diff --git a/docs/dbdataclasses/iamv2.rst b/docs/dbdataclasses/iamv2.rst new file mode 100644 index 000000000..b77239ca6 --- /dev/null +++ b/docs/dbdataclasses/iamv2.rst @@ -0,0 +1,90 @@ +Identity and Access Management +============================== + +These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.iamv2`` module. + +.. py:currentmodule:: databricks.sdk.service.iamv2 +.. autoclass:: Group + :members: + :undoc-members: + +.. py:class:: PrincipalType + + The type of the principal (user/sp/group). + + .. py:attribute:: GROUP + :value: "GROUP" + + .. py:attribute:: SERVICE_PRINCIPAL + :value: "SERVICE_PRINCIPAL" + + .. py:attribute:: USER + :value: "USER" + +.. autoclass:: ResolveGroupResponse + :members: + :undoc-members: + +.. autoclass:: ResolveServicePrincipalResponse + :members: + :undoc-members: + +.. autoclass:: ResolveUserResponse + :members: + :undoc-members: + +.. autoclass:: ServicePrincipal + :members: + :undoc-members: + +.. py:class:: State + + The activity status of a user or service principal in a Databricks account or workspace. + + .. py:attribute:: ACTIVE + :value: "ACTIVE" + + .. py:attribute:: INACTIVE + :value: "INACTIVE" + +.. autoclass:: User + :members: + :undoc-members: + +.. autoclass:: UserName + :members: + :undoc-members: + +.. autoclass:: WorkspaceAccessDetail + :members: + :undoc-members: + +.. 
py:class:: WorkspaceAccessDetailAccessType + + The type of access the principal has to the workspace. + + .. py:attribute:: DIRECT + :value: "DIRECT" + + .. py:attribute:: INDIRECT + :value: "INDIRECT" + +.. py:class:: WorkspaceAccessDetailView + + Controls what fields are returned in the GetWorkspaceAccessDetail response. + + .. py:attribute:: BASIC + :value: "BASIC" + + .. py:attribute:: FULL + :value: "FULL" + +.. py:class:: WorkspacePermission + + The type of permission a principal has to a workspace (admin/user). + + .. py:attribute:: ADMIN_PERMISSION + :value: "ADMIN_PERMISSION" + + .. py:attribute:: USER_PERMISSION + :value: "USER_PERMISSION" diff --git a/docs/dbdataclasses/index.rst b/docs/dbdataclasses/index.rst index 4b1463f0b..3d3a3af75 100644 --- a/docs/dbdataclasses/index.rst +++ b/docs/dbdataclasses/index.rst @@ -13,8 +13,10 @@ Dataclasses compute dashboards database + dataquality files iam + iamv2 jobs marketplace ml diff --git a/docs/dbdataclasses/marketplace.rst b/docs/dbdataclasses/marketplace.rst index f243184c1..96f17de29 100644 --- a/docs/dbdataclasses/marketplace.rst +++ b/docs/dbdataclasses/marketplace.rst @@ -19,6 +19,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: ASSET_TYPE_GIT_REPO :value: "ASSET_TYPE_GIT_REPO" + .. py:attribute:: ASSET_TYPE_MCP + :value: "ASSET_TYPE_MCP" + .. py:attribute:: ASSET_TYPE_MEDIA :value: "ASSET_TYPE_MEDIA" diff --git a/docs/dbdataclasses/ml.rst b/docs/dbdataclasses/ml.rst index 6042988a1..5bd7720e8 100644 --- a/docs/dbdataclasses/ml.rst +++ b/docs/dbdataclasses/ml.rst @@ -130,6 +130,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DataSource + :members: + :undoc-members: + .. autoclass:: Dataset :members: :undoc-members: @@ -190,6 +194,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DeltaTableSource + :members: + :undoc-members: + .. autoclass:: Experiment :members: :undoc-members: @@ -231,6 +239,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Feature + :members: + :undoc-members: + .. autoclass:: FeatureLineage :members: :undoc-members: @@ -284,6 +296,55 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SUCCEEDED :value: "SUCCEEDED" +.. autoclass:: Function + :members: + :undoc-members: + +.. autoclass:: FunctionExtraParameter + :members: + :undoc-members: + +.. py:class:: FunctionFunctionType + + .. py:attribute:: APPROX_COUNT_DISTINCT + :value: "APPROX_COUNT_DISTINCT" + + .. py:attribute:: APPROX_PERCENTILE + :value: "APPROX_PERCENTILE" + + .. py:attribute:: AVG + :value: "AVG" + + .. py:attribute:: COUNT + :value: "COUNT" + + .. py:attribute:: FIRST + :value: "FIRST" + + .. py:attribute:: LAST + :value: "LAST" + + .. py:attribute:: MAX + :value: "MAX" + + .. py:attribute:: MIN + :value: "MIN" + + .. py:attribute:: STDDEV_POP + :value: "STDDEV_POP" + + .. py:attribute:: STDDEV_SAMP + :value: "STDDEV_SAMP" + + .. py:attribute:: SUM + :value: "SUM" + + .. py:attribute:: VAR_POP + :value: "VAR_POP" + + .. py:attribute:: VAR_SAMP + :value: "VAR_SAMP" + .. autoclass:: GetExperimentByNameResponse :members: :undoc-members: @@ -364,6 +425,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. 
autoclass:: ListFeaturesResponse + :members: + :undoc-members: + .. autoclass:: ListModelsResponse :members: :undoc-members: @@ -799,6 +864,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: TimeWindow + :members: + :undoc-members: + .. autoclass:: TransitionRequest :members: :undoc-members: diff --git a/docs/dbdataclasses/pipelines.rst b/docs/dbdataclasses/pipelines.rst index 9c0e96bf8..b49b6bd1c 100644 --- a/docs/dbdataclasses/pipelines.rst +++ b/docs/dbdataclasses/pipelines.rst @@ -126,6 +126,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: IngestionPipelineDefinitionWorkdayReportParameters + :members: + :undoc-members: + +.. autoclass:: IngestionPipelineDefinitionWorkdayReportParametersQueryKeyValue + :members: + :undoc-members: + .. py:class:: IngestionSourceType .. py:attribute:: BIGQUERY @@ -137,6 +145,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DYNAMICS365 :value: "DYNAMICS365" + .. py:attribute:: FOREIGN_CATALOG + :value: "FOREIGN_CATALOG" + .. py:attribute:: GA4_RAW_DATA :value: "GA4_RAW_DATA" diff --git a/docs/dbdataclasses/provisioning.rst b/docs/dbdataclasses/provisioning.rst index 69e237d5f..3ddcfcb42 100644 --- a/docs/dbdataclasses/provisioning.rst +++ b/docs/dbdataclasses/provisioning.rst @@ -12,6 +12,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: AzureKeyInfo + :members: + :undoc-members: + .. autoclass:: AzureWorkspaceInfo :members: :undoc-members: @@ -40,23 +44,34 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: CustomerFacingComputeMode + + Corresponds to compute mode defined here: https://src.dev.databricks.com/databricks/universe@9076536b18479afd639d1c1f9dd5a59f72215e69/-/blob/central/api/common.proto?L872 + + .. py:attribute:: HYBRID + :value: "HYBRID" + + .. py:attribute:: SERVERLESS + :value: "SERVERLESS" + .. autoclass:: CustomerFacingGcpCloudResourceContainer :members: :undoc-members: -.. autoclass:: CustomerManagedKey - :members: - :undoc-members: +.. py:class:: CustomerFacingStorageMode + + .. py:attribute:: CUSTOMER_HOSTED + :value: "CUSTOMER_HOSTED" + + .. py:attribute:: DEFAULT_STORAGE + :value: "DEFAULT_STORAGE" -.. autoclass:: DeleteResponse +.. autoclass:: CustomerManagedKey :members: :undoc-members: .. py:class:: EndpointUseCase - This enumeration represents the type of Databricks VPC [endpoint service] that was used when creating this VPC endpoint. - [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html - .. py:attribute:: DATAPLANE_RELAY_ACCESS :value: "DATAPLANE_RELAY_ACCESS" @@ -65,7 +80,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: ErrorType - The AWS resource associated with this error: credentials, VPC, subnet, security group, or network ACL. + ErrorType and WarningType are used to represent the type of error or warning by NetworkHealth and NetworkWarning defined in central/api/accounts/accounts.proto .. py:attribute:: CREDENTIALS :value: "CREDENTIALS" @@ -82,7 +97,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: VPC :value: "VPC" -.. autoclass:: ExternalCustomerInfo +.. 
autoclass:: GcpCommonNetworkConfig :members: :undoc-members: @@ -118,9 +133,11 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: PUBLIC_NODE_PUBLIC_MASTER :value: "PUBLIC_NODE_PUBLIC_MASTER" -.. py:class:: KeyUseCase +.. autoclass:: KeyAccessConfiguration + :members: + :undoc-members: - Possible values are: * `MANAGED_SERVICES`: Encrypts notebook and secret data in the control plane * `STORAGE`: Encrypts the workspace's root S3 bucket (root DBFS and system data) and, optionally, cluster EBS volumes. +.. py:class:: KeyUseCase .. py:attribute:: MANAGED_SERVICES :value: "MANAGED_SERVICES" @@ -146,9 +163,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: PricingTier - The pricing tier of the workspace. For pricing tier information, see [AWS Pricing]. - [AWS Pricing]: https://databricks.com/product/aws-pricing - .. py:attribute:: COMMUNITY_EDITION :value: "COMMUNITY_EDITION" @@ -169,8 +183,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: PrivateAccessLevel - The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object. * `ACCOUNT` level access (the default) allows only VPC endpoints that are registered in your Databricks account connect to your workspace. * `ENDPOINT` level access allows only specified VPC endpoints connect to your workspace. For details, see `allowed_vpc_endpoint_ids`. - .. py:attribute:: ACCOUNT :value: "ACCOUNT" @@ -181,10 +193,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ReplaceResponse - :members: - :undoc-members: - .. autoclass:: RootBucketInfo :members: :undoc-members: @@ -197,18 +205,12 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: UpdateResponse - :members: - :undoc-members: - .. autoclass:: VpcEndpoint :members: :undoc-members: .. py:class:: VpcStatus - The status of this network configuration object in terms of its use in a workspace: * `UNATTACHED`: Unattached. * `VALID`: Valid. * `BROKEN`: Broken. * `WARNED`: Warned. - .. py:attribute:: BROKEN :value: "BROKEN" @@ -223,8 +225,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: WarningType - The AWS resource associated with this warning: a subnet or a security group. - .. py:attribute:: SECURITY_GROUP :value: "SECURITY_GROUP" @@ -235,9 +235,13 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: WorkspaceNetwork + :members: + :undoc-members: + .. py:class:: WorkspaceStatus - The status of the workspace. For workspace creation, usually it is set to `PROVISIONING` initially. Continue to check the status until the status is `RUNNING`. + The different statuses of a workspace. The following represents the current set of valid transitions from status to status: NOT_PROVISIONED -> PROVISIONING -> CANCELLED PROVISIONING -> RUNNING -> FAILED -> CANCELLED (note that this transition is disallowed in the MultiWorkspace Project) RUNNING -> PROVISIONING -> BANNED -> CANCELLED FAILED -> PROVISIONING -> CANCELLED BANNED -> RUNNING -> CANCELLED Note that a transition from any state to itself is also valid. TODO(PLAT-5867): add a transition from CANCELLED to some other value (e.g. RECOVERING) .. 
py:attribute:: BANNED :value: "BANNED" diff --git a/docs/dbdataclasses/serving.rst b/docs/dbdataclasses/serving.rst index 39b159395..db6deb399 100644 --- a/docs/dbdataclasses/serving.rst +++ b/docs/dbdataclasses/serving.rst @@ -517,6 +517,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: UpdateInferenceEndpointNotificationsResponse + :members: + :undoc-members: + .. autoclass:: V1ResponseChoiceElement :members: :undoc-members: diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index e09f827f6..cd77d7d2c 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -123,7 +123,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: ComplianceStandard - Compliance stardard for SHIELD customers + Compliance standard for SHIELD customers. See README.md for how instructions of how to add new standards. .. py:attribute:: CANADA_PROTECTED_B :value: "CANADA_PROTECTED_B" @@ -143,6 +143,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: GERMANY_C5 :value: "GERMANY_C5" + .. py:attribute:: GERMANY_TISAX + :value: "GERMANY_TISAX" + .. py:attribute:: HIPAA :value: "HIPAA" diff --git a/docs/dbdataclasses/settingsv2.rst b/docs/dbdataclasses/settingsv2.rst index 583fe9aec..d15eb734f 100644 --- a/docs/dbdataclasses/settingsv2.rst +++ b/docs/dbdataclasses/settingsv2.rst @@ -93,21 +93,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: DefaultDataSecurityModeMessage - :members: - :undoc-members: - -.. py:class:: DefaultDataSecurityModeMessageStatus - - .. py:attribute:: NOT_SET - :value: "NOT_SET" - - .. py:attribute:: SINGLE_USER - :value: "SINGLE_USER" - - .. py:attribute:: USER_ISOLATION - :value: "USER_ISOLATION" - .. autoclass:: IntegerMessage :members: :undoc-members: diff --git a/docs/dbdataclasses/sharing.rst b/docs/dbdataclasses/sharing.rst index 948e33bd6..43a6675e0 100644 --- a/docs/dbdataclasses/sharing.rst +++ b/docs/dbdataclasses/sharing.rst @@ -494,12 +494,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: FILE_BASED_TABLE :value: "FILE_BASED_TABLE" + .. py:attribute:: FOREIGN_ICEBERG_TABLE + :value: "FOREIGN_ICEBERG_TABLE" + .. py:attribute:: FOREIGN_TABLE :value: "FOREIGN_TABLE" .. py:attribute:: MATERIALIZED_VIEW :value: "MATERIALIZED_VIEW" + .. py:attribute:: METRIC_VIEW + :value: "METRIC_VIEW" + .. py:attribute:: STREAMING_TABLE :value: "STREAMING_TABLE" diff --git a/docs/dbdataclasses/sql.rst b/docs/dbdataclasses/sql.rst index 3dd84f229..17ebe326f 100644 --- a/docs/dbdataclasses/sql.rst +++ b/docs/dbdataclasses/sql.rst @@ -300,8 +300,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: CreateWarehouseRequestWarehouseType - Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`. - .. py:attribute:: CLASSIC :value: "CLASSIC" @@ -433,8 +431,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: EditWarehouseRequestWarehouseType - Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`. - .. 
py:attribute:: CLASSIC :value: "CLASSIC" @@ -466,8 +462,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: EndpointInfoWarehouseType - Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`. - .. py:attribute:: CLASSIC :value: "CLASSIC" @@ -536,8 +530,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: GetWarehouseResponseWarehouseType - Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and also set the field `enable_serverless_compute` to `true`. - .. py:attribute:: CLASSIC :value: "CLASSIC" @@ -553,7 +545,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: GetWorkspaceWarehouseConfigResponseSecurityPolicy - Security policy for warehouses + Security policy to be used for warehouses .. py:attribute:: DATA_ACCESS_CONTROL :value: "DATA_ACCESS_CONTROL" @@ -979,7 +971,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: SetWorkspaceWarehouseConfigRequestSecurityPolicy - Security policy for warehouses + Security policy to be used for warehouses .. py:attribute:: DATA_ACCESS_CONTROL :value: "DATA_ACCESS_CONTROL" @@ -996,7 +988,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: SpotInstancePolicy - Configurations whether the warehouse should use spot instances. + EndpointSpotInstancePolicy configures whether the endpoint should use spot instances. + The breakdown of how the EndpointSpotInstancePolicy converts to per cloud configurations is: + +-------+--------------------------------------+--------------------------------+ | Cloud | COST_OPTIMIZED | RELIABILITY_OPTIMIZED | +-------+--------------------------------------+--------------------------------+ | AWS | On Demand Driver with Spot Executors | On Demand Driver and Executors | | AZURE | On Demand Driver and Executors | On Demand Driver and Executors | +-------+--------------------------------------+--------------------------------+ + While including "spot" in the enum name may limit the the future extensibility of this field because it limits this enum to denoting "spot or not", this is the field that PM recommends after discussion with customers per SC-48783. .. py:attribute:: COST_OPTIMIZED :value: "COST_OPTIMIZED" @@ -1013,7 +1008,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: State - State of the warehouse + * State of a warehouse. .. py:attribute:: DELETED :value: "DELETED" @@ -1043,8 +1038,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: StatementState - Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: running - `SUCCEEDED`: execution was successful, result data available for fetch - `FAILED`: execution failed; reason for failure described in accomanying error message - `CANCELED`: user canceled; can come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`: execution successful, and statement closed; result no longer available for fetch - .. py:attribute:: CANCELED :value: "CANCELED" @@ -1069,8 +1062,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: Status - Health status of the warehouse. - .. 
py:attribute:: DEGRADED :value: "DEGRADED" @@ -1080,9 +1071,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: HEALTHY :value: "HEALTHY" - .. py:attribute:: STATUS_UNSPECIFIED - :value: "STATUS_UNSPECIFIED" - .. autoclass:: StopWarehouseResponse :members: :undoc-members: @@ -1110,29 +1098,71 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: TerminationReasonCode - status code indicating why the cluster was terminated + The status code indicating why the cluster was terminated .. py:attribute:: ABUSE_DETECTED :value: "ABUSE_DETECTED" + .. py:attribute:: ACCESS_TOKEN_FAILURE + :value: "ACCESS_TOKEN_FAILURE" + + .. py:attribute:: ALLOCATION_TIMEOUT + :value: "ALLOCATION_TIMEOUT" + + .. py:attribute:: ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY + :value: "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY" + + .. py:attribute:: ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS + :value: "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS" + + .. py:attribute:: ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS + :value: "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS" + + .. py:attribute:: ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS + :value: "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS" + + .. py:attribute:: ALLOCATION_TIMEOUT_NO_READY_CLUSTERS + :value: "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS" + + .. py:attribute:: ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS + :value: "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS" + + .. py:attribute:: ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS + :value: "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS" + .. py:attribute:: ATTACH_PROJECT_FAILURE :value: "ATTACH_PROJECT_FAILURE" .. py:attribute:: AWS_AUTHORIZATION_FAILURE :value: "AWS_AUTHORIZATION_FAILURE" + .. py:attribute:: AWS_INACCESSIBLE_KMS_KEY_FAILURE + :value: "AWS_INACCESSIBLE_KMS_KEY_FAILURE" + + .. py:attribute:: AWS_INSTANCE_PROFILE_UPDATE_FAILURE + :value: "AWS_INSTANCE_PROFILE_UPDATE_FAILURE" + .. py:attribute:: AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE :value: "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE" .. py:attribute:: AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE :value: "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE" + .. py:attribute:: AWS_INVALID_KEY_PAIR + :value: "AWS_INVALID_KEY_PAIR" + + .. py:attribute:: AWS_INVALID_KMS_KEY_STATE + :value: "AWS_INVALID_KMS_KEY_STATE" + .. py:attribute:: AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE :value: "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE" .. py:attribute:: AWS_REQUEST_LIMIT_EXCEEDED :value: "AWS_REQUEST_LIMIT_EXCEEDED" + .. py:attribute:: AWS_RESOURCE_QUOTA_EXCEEDED + :value: "AWS_RESOURCE_QUOTA_EXCEEDED" + .. py:attribute:: AWS_UNSUPPORTED_FAILURE :value: "AWS_UNSUPPORTED_FAILURE" @@ -1148,6 +1178,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: AZURE_OPERATION_NOT_ALLOWED_EXCEPTION :value: "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION" + .. py:attribute:: AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE + :value: "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE" + .. py:attribute:: AZURE_QUOTA_EXCEEDED_EXCEPTION :value: "AZURE_QUOTA_EXCEEDED_EXCEPTION" @@ -1172,18 +1205,51 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION :value: "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION" + .. py:attribute:: BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG + :value: "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG" + + .. 
py:attribute:: BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED + :value: "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED" + + .. py:attribute:: BUDGET_POLICY_RESOLUTION_FAILURE + :value: "BUDGET_POLICY_RESOLUTION_FAILURE" + + .. py:attribute:: CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED + :value: "CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED" + + .. py:attribute:: CLOUD_ACCOUNT_SETUP_FAILURE + :value: "CLOUD_ACCOUNT_SETUP_FAILURE" + + .. py:attribute:: CLOUD_OPERATION_CANCELLED + :value: "CLOUD_OPERATION_CANCELLED" + .. py:attribute:: CLOUD_PROVIDER_DISK_SETUP_FAILURE :value: "CLOUD_PROVIDER_DISK_SETUP_FAILURE" + .. py:attribute:: CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED + :value: "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED" + .. py:attribute:: CLOUD_PROVIDER_LAUNCH_FAILURE :value: "CLOUD_PROVIDER_LAUNCH_FAILURE" + .. py:attribute:: CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG + :value: "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG" + .. py:attribute:: CLOUD_PROVIDER_RESOURCE_STOCKOUT :value: "CLOUD_PROVIDER_RESOURCE_STOCKOUT" + .. py:attribute:: CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG + :value: "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG" + .. py:attribute:: CLOUD_PROVIDER_SHUTDOWN :value: "CLOUD_PROVIDER_SHUTDOWN" + .. py:attribute:: CLUSTER_OPERATION_THROTTLED + :value: "CLUSTER_OPERATION_THROTTLED" + + .. py:attribute:: CLUSTER_OPERATION_TIMEOUT + :value: "CLUSTER_OPERATION_TIMEOUT" + .. py:attribute:: COMMUNICATION_LOST :value: "COMMUNICATION_LOST" @@ -1193,30 +1259,132 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CONTROL_PLANE_REQUEST_FAILURE :value: "CONTROL_PLANE_REQUEST_FAILURE" + .. py:attribute:: CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG + :value: "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG" + .. py:attribute:: DATABASE_CONNECTION_FAILURE :value: "DATABASE_CONNECTION_FAILURE" + .. py:attribute:: DATA_ACCESS_CONFIG_CHANGED + :value: "DATA_ACCESS_CONFIG_CHANGED" + .. py:attribute:: DBFS_COMPONENT_UNHEALTHY :value: "DBFS_COMPONENT_UNHEALTHY" + .. py:attribute:: DISASTER_RECOVERY_REPLICATION + :value: "DISASTER_RECOVERY_REPLICATION" + + .. py:attribute:: DNS_RESOLUTION_ERROR + :value: "DNS_RESOLUTION_ERROR" + + .. py:attribute:: DOCKER_CONTAINER_CREATION_EXCEPTION + :value: "DOCKER_CONTAINER_CREATION_EXCEPTION" + .. py:attribute:: DOCKER_IMAGE_PULL_FAILURE :value: "DOCKER_IMAGE_PULL_FAILURE" + .. py:attribute:: DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION + :value: "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION" + + .. py:attribute:: DOCKER_INVALID_OS_EXCEPTION + :value: "DOCKER_INVALID_OS_EXCEPTION" + + .. py:attribute:: DRIVER_DNS_RESOLUTION_FAILURE + :value: "DRIVER_DNS_RESOLUTION_FAILURE" + + .. py:attribute:: DRIVER_EVICTION + :value: "DRIVER_EVICTION" + + .. py:attribute:: DRIVER_LAUNCH_TIMEOUT + :value: "DRIVER_LAUNCH_TIMEOUT" + + .. py:attribute:: DRIVER_NODE_UNREACHABLE + :value: "DRIVER_NODE_UNREACHABLE" + + .. py:attribute:: DRIVER_OUT_OF_DISK + :value: "DRIVER_OUT_OF_DISK" + + .. py:attribute:: DRIVER_OUT_OF_MEMORY + :value: "DRIVER_OUT_OF_MEMORY" + + .. py:attribute:: DRIVER_POD_CREATION_FAILURE + :value: "DRIVER_POD_CREATION_FAILURE" + + .. py:attribute:: DRIVER_UNEXPECTED_FAILURE + :value: "DRIVER_UNEXPECTED_FAILURE" + + .. py:attribute:: DRIVER_UNHEALTHY + :value: "DRIVER_UNHEALTHY" + .. py:attribute:: DRIVER_UNREACHABLE :value: "DRIVER_UNREACHABLE" .. py:attribute:: DRIVER_UNRESPONSIVE :value: "DRIVER_UNRESPONSIVE" + .. py:attribute:: DYNAMIC_SPARK_CONF_SIZE_EXCEEDED + :value: "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED" + + .. 
py:attribute:: EOS_SPARK_IMAGE + :value: "EOS_SPARK_IMAGE" + .. py:attribute:: EXECUTION_COMPONENT_UNHEALTHY :value: "EXECUTION_COMPONENT_UNHEALTHY" + .. py:attribute:: EXECUTOR_POD_UNSCHEDULED + :value: "EXECUTOR_POD_UNSCHEDULED" + + .. py:attribute:: GCP_API_RATE_QUOTA_EXCEEDED + :value: "GCP_API_RATE_QUOTA_EXCEEDED" + + .. py:attribute:: GCP_DENIED_BY_ORG_POLICY + :value: "GCP_DENIED_BY_ORG_POLICY" + + .. py:attribute:: GCP_FORBIDDEN + :value: "GCP_FORBIDDEN" + + .. py:attribute:: GCP_IAM_TIMEOUT + :value: "GCP_IAM_TIMEOUT" + + .. py:attribute:: GCP_INACCESSIBLE_KMS_KEY_FAILURE + :value: "GCP_INACCESSIBLE_KMS_KEY_FAILURE" + + .. py:attribute:: GCP_INSUFFICIENT_CAPACITY + :value: "GCP_INSUFFICIENT_CAPACITY" + + .. py:attribute:: GCP_IP_SPACE_EXHAUSTED + :value: "GCP_IP_SPACE_EXHAUSTED" + + .. py:attribute:: GCP_KMS_KEY_PERMISSION_DENIED + :value: "GCP_KMS_KEY_PERMISSION_DENIED" + + .. py:attribute:: GCP_NOT_FOUND + :value: "GCP_NOT_FOUND" + .. py:attribute:: GCP_QUOTA_EXCEEDED :value: "GCP_QUOTA_EXCEEDED" + .. py:attribute:: GCP_RESOURCE_QUOTA_EXCEEDED + :value: "GCP_RESOURCE_QUOTA_EXCEEDED" + + .. py:attribute:: GCP_SERVICE_ACCOUNT_ACCESS_DENIED + :value: "GCP_SERVICE_ACCOUNT_ACCESS_DENIED" + .. py:attribute:: GCP_SERVICE_ACCOUNT_DELETED :value: "GCP_SERVICE_ACCOUNT_DELETED" + .. py:attribute:: GCP_SERVICE_ACCOUNT_NOT_FOUND + :value: "GCP_SERVICE_ACCOUNT_NOT_FOUND" + + .. py:attribute:: GCP_SUBNET_NOT_READY + :value: "GCP_SUBNET_NOT_READY" + + .. py:attribute:: GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED + :value: "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED" + + .. py:attribute:: GKE_BASED_CLUSTER_TERMINATION + :value: "GKE_BASED_CLUSTER_TERMINATION" + .. py:attribute:: GLOBAL_INIT_SCRIPT_FAILURE :value: "GLOBAL_INIT_SCRIPT_FAILURE" @@ -1229,69 +1397,159 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: INACTIVITY :value: "INACTIVITY" + .. py:attribute:: INIT_CONTAINER_NOT_FINISHED + :value: "INIT_CONTAINER_NOT_FINISHED" + .. py:attribute:: INIT_SCRIPT_FAILURE :value: "INIT_SCRIPT_FAILURE" .. py:attribute:: INSTANCE_POOL_CLUSTER_FAILURE :value: "INSTANCE_POOL_CLUSTER_FAILURE" + .. py:attribute:: INSTANCE_POOL_MAX_CAPACITY_REACHED + :value: "INSTANCE_POOL_MAX_CAPACITY_REACHED" + + .. py:attribute:: INSTANCE_POOL_NOT_FOUND + :value: "INSTANCE_POOL_NOT_FOUND" + .. py:attribute:: INSTANCE_UNREACHABLE :value: "INSTANCE_UNREACHABLE" + .. py:attribute:: INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG + :value: "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG" + + .. py:attribute:: INTERNAL_CAPACITY_FAILURE + :value: "INTERNAL_CAPACITY_FAILURE" + .. py:attribute:: INTERNAL_ERROR :value: "INTERNAL_ERROR" .. py:attribute:: INVALID_ARGUMENT :value: "INVALID_ARGUMENT" + .. py:attribute:: INVALID_AWS_PARAMETER + :value: "INVALID_AWS_PARAMETER" + + .. py:attribute:: INVALID_INSTANCE_PLACEMENT_PROTOCOL + :value: "INVALID_INSTANCE_PLACEMENT_PROTOCOL" + .. py:attribute:: INVALID_SPARK_IMAGE :value: "INVALID_SPARK_IMAGE" + .. py:attribute:: INVALID_WORKER_IMAGE_FAILURE + :value: "INVALID_WORKER_IMAGE_FAILURE" + + .. py:attribute:: IN_PENALTY_BOX + :value: "IN_PENALTY_BOX" + .. py:attribute:: IP_EXHAUSTION_FAILURE :value: "IP_EXHAUSTION_FAILURE" .. py:attribute:: JOB_FINISHED :value: "JOB_FINISHED" + .. py:attribute:: K8S_ACTIVE_POD_QUOTA_EXCEEDED + :value: "K8S_ACTIVE_POD_QUOTA_EXCEEDED" + .. py:attribute:: K8S_AUTOSCALING_FAILURE :value: "K8S_AUTOSCALING_FAILURE" .. py:attribute:: K8S_DBR_CLUSTER_LAUNCH_TIMEOUT :value: "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT" + .. 
py:attribute:: LAZY_ALLOCATION_TIMEOUT + :value: "LAZY_ALLOCATION_TIMEOUT" + + .. py:attribute:: MAINTENANCE_MODE + :value: "MAINTENANCE_MODE" + .. py:attribute:: METASTORE_COMPONENT_UNHEALTHY :value: "METASTORE_COMPONENT_UNHEALTHY" .. py:attribute:: NEPHOS_RESOURCE_MANAGEMENT :value: "NEPHOS_RESOURCE_MANAGEMENT" + .. py:attribute:: NETVISOR_SETUP_TIMEOUT + :value: "NETVISOR_SETUP_TIMEOUT" + + .. py:attribute:: NETWORK_CHECK_CONTROL_PLANE_FAILURE + :value: "NETWORK_CHECK_CONTROL_PLANE_FAILURE" + + .. py:attribute:: NETWORK_CHECK_DNS_SERVER_FAILURE + :value: "NETWORK_CHECK_DNS_SERVER_FAILURE" + + .. py:attribute:: NETWORK_CHECK_METADATA_ENDPOINT_FAILURE + :value: "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE" + + .. py:attribute:: NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE + :value: "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE" + + .. py:attribute:: NETWORK_CHECK_NIC_FAILURE + :value: "NETWORK_CHECK_NIC_FAILURE" + + .. py:attribute:: NETWORK_CHECK_STORAGE_FAILURE + :value: "NETWORK_CHECK_STORAGE_FAILURE" + .. py:attribute:: NETWORK_CONFIGURATION_FAILURE :value: "NETWORK_CONFIGURATION_FAILURE" .. py:attribute:: NFS_MOUNT_FAILURE :value: "NFS_MOUNT_FAILURE" + .. py:attribute:: NO_ACTIVATED_K8S + :value: "NO_ACTIVATED_K8S" + + .. py:attribute:: NO_ACTIVATED_K8S_TESTING_TAG + :value: "NO_ACTIVATED_K8S_TESTING_TAG" + + .. py:attribute:: NO_MATCHED_K8S + :value: "NO_MATCHED_K8S" + + .. py:attribute:: NO_MATCHED_K8S_TESTING_TAG + :value: "NO_MATCHED_K8S_TESTING_TAG" + .. py:attribute:: NPIP_TUNNEL_SETUP_FAILURE :value: "NPIP_TUNNEL_SETUP_FAILURE" .. py:attribute:: NPIP_TUNNEL_TOKEN_FAILURE :value: "NPIP_TUNNEL_TOKEN_FAILURE" + .. py:attribute:: POD_ASSIGNMENT_FAILURE + :value: "POD_ASSIGNMENT_FAILURE" + + .. py:attribute:: POD_SCHEDULING_FAILURE + :value: "POD_SCHEDULING_FAILURE" + .. py:attribute:: REQUEST_REJECTED :value: "REQUEST_REJECTED" .. py:attribute:: REQUEST_THROTTLED :value: "REQUEST_THROTTLED" + .. py:attribute:: RESOURCE_USAGE_BLOCKED + :value: "RESOURCE_USAGE_BLOCKED" + + .. py:attribute:: SECRET_CREATION_FAILURE + :value: "SECRET_CREATION_FAILURE" + + .. py:attribute:: SECRET_PERMISSION_DENIED + :value: "SECRET_PERMISSION_DENIED" + .. py:attribute:: SECRET_RESOLUTION_ERROR :value: "SECRET_RESOLUTION_ERROR" + .. py:attribute:: SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION + :value: "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION" + .. py:attribute:: SECURITY_DAEMON_REGISTRATION_EXCEPTION :value: "SECURITY_DAEMON_REGISTRATION_EXCEPTION" .. py:attribute:: SELF_BOOTSTRAP_FAILURE :value: "SELF_BOOTSTRAP_FAILURE" + .. py:attribute:: SERVERLESS_LONG_RUNNING_TERMINATED + :value: "SERVERLESS_LONG_RUNNING_TERMINATED" + .. py:attribute:: SKIPPED_SLOW_NODES :value: "SKIPPED_SLOW_NODES" @@ -1304,15 +1562,33 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SPARK_IMAGE_DOWNLOAD_FAILURE :value: "SPARK_IMAGE_DOWNLOAD_FAILURE" + .. py:attribute:: SPARK_IMAGE_DOWNLOAD_THROTTLED + :value: "SPARK_IMAGE_DOWNLOAD_THROTTLED" + + .. py:attribute:: SPARK_IMAGE_NOT_FOUND + :value: "SPARK_IMAGE_NOT_FOUND" + .. py:attribute:: SPARK_STARTUP_FAILURE :value: "SPARK_STARTUP_FAILURE" .. py:attribute:: SPOT_INSTANCE_TERMINATION :value: "SPOT_INSTANCE_TERMINATION" + .. py:attribute:: SSH_BOOTSTRAP_FAILURE + :value: "SSH_BOOTSTRAP_FAILURE" + .. py:attribute:: STORAGE_DOWNLOAD_FAILURE :value: "STORAGE_DOWNLOAD_FAILURE" + .. py:attribute:: STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG + :value: "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG" + + .. 
py:attribute:: STORAGE_DOWNLOAD_FAILURE_SLOW + :value: "STORAGE_DOWNLOAD_FAILURE_SLOW" + + .. py:attribute:: STORAGE_DOWNLOAD_FAILURE_THROTTLED + :value: "STORAGE_DOWNLOAD_FAILURE_THROTTLED" + .. py:attribute:: STS_CLIENT_SETUP_FAILURE :value: "STS_CLIENT_SETUP_FAILURE" @@ -1328,6 +1604,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: UNEXPECTED_LAUNCH_FAILURE :value: "UNEXPECTED_LAUNCH_FAILURE" + .. py:attribute:: UNEXPECTED_POD_RECREATION + :value: "UNEXPECTED_POD_RECREATION" + .. py:attribute:: UNKNOWN :value: "UNKNOWN" @@ -1337,6 +1616,12 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: UPDATE_INSTANCE_PROFILE_FAILURE :value: "UPDATE_INSTANCE_PROFILE_FAILURE" + .. py:attribute:: USAGE_POLICY_ENTITLEMENT_DENIED + :value: "USAGE_POLICY_ENTITLEMENT_DENIED" + + .. py:attribute:: USER_INITIATED_VM_TERMINATION + :value: "USER_INITIATED_VM_TERMINATION" + .. py:attribute:: USER_REQUEST :value: "USER_REQUEST" @@ -1349,6 +1634,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: WORKSPACE_CONFIGURATION_ERROR :value: "WORKSPACE_CONFIGURATION_ERROR" + .. py:attribute:: WORKSPACE_UPDATE + :value: "WORKSPACE_UPDATE" + .. py:class:: TerminationReasonType type of the termination @@ -1446,8 +1734,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: WarehouseTypePairWarehouseType - Warehouse type: `PRO` or `CLASSIC`. - .. py:attribute:: CLASSIC :value: "CLASSIC" diff --git a/docs/gen-client-docs.py b/docs/gen-client-docs.py index 7db1b3c25..7e907d735 100644 --- a/docs/gen-client-docs.py +++ b/docs/gen-client-docs.py @@ -270,7 +270,12 @@ class Generator: "iamv2", "Identity and Access Management", "Manage identities and workspace access." - ) + ), + Package( + "dataquality", + "Data Quality", + "Manage data quality monitoring on Unity Catalog objects." + ), ] def __init__(self): @@ -298,7 +303,7 @@ def _load_mapping(self) -> dict[str, Tag]: clean_parent_service = parent_service.lower().removeprefix("account") key = f"{key}.{clean_parent_service}" - key = f"{key}.{tag['x-databricks-service']}".lower() + key = f"{key}.{tag['x-databricks-service'].replace("_", "")}".lower() package = tag['x-databricks-package'] t = Tag(name=tag['name'], service=tag['x-databricks-service'], diff --git a/docs/workspace/apps/apps.rst b/docs/workspace/apps/apps.rst index d3761b9db..62cc0c22d 100644 --- a/docs/workspace/apps/apps.rst +++ b/docs/workspace/apps/apps.rst @@ -23,6 +23,32 @@ .. py:method:: create_and_wait(app: App [, no_compute: Optional[bool], timeout: datetime.timedelta = 0:20:00]) -> App + .. py:method:: create_update(app_name: str, update_mask: str [, app: Optional[App]]) -> Wait[AppUpdate] + + Creates an app update and starts the update process. The update process is asynchronous and the status + of the update can be checked with the GetAppUpdate method. + + :param app_name: str + :param update_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. 
It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + :param app: :class:`App` (optional) + + :returns: + Long-running operation waiter for :class:`AppUpdate`. + See :method:wait_get_update_app_succeeded for more details. + + + .. py:method:: create_update_and_wait(app_name: str, update_mask: str [, app: Optional[App], timeout: datetime.timedelta = 0:20:00]) -> AppUpdate + + .. py:method:: delete(name: str) -> App Deletes an app. @@ -92,6 +118,16 @@ :returns: :class:`AppPermissions` + .. py:method:: get_update(app_name: str) -> AppUpdate + + Gets the status of an app update. + + :param app_name: str + The name of the app. + + :returns: :class:`AppUpdate` + + .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[App] Lists all apps in the workspace. @@ -190,3 +226,6 @@ .. py:method:: wait_get_deployment_app_succeeded(app_name: str, deployment_id: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[AppDeployment], None]]) -> AppDeployment + + + .. py:method:: wait_get_update_app_succeeded(app_name: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[AppUpdate], None]]) -> AppUpdate diff --git a/docs/workspace/catalog/catalogs.rst b/docs/workspace/catalog/catalogs.rst index 9a18ede8a..47cff9e95 100644 --- a/docs/workspace/catalog/catalogs.rst +++ b/docs/workspace/catalog/catalogs.rst @@ -99,7 +99,7 @@ :returns: :class:`CatalogInfo` - .. py:method:: list( [, include_browse: Optional[bool], max_results: Optional[int], page_token: Optional[str]]) -> Iterator[CatalogInfo] + .. py:method:: list( [, include_browse: Optional[bool], include_unbound: Optional[bool], max_results: Optional[int], page_token: Optional[str]]) -> Iterator[CatalogInfo] Usage: @@ -118,9 +118,20 @@ **USE_CATALOG** privilege) will be retrieved. There is no guarantee of a specific ordering of the elements in the array. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param include_browse: bool (optional) Whether to include catalogs in the response for which the principal can only access selective metadata for + :param include_unbound: bool (optional) + Whether to include catalogs not bound to the workspace. Effective only if the user has permission to + update the catalog–workspace binding. :param max_results: int (optional) Maximum number of catalogs to return. - when set to 0, the page length is set to a server configured value (recommended); - when set to a value greater than 0, the page length is the minimum of this diff --git a/docs/workspace/catalog/connections.rst b/docs/workspace/catalog/connections.rst index ec7f39be2..8051478d3 100644 --- a/docs/workspace/catalog/connections.rst +++ b/docs/workspace/catalog/connections.rst @@ -135,6 +135,14 @@ List all connections. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. 
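A minimal sketch of the app-update flow documented above, assuming a configured WorkspaceClient; the blocking `create_update_and_wait` variant and `get_update` are both shown, and `description` is only an illustrative field to name in the mask:

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import apps

    w = WorkspaceClient()

    # Only the fields named in update_mask are applied; "description" is an
    # illustrative choice rather than a recommendation from the API reference.
    updated = w.apps.create_update_and_wait(
        app_name="my-app",
        update_mask="description",
        app=apps.App(name="my-app", description="updated via create_update"),
    )

    # The status of an in-flight update can also be polled without blocking.
    print(w.apps.get_update(app_name="my-app"))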
+ + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param max_results: int (optional) Maximum number of connections to return. - If not set, all connections are returned (not recommended). - when set to a value greater than 0, the page length is the minimum of this value and diff --git a/docs/workspace/catalog/credentials.rst b/docs/workspace/catalog/credentials.rst index 93b49d3f4..ed67f89ac 100644 --- a/docs/workspace/catalog/credentials.rst +++ b/docs/workspace/catalog/credentials.rst @@ -82,7 +82,7 @@ :returns: :class:`CredentialInfo` - .. py:method:: list_credentials( [, max_results: Optional[int], page_token: Optional[str], purpose: Optional[CredentialPurpose]]) -> Iterator[CredentialInfo] + .. py:method:: list_credentials( [, include_unbound: Optional[bool], max_results: Optional[int], page_token: Optional[str], purpose: Optional[CredentialPurpose]]) -> Iterator[CredentialInfo] Gets an array of credentials (as __CredentialInfo__ objects). @@ -90,6 +90,9 @@ is a metastore admin, retrieval of credentials is unrestricted. There is no guarantee of a specific ordering of the elements in the array. + :param include_unbound: bool (optional) + Whether to include credentials not bound to the workspace. Effective only if the user has permission + to update the credential–workspace binding. :param max_results: int (optional) Maximum number of credentials to return. - If not set, the default max page size is used. - When set to a value greater than 0, the page length is the minimum of this value and a server-configured diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index 624fe1958..b8b70227f 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -67,7 +67,8 @@ enabled, the access to the location falls back to cluster credentials if UC credentials are not sufficient. :param file_event_queue: :class:`FileEventQueue` (optional) - File event queue settings. + File event queue settings. If `enable_file_events` is `true`, must be defined and have exactly one + of the documented properties. :param read_only: bool (optional) Indicates whether the external location is read-only. :param skip_validation: bool (optional) @@ -133,7 +134,7 @@ :returns: :class:`ExternalLocationInfo` - .. py:method:: list( [, include_browse: Optional[bool], max_results: Optional[int], page_token: Optional[str]]) -> Iterator[ExternalLocationInfo] + .. py:method:: list( [, include_browse: Optional[bool], include_unbound: Optional[bool], max_results: Optional[int], page_token: Optional[str]]) -> Iterator[ExternalLocationInfo] Usage: @@ -151,9 +152,20 @@ must be a metastore admin, the owner of the external location, or a user that has some privilege on the external location. There is no guarantee of a specific ordering of the elements in the array. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. 
Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param include_browse: bool (optional) Whether to include external locations in the response for which the principal can only access selective metadata for + :param include_unbound: bool (optional) + Whether to include external locations not bound to the workspace. Effective only if the user has + permission to update the location–workspace binding. :param max_results: int (optional) Maximum number of external locations to return. If not set, all the external locations are returned (not recommended). - when set to a value greater than 0, the page length is the minimum of this @@ -219,7 +231,8 @@ enabled, the access to the location falls back to cluster credentials if UC credentials are not sufficient. :param file_event_queue: :class:`FileEventQueue` (optional) - File event queue settings. + File event queue settings. If `enable_file_events` is `true`, must be defined and have exactly one + of the documented properties. :param force: bool (optional) Force update even if changing url invalidates dependent external tables or mounts. :param isolation_mode: :class:`IsolationMode` (optional) diff --git a/docs/workspace/catalog/functions.rst b/docs/workspace/catalog/functions.rst index 0297c0e22..ff2d0ad14 100644 --- a/docs/workspace/catalog/functions.rst +++ b/docs/workspace/catalog/functions.rst @@ -36,7 +36,7 @@ :param name: str The fully-qualified name of the function (of the form - __catalog_name__.__schema_name__.__function__name__). + __catalog_name__.__schema_name__.__function__name__) . :param force: bool (optional) Force deletion even if the function is notempty. @@ -70,6 +70,14 @@ functions for which either the user has the **EXECUTE** privilege or the user is the owner. There is no guarantee of a specific ordering of the elements in the array. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param catalog_name: str Name of parent catalog for functions of interest. :param schema_name: str @@ -101,7 +109,7 @@ The fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function__name__). :param owner: str (optional) - Username of current owner of function. + Username of current owner of the function. :returns: :class:`FunctionInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/metastores.rst b/docs/workspace/catalog/metastores.rst index 28636acf9..a6db85f60 100644 --- a/docs/workspace/catalog/metastores.rst +++ b/docs/workspace/catalog/metastores.rst @@ -174,6 +174,14 @@ Gets an array of the available metastores (as __MetastoreInfo__ objects). The caller must be an admin to retrieve this info. There is no guarantee of a specific ordering of the elements in the array. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. 
Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param max_results: int (optional) Maximum number of metastores to return. - when set to a value greater than 0, the page length is the minimum of this value and a server configured value; - when set to 0, the page length is set to a diff --git a/docs/workspace/catalog/model_versions.rst b/docs/workspace/catalog/model_versions.rst index ec3ed4985..18aed8bb4 100644 --- a/docs/workspace/catalog/model_versions.rst +++ b/docs/workspace/catalog/model_versions.rst @@ -98,7 +98,7 @@ :returns: Iterator over :class:`ModelVersionInfo` - .. py:method:: update(full_name: str, version: int [, comment: Optional[str]]) -> ModelVersionInfo + .. py:method:: update(full_name: str, version: int [, aliases: Optional[List[RegisteredModelAlias]], catalog_name: Optional[str], comment: Optional[str], created_at: Optional[int], created_by: Optional[str], id: Optional[str], metastore_id: Optional[str], model_name: Optional[str], model_version_dependencies: Optional[DependencyList], run_id: Optional[str], run_workspace_id: Optional[int], schema_name: Optional[str], source: Optional[str], status: Optional[ModelVersionInfoStatus], storage_location: Optional[str], updated_at: Optional[int], updated_by: Optional[str]]) -> ModelVersionInfo Updates the specified model version. @@ -112,8 +112,42 @@ The three-level (fully qualified) name of the model version :param version: int The integer version number of the model version + :param aliases: List[:class:`RegisteredModelAlias`] (optional) + List of aliases associated with the model version + :param catalog_name: str (optional) + The name of the catalog containing the model version :param comment: str (optional) The comment attached to the model version + :param created_at: int (optional) + :param created_by: str (optional) + The identifier of the user who created the model version + :param id: str (optional) + The unique identifier of the model version + :param metastore_id: str (optional) + The unique identifier of the metastore containing the model version + :param model_name: str (optional) + The name of the parent registered model of the model version, relative to parent schema + :param model_version_dependencies: :class:`DependencyList` (optional) + Model version dependencies, for feature-store packaged models + :param run_id: str (optional) + MLflow run ID used when creating the model version, if ``source`` was generated by an experiment run + stored in an MLflow tracking server + :param run_workspace_id: int (optional) + ID of the Databricks workspace containing the MLflow run that generated this model version, if + applicable + :param schema_name: str (optional) + The name of the schema containing the model version, relative to parent catalog + :param source: str (optional) + URI indicating the location of the source artifacts (files) for the model version + :param status: :class:`ModelVersionInfoStatus` (optional) + Current status of the model version. Newly created model versions start in PENDING_REGISTRATION + status, then move to READY status once the model version files are uploaded and the model version is + finalized. Only model versions in READY status can be loaded for inference or served. 
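The pagination contract repeated in the listings above is handled by the SDK's list iterators; a minimal sketch, assuming a configured WorkspaceClient, using metastores as the example service:

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # max_results=0 opts into the recommended paginated code path with a
    # server-configured page size; the returned iterator keeps following
    # next_page_token (including past empty pages) until it is absent.
    for metastore in w.metastores.list(max_results=0):
        print(metastore.name)  # attribute name assumed from MetastoreInfo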
+ :param storage_location: str (optional) + The storage location on the cloud under which model version data files are stored + :param updated_at: int (optional) + :param updated_by: str (optional) + The identifier of the user who updated the model version last time :returns: :class:`ModelVersionInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/registered_models.rst b/docs/workspace/catalog/registered_models.rst index 4e6b0aa6b..947502240 100644 --- a/docs/workspace/catalog/registered_models.rst +++ b/docs/workspace/catalog/registered_models.rst @@ -26,10 +26,10 @@ new model version, or update permissions on the registered model, users must be owners of the registered model. - Note: The securable type for models is "FUNCTION". When using REST APIs (e.g. tagging, grants) that - specify a securable type, use "FUNCTION" as the securable type. + Note: The securable type for models is FUNCTION. When using REST APIs (e.g. tagging, grants) that specify + a securable type, use FUNCTION as the securable type. - .. py:method:: create(catalog_name: str, schema_name: str, name: str [, comment: Optional[str], storage_location: Optional[str]]) -> RegisteredModelInfo + .. py:method:: create( [, aliases: Optional[List[RegisteredModelAlias]], browse_only: Optional[bool], catalog_name: Optional[str], comment: Optional[str], created_at: Optional[int], created_by: Optional[str], full_name: Optional[str], metastore_id: Optional[str], name: Optional[str], owner: Optional[str], schema_name: Optional[str], storage_location: Optional[str], updated_at: Optional[int], updated_by: Optional[str]]) -> RegisteredModelInfo Creates a new registered model in Unity Catalog. @@ -41,16 +41,35 @@ **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - The caller must have the **CREATE MODEL** or **CREATE FUNCTION** privilege on the parent schema. - :param catalog_name: str + :param aliases: List[:class:`RegisteredModelAlias`] (optional) + List of aliases associated with the registered model + :param browse_only: bool (optional) + Indicates whether the principal is limited to retrieving metadata for the associated object through + the BROWSE privilege when include_browse is enabled in the request. 
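Although every argument of the relaxed `create()` signature above is optional, the service still needs at least a catalog, schema and model name; a minimal sketch, assuming a configured WorkspaceClient and an existing schema:

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    model = w.registered_models.create(
        catalog_name="main",      # hypothetical catalog
        schema_name="default",    # hypothetical schema
        name="churn_model",
        comment="created from the Python SDK",
    )
    print(model.full_name)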
+ :param catalog_name: str (optional) The name of the catalog where the schema and the registered model reside - :param schema_name: str - The name of the schema where the registered model resides - :param name: str - The name of the registered model :param comment: str (optional) The comment attached to the registered model + :param created_at: int (optional) + Creation timestamp of the registered model in milliseconds since the Unix epoch + :param created_by: str (optional) + The identifier of the user who created the registered model + :param full_name: str (optional) + The three-level (fully qualified) name of the registered model + :param metastore_id: str (optional) + The unique identifier of the metastore + :param name: str (optional) + The name of the registered model + :param owner: str (optional) + The identifier of the user who owns the registered model + :param schema_name: str (optional) + The name of the schema where the registered model resides :param storage_location: str (optional) The storage location on the cloud under which model version data files are stored + :param updated_at: int (optional) + Last-update timestamp of the registered model in milliseconds since the Unix epoch + :param updated_by: str (optional) + The identifier of the user who updated the registered model last time :returns: :class:`RegisteredModelInfo` @@ -155,7 +174,7 @@ **USE_SCHEMA** privilege on the parent schema. :param full_name: str - Full name of the registered model + The three-level (fully qualified) name of the registered model :param alias: str The name of the alias :param version_num: int @@ -164,7 +183,7 @@ :returns: :class:`RegisteredModelAlias` - .. py:method:: update(full_name: str [, comment: Optional[str], new_name: Optional[str], owner: Optional[str]]) -> RegisteredModelInfo + .. py:method:: update(full_name: str [, aliases: Optional[List[RegisteredModelAlias]], browse_only: Optional[bool], catalog_name: Optional[str], comment: Optional[str], created_at: Optional[int], created_by: Optional[str], metastore_id: Optional[str], name: Optional[str], new_name: Optional[str], owner: Optional[str], schema_name: Optional[str], storage_location: Optional[str], updated_at: Optional[int], updated_by: Optional[str]]) -> RegisteredModelInfo Updates the specified registered model. @@ -176,12 +195,35 @@ :param full_name: str The three-level (fully qualified) name of the registered model + :param aliases: List[:class:`RegisteredModelAlias`] (optional) + List of aliases associated with the registered model + :param browse_only: bool (optional) + Indicates whether the principal is limited to retrieving metadata for the associated object through + the BROWSE privilege when include_browse is enabled in the request. + :param catalog_name: str (optional) + The name of the catalog where the schema and the registered model reside :param comment: str (optional) The comment attached to the registered model + :param created_at: int (optional) + Creation timestamp of the registered model in milliseconds since the Unix epoch + :param created_by: str (optional) + The identifier of the user who created the registered model + :param metastore_id: str (optional) + The unique identifier of the metastore + :param name: str (optional) + The name of the registered model :param new_name: str (optional) New name for the registered model. 
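A minimal sketch of `update()` restricted to the fields that are typically changed (comment, new_name, owner); the audit-style fields documented above are part of the request shape but do not need to be supplied:

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    model = w.registered_models.update(
        full_name="main.default.churn_model",  # hypothetical model
        comment="retrained on fresh data",
        new_name="churn_model_v2",
    )
    print(model.full_name)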
:param owner: str (optional) The identifier of the user who owns the registered model + :param schema_name: str (optional) + The name of the schema where the registered model resides + :param storage_location: str (optional) + The storage location on the cloud under which model version data files are stored + :param updated_at: int (optional) + Last-update timestamp of the registered model in milliseconds since the Unix epoch + :param updated_by: str (optional) + The identifier of the user who updated the registered model last time :returns: :class:`RegisteredModelInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/schemas.rst b/docs/workspace/catalog/schemas.rst index fd1d323a1..f8d676c84 100644 --- a/docs/workspace/catalog/schemas.rst +++ b/docs/workspace/catalog/schemas.rst @@ -120,6 +120,14 @@ owned by the caller (or for which the caller has the **USE_SCHEMA** privilege) will be retrieved. There is no guarantee of a specific ordering of the elements in the array. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param catalog_name: str Parent catalog for schemas of interest. :param include_browse: bool (optional) diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index 5fe0bb70f..48666f7ab 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -32,11 +32,11 @@ created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(delete=created.name) + w.storage_credentials.delete(name=created.name) Creates a new storage credential. @@ -115,7 +115,7 @@ :returns: :class:`StorageCredentialInfo` - .. py:method:: list( [, max_results: Optional[int], page_token: Optional[str]]) -> Iterator[StorageCredentialInfo] + .. py:method:: list( [, include_unbound: Optional[bool], max_results: Optional[int], page_token: Optional[str]]) -> Iterator[StorageCredentialInfo] Usage: @@ -123,16 +123,28 @@ .. code-block:: from databricks.sdk import WorkspaceClient + from databricks.sdk.service import catalog w = WorkspaceClient() - all = w.storage_credentials.list() + all = w.storage_credentials.list(catalog.ListStorageCredentialsRequest()) Gets an array of storage credentials (as __StorageCredentialInfo__ objects). The array is limited to only those storage credentials the caller has permission to access. If the caller is a metastore admin, retrieval of credentials is unrestricted. There is no guarantee of a specific ordering of the elements in the array. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. 
Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + + :param include_unbound: bool (optional) + Whether to include credentials not bound to the workspace. Effective only if the user has permission + to update the credential–workspace binding. :param max_results: int (optional) Maximum number of storage credentials to return. If not set, all the storage credentials are returned (not recommended). - when set to a value greater than 0, the page length is the minimum of diff --git a/docs/workspace/catalog/system_schemas.rst b/docs/workspace/catalog/system_schemas.rst index 6c94e5349..b9a1c5778 100644 --- a/docs/workspace/catalog/system_schemas.rst +++ b/docs/workspace/catalog/system_schemas.rst @@ -40,6 +40,14 @@ Gets an array of system schemas for a metastore. The caller must be an account admin or a metastore admin. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param metastore_id: str The ID for the metastore in which the system schema resides. :param max_results: int (optional) diff --git a/docs/workspace/catalog/tables.rst b/docs/workspace/catalog/tables.rst index c4b6dad3d..75d4138fd 100644 --- a/docs/workspace/catalog/tables.rst +++ b/docs/workspace/catalog/tables.rst @@ -156,7 +156,7 @@ created_schema = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=created_catalog.name) - all_tables = w.tables.list(catalog_name=created_catalog.name, schema_name=created_schema.name) + summaries = w.tables.list_summaries(catalog_name=created_catalog.name, schema_name_pattern=created_schema.name) # cleanup w.schemas.delete(full_name=created_schema.full_name) @@ -168,6 +168,14 @@ catalog and the **USE_SCHEMA** privilege on the parent schema. There is no guarantee of a specific ordering of the elements in the array. + NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated calls + will be deprecated soon. + + PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero results while + still providing a next_page_token. Clients must continue reading pages until next_page_token is + absent, which is the only indication that the end of results has been reached. This behavior follows + Google AIP-158 guidelines. + :param catalog_name: str Name of parent catalog for tables of interest. :param schema_name: str diff --git a/docs/workspace/catalog/volumes.rst b/docs/workspace/catalog/volumes.rst index 78d84fd57..bb2f890c2 100644 --- a/docs/workspace/catalog/volumes.rst +++ b/docs/workspace/catalog/volumes.rst @@ -81,6 +81,11 @@ :param name: str The name of the volume :param volume_type: :class:`VolumeType` + The type of the volume. An external volume is located in the specified external location. A managed + volume is located in the default location which is specified by the parent schema, or the parent + catalog, or the Metastore. 
[Learn more] + + [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external :param comment: str (optional) The comment attached to the volume :param storage_location: str (optional) @@ -130,7 +135,7 @@ The returned volumes are filtered based on the privileges of the calling user. For example, the metastore admin is able to list all the volumes. A regular user needs to be the owner or have the - **READ VOLUME** privilege on the volume to recieve the volumes in the response. For the latter case, + **READ VOLUME** privilege on the volume to receive the volumes in the response. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. diff --git a/docs/workspace/catalog/workspace_bindings.rst b/docs/workspace/catalog/workspace_bindings.rst index 6287456d9..bda071eb8 100644 --- a/docs/workspace/catalog/workspace_bindings.rst +++ b/docs/workspace/catalog/workspace_bindings.rst @@ -116,9 +116,11 @@ :param securable_name: str The name of the securable. :param add: List[:class:`WorkspaceBinding`] (optional) - List of workspace bindings. + List of workspace bindings to add. If a binding for the workspace already exists with a different + binding_type, adding it again with a new binding_type will update the existing binding (e.g., from + READ_WRITE to READ_ONLY). :param remove: List[:class:`WorkspaceBinding`] (optional) - List of workspace bindings. + List of workspace bindings to remove. :returns: :class:`UpdateWorkspaceBindingsResponse` \ No newline at end of file diff --git a/docs/workspace/dashboards/genie.rst b/docs/workspace/dashboards/genie.rst index c0d16b9f8..3ceb286ef 100644 --- a/docs/workspace/dashboards/genie.rst +++ b/docs/workspace/dashboards/genie.rst @@ -183,8 +183,8 @@ :param space_id: str The ID of the Genie space to retrieve conversations from. :param include_all: bool (optional) - Include all conversations in the space across all users. Requires "Can Manage" permission on the - space. + Include all conversations in the space across all users. Requires at least CAN MANAGE permission on + the space. :param page_size: int (optional) Maximum number of conversations to return per page :param page_token: str (optional) @@ -205,7 +205,7 @@ :returns: :class:`GenieListSpacesResponse` - .. py:method:: send_message_feedback(space_id: str, conversation_id: str, message_id: str, rating: GenieFeedbackRating [, comment: Optional[str]]) + .. py:method:: send_message_feedback(space_id: str, conversation_id: str, message_id: str, rating: GenieFeedbackRating) Send feedback for a message. @@ -217,8 +217,6 @@ The ID associated with the message to provide feedback for. :param rating: :class:`GenieFeedbackRating` The rating (POSITIVE, NEGATIVE, or NONE). - :param comment: str (optional) - Optional text feedback that will be stored as a comment. diff --git a/docs/workspace/database/database.rst b/docs/workspace/database/database.rst index 36e594ec7..42ba0bb4d 100644 --- a/docs/workspace/database/database.rst +++ b/docs/workspace/database/database.rst @@ -30,12 +30,13 @@ .. py:method:: create_database_instance_and_wait(database_instance: DatabaseInstance, timeout: datetime.timedelta = 0:20:00) -> DatabaseInstance - .. py:method:: create_database_instance_role(instance_name: str, database_instance_role: DatabaseInstanceRole) -> DatabaseInstanceRole + .. 
py:method:: create_database_instance_role(instance_name: str, database_instance_role: DatabaseInstanceRole [, database_instance_name: Optional[str]]) -> DatabaseInstanceRole Create a role for a Database Instance. :param instance_name: str :param database_instance_role: :class:`DatabaseInstanceRole` + :param database_instance_name: str (optional) :returns: :class:`DatabaseInstanceRole` @@ -78,12 +79,8 @@ By default, a instance cannot be deleted if it has descendant instances created via PITR. If this flag is specified as true, all descendent instances will be deleted as well. :param purge: bool (optional) - Note purge=false is in development. If false, the database instance is soft deleted (implementation - pending). Soft deleted instances behave as if they are deleted, and cannot be used for CRUD - operations nor connected to. However they can be undeleted by calling the undelete API for a limited - time (implementation pending). If true, the database instance is hard deleted and cannot be - undeleted. For the time being, setting this value to true is required to delete an instance (soft - delete is not yet supported). + Deprecated. Omitting the field or setting it to true will result in the field being hard deleted. + Setting a value of false will throw a bad request. diff --git a/docs/workspace/dataquality/data_quality.rst b/docs/workspace/dataquality/data_quality.rst new file mode 100644 index 000000000..5a64d4dcd --- /dev/null +++ b/docs/workspace/dataquality/data_quality.rst @@ -0,0 +1,202 @@ +``w.data_quality``: DataQuality.v1 +================================== +.. currentmodule:: databricks.sdk.service.dataquality + +.. py:class:: DataQualityAPI + + Manage the data quality of Unity Catalog objects (currently support `schema` and `table`) + + .. py:method:: cancel_refresh(object_type: str, object_id: str, refresh_id: int) -> CancelRefreshResponse + + Cancels a data quality monitor refresh. Currently only supported for the `table` `object_type`. + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + :param refresh_id: int + Unique id of the refresh operation. + + :returns: :class:`CancelRefreshResponse` + + + .. py:method:: create_monitor(monitor: Monitor) -> Monitor + + Create a data quality monitor on a Unity Catalog object. The caller must provide either + `anomaly_detection_config` for a schema monitor or `data_profiling_config` for a table monitor. + + For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog, + have **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the table 2. have + **USE_CATALOG** on the table's parent catalog, be an owner of the table's parent schema, and have + **SELECT** access on the table. 3. have the following permissions: - **USE_CATALOG** on the table's + parent catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table. + + Workspace assets, such as the dashboard, will be created in the workspace where this call was made. + + :param monitor: :class:`Monitor` + The monitor to create. + + :returns: :class:`Monitor` + + + .. py:method:: create_refresh(object_type: str, object_id: str, refresh: Refresh) -> Refresh + + Creates a refresh. Currently only supported for the `table` `object_type`. + + The caller must either: 1. be an owner of the table's parent catalog 2. 
have **USE_CATALOG** on the + table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: + - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, table id. + :param refresh: :class:`Refresh` + The refresh to create. + + :returns: :class:`Refresh` + + + .. py:method:: delete_monitor(object_type: str, object_id: str) + + Delete a data quality monitor on a Unity Catalog object. + + For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. + have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. + have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on + the table's parent schema - be an owner of the table. + + Note that the metric tables and dashboard will not be deleted as part of this call; those assets must + be manually cleaned up (if desired). + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + + + + .. py:method:: delete_refresh(object_type: str, object_id: str, refresh_id: int) + + (Unimplemented) Delete a refresh + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + :param refresh_id: int + Unique id of the refresh operation. + + + + .. py:method:: get_monitor(object_type: str, object_id: str) -> Monitor + + Read a data quality monitor on a Unity Catalog object. + + For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. + have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema. 3. + have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on + the table's parent schema - **SELECT** privilege on the table. + + The returned information includes configuration values, as well as information on assets created by + the monitor. Some information (e.g., dashboard) may be filtered out if the caller is in a different + workspace than where the monitor was created. + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + + :returns: :class:`Monitor` + + + .. py:method:: get_refresh(object_type: str, object_id: str, refresh_id: int) -> Refresh + + Get data quality monitor refresh. + + For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. + have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. + have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on + the table's parent schema - **SELECT** privilege on the table. + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + :param refresh_id: int + Unique id of the refresh operation.
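A minimal sketch of the refresh endpoints for a `table` monitor, assuming a configured WorkspaceClient, an existing monitor on the table, and that an empty `Refresh` payload is acceptable for `create_refresh` (the `list_refresh` call is documented just below):

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import dataquality

    w = WorkspaceClient()

    table_id = "..."  # UUID of the monitored table (placeholder)

    # Trigger a refresh of the table monitor; an empty payload is assumed here.
    w.data_quality.create_refresh(
        object_type="table",
        object_id=table_id,
        refresh=dataquality.Refresh(),
    )

    # Enumerate refreshes for the same table; each item is a Refresh.
    for refresh in w.data_quality.list_refresh(object_type="table", object_id=table_id):
        print(refresh)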
+ + :returns: :class:`Refresh` + + + .. py:method:: list_monitor( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[Monitor] + + (Unimplemented) List data quality monitors. + + :param page_size: int (optional) + :param page_token: str (optional) + + :returns: Iterator over :class:`Monitor` + + + .. py:method:: list_refresh(object_type: str, object_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[Refresh] + + List data quality monitor refreshes. + + For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. + have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. + have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on + the table's parent schema - **SELECT** privilege on the table. + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + :param page_size: int (optional) + :param page_token: str (optional) + + :returns: Iterator over :class:`Refresh` + + + .. py:method:: update_monitor(object_type: str, object_id: str, monitor: Monitor, update_mask: str) -> Monitor + + Update a data quality monitor on a Unity Catalog object. + + For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. + have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. + have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on + the table's parent schema - be an owner of the table. + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + :param monitor: :class:`Monitor` + The monitor to update. + :param update_mask: str + The field mask to specify which fields to update as a comma-separated list. Example value: + `data_profiling_config.custom_metrics,data_profiling_config.schedule.quartz_cron_expression` + + :returns: :class:`Monitor` + + + .. py:method:: update_refresh(object_type: str, object_id: str, refresh_id: int, refresh: Refresh, update_mask: str) -> Refresh + + (Unimplemented) Update a refresh + + :param object_type: str + The type of the monitored object. Can be one of the following: `schema` or `table`. + :param object_id: str + The UUID of the request object. For example, schema id. + :param refresh_id: int + Unique id of the refresh operation. + :param refresh: :class:`Refresh` + The refresh to update. + :param update_mask: str + The field mask to specify which fields to update. + + :returns: :class:`Refresh` + \ No newline at end of file diff --git a/docs/workspace/dataquality/index.rst b/docs/workspace/dataquality/index.rst new file mode 100644 index 000000000..f5167a209 --- /dev/null +++ b/docs/workspace/dataquality/index.rst @@ -0,0 +1,10 @@ + +Data Quality +============ + +Manage data quality monitoring on Unity Catalog objects. + +..
toctree:: + :maxdepth: 1 + + data_quality \ No newline at end of file diff --git a/docs/workspace/iam/account_access_control_proxy.rst b/docs/workspace/iam/account_access_control_proxy.rst index 39b5a83ca..384cfa3af 100644 --- a/docs/workspace/iam/account_access_control_proxy.rst +++ b/docs/workspace/iam/account_access_control_proxy.rst @@ -19,7 +19,8 @@ Examples | Summary :--- | :--- `resource=accounts/` | A resource name for the account. `resource=accounts//groups/` | A resource name for the group. `resource=accounts//servicePrincipals/` | A resource name for the service - principal. + principal. `resource=accounts//tagPolicies/` | A resource name for the + tag policy. :returns: :class:`GetAssignableRolesForResourceResponse` @@ -37,6 +38,8 @@ set on the group. `name=accounts//servicePrincipals//ruleSets/default` | A name for a rule set on the service principal. + `name=accounts//tagPolicies//ruleSets/default` | A name for a rule set on + the tag policy. :param etag: str Etag used for versioning. The response is at least as fresh as the eTag provided. Etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a rule set from diff --git a/docs/workspace/iam/groups_v2.rst b/docs/workspace/iam/groups_v2.rst new file mode 100644 index 000000000..b0bbf3dd1 --- /dev/null +++ b/docs/workspace/iam/groups_v2.rst @@ -0,0 +1,124 @@ +``w.groups_v2``: Groups +======================= +.. currentmodule:: databricks.sdk.service.iam + +.. py:class:: GroupsV2API + + Groups simplify identity management, making it easier to assign access to Databricks workspace, data, and + other securable objects. + + It is best practice to assign access to workspaces and access-control policies in Unity Catalog to groups, + instead of to users individually. All Databricks workspace identities can be assigned as members of + groups, and members inherit permissions that are assigned to their group. + + .. py:method:: create( [, display_name: Optional[str], entitlements: Optional[List[ComplexValue]], external_id: Optional[str], groups: Optional[List[ComplexValue]], id: Optional[str], members: Optional[List[ComplexValue]], meta: Optional[ResourceMeta], roles: Optional[List[ComplexValue]], schemas: Optional[List[GroupSchema]]]) -> Group + + Creates a group in the Databricks workspace with a unique name, using the supplied group details. + + :param display_name: str (optional) + String that represents a human-readable group name + :param entitlements: List[:class:`ComplexValue`] (optional) + Entitlements assigned to the group. See [assigning entitlements] for a full list of supported + values. + + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements + :param external_id: str (optional) + :param groups: List[:class:`ComplexValue`] (optional) + :param id: str (optional) + Databricks group ID + :param members: List[:class:`ComplexValue`] (optional) + :param meta: :class:`ResourceMeta` (optional) + Container for the group identifier. Workspace local versus account. + :param roles: List[:class:`ComplexValue`] (optional) + Corresponds to AWS instance profile/arn role. + :param schemas: List[:class:`GroupSchema`] (optional) + The schema of the group. + + :returns: :class:`Group` + + + .. py:method:: delete(id: str) + + Deletes a group from the Databricks workspace. + + :param id: str + Unique ID for a group in the Databricks workspace. + + + + + .. 
py:method:: get(id: str) -> Group + + Gets the information for a specific group in the Databricks workspace. + + :param id: str + Unique ID for a group in the Databricks workspace. + + :returns: :class:`Group` + + + .. py:method:: list( [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[ListSortOrder], start_index: Optional[int]]) -> Iterator[Group] + + Gets all details of the groups associated with the Databricks workspace. + + :param attributes: str (optional) + Comma-separated list of attributes to return in response. + :param count: int (optional) + Desired number of results per page. + :param excluded_attributes: str (optional) + Comma-separated list of attributes to exclude in response. + :param filter: str (optional) + Query by which the results have to be filtered. Supported operators are equals(`eq`), + contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be + formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently + only support simple expressions. + + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + :param sort_by: str (optional) + Attribute to sort the results. + :param sort_order: :class:`ListSortOrder` (optional) + The order to sort the results. + :param start_index: int (optional) + Specifies the index of the first result. First item is number 1. + + :returns: Iterator over :class:`Group` + + + .. py:method:: patch(id: str [, operations: Optional[List[Patch]], schemas: Optional[List[PatchSchema]]]) + + Partially updates the details of a group. + + :param id: str + Unique ID in the Databricks workspace. + :param operations: List[:class:`Patch`] (optional) + :param schemas: List[:class:`PatchSchema`] (optional) + The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. + + + + + .. py:method:: update(id: str [, display_name: Optional[str], entitlements: Optional[List[ComplexValue]], external_id: Optional[str], groups: Optional[List[ComplexValue]], members: Optional[List[ComplexValue]], meta: Optional[ResourceMeta], roles: Optional[List[ComplexValue]], schemas: Optional[List[GroupSchema]]]) + + Updates the details of a group by replacing the entire group entity. + + :param id: str + Databricks group ID + :param display_name: str (optional) + String that represents a human-readable group name + :param entitlements: List[:class:`ComplexValue`] (optional) + Entitlements assigned to the group. See [assigning entitlements] for a full list of supported + values. + + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements + :param external_id: str (optional) + :param groups: List[:class:`ComplexValue`] (optional) + :param members: List[:class:`ComplexValue`] (optional) + :param meta: :class:`ResourceMeta` (optional) + Container for the group identifier. Workspace local versus account. + :param roles: List[:class:`ComplexValue`] (optional) + Corresponds to AWS instance profile/arn role. + :param schemas: List[:class:`GroupSchema`] (optional) + The schema of the group. 
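A minimal sketch of the `w.groups_v2` surface documented above, assuming a configured WorkspaceClient; the filter string follows the simple SCIM expressions described for `list()`:

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    group = w.groups_v2.create(display_name="data-platform-team")

    # Look the group back up with a simple SCIM filter expression.
    for g in w.groups_v2.list(filter=f'displayName eq "{group.display_name}"'):
        print(g.id, g.display_name)

    # cleanup
    w.groups_v2.delete(id=group.id)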
+ + + \ No newline at end of file diff --git a/docs/workspace/iam/index.rst b/docs/workspace/iam/index.rst index 00a7f1fe7..c3b7cced1 100644 --- a/docs/workspace/iam/index.rst +++ b/docs/workspace/iam/index.rst @@ -10,8 +10,8 @@ Manage users, service principals, groups and their permissions in Accounts and W access_control account_access_control_proxy current_user - groups + groups_v2 permission_migration permissions - service_principals - users \ No newline at end of file + service_principals_v2 + users_v2 \ No newline at end of file diff --git a/docs/workspace/iam/permissions.rst b/docs/workspace/iam/permissions.rst index dae53fa2e..ea24afd1a 100644 --- a/docs/workspace/iam/permissions.rst +++ b/docs/workspace/iam/permissions.rst @@ -44,14 +44,14 @@ obj = w.workspace.get_status(path=notebook_path) - levels = w.permissions.get_permission_levels(request_object_type="notebooks", request_object_id="%d" % (obj.object_id)) + _ = w.permissions.get(request_object_type="notebooks", request_object_id="%d" % (obj.object_id)) Gets the permissions of an object. Objects can inherit permissions from their parent objects or root object. :param request_object_type: str The type of the request object. Can be one of the following: alerts, alertsv2, authorization, - clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, + clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, genie, instance-pools, jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses. :param request_object_id: str @@ -83,7 +83,7 @@ :param request_object_type: str The type of the request object. Can be one of the following: alerts, alertsv2, authorization, - clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, + clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, genie, instance-pools, jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses. :param request_object_id: str @@ -131,7 +131,7 @@ :param request_object_type: str The type of the request object. Can be one of the following: alerts, alertsv2, authorization, - clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, + clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, genie, instance-pools, jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses. :param request_object_id: str @@ -148,7 +148,7 @@ :param request_object_type: str The type of the request object. Can be one of the following: alerts, alertsv2, authorization, - clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, + clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, genie, instance-pools, jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses. :param request_object_id: str diff --git a/docs/workspace/iam/service_principals_v2.rst b/docs/workspace/iam/service_principals_v2.rst new file mode 100644 index 000000000..292722e40 --- /dev/null +++ b/docs/workspace/iam/service_principals_v2.rst @@ -0,0 +1,127 @@ +``w.service_principals_v2``: Service Principals +=============================================== +.. currentmodule:: databricks.sdk.service.iam + +.. 
py:class:: ServicePrincipalsV2API + + Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. + Databricks recommends creating service principals to run production jobs or modify production data. If all + processes that act on production data run with service principals, interactive users do not need any + write, delete, or modify privileges in production. This eliminates the risk of a user overwriting + production data by accident. + + .. py:method:: create( [, active: Optional[bool], application_id: Optional[str], display_name: Optional[str], entitlements: Optional[List[ComplexValue]], external_id: Optional[str], groups: Optional[List[ComplexValue]], id: Optional[str], roles: Optional[List[ComplexValue]], schemas: Optional[List[ServicePrincipalSchema]]]) -> ServicePrincipal + + Creates a new service principal in the Databricks workspace. + + :param active: bool (optional) + If this user is active + :param application_id: str (optional) + UUID relating to the service principal + :param display_name: str (optional) + String that represents a concatenation of given and family names. + :param entitlements: List[:class:`ComplexValue`] (optional) + Entitlements assigned to the service principal. See [assigning entitlements] for a full list of + supported values. + + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements + :param external_id: str (optional) + :param groups: List[:class:`ComplexValue`] (optional) + :param id: str (optional) + Databricks service principal ID. + :param roles: List[:class:`ComplexValue`] (optional) + Corresponds to AWS instance profile/arn role. + :param schemas: List[:class:`ServicePrincipalSchema`] (optional) + The schema of the List response. + + :returns: :class:`ServicePrincipal` + + + .. py:method:: delete(id: str) + + Delete a single service principal in the Databricks workspace. + + :param id: str + Unique ID for a service principal in the Databricks workspace. + + + + + .. py:method:: get(id: str) -> ServicePrincipal + + Gets the details for a single service principal define in the Databricks workspace. + + :param id: str + Unique ID for a service principal in the Databricks workspace. + + :returns: :class:`ServicePrincipal` + + + .. py:method:: list( [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[ListSortOrder], start_index: Optional[int]]) -> Iterator[ServicePrincipal] + + Gets the set of service principals associated with a Databricks workspace. + + :param attributes: str (optional) + Comma-separated list of attributes to return in response. + :param count: int (optional) + Desired number of results per page. + :param excluded_attributes: str (optional) + Comma-separated list of attributes to exclude in response. + :param filter: str (optional) + Query by which the results have to be filtered. Supported operators are equals(`eq`), + contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be + formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently + only support simple expressions. + + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + :param sort_by: str (optional) + Attribute to sort the results. + :param sort_order: :class:`ListSortOrder` (optional) + The order to sort the results. 
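A minimal sketch of `w.service_principals_v2`, assuming a configured WorkspaceClient; sorting by `displayName` is an assumption based on SCIM attribute naming:

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    sp = w.service_principals_v2.create(display_name="ci-cd-pipeline")
    print(sp.id, sp.application_id)

    # Page through existing service principals, 50 per page.
    for principal in w.service_principals_v2.list(count=50, sort_by="displayName"):
        print(principal.display_name)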
+ :param start_index: int (optional) + Specifies the index of the first result. First item is number 1. + + :returns: Iterator over :class:`ServicePrincipal` + + + .. py:method:: patch(id: str [, operations: Optional[List[Patch]], schemas: Optional[List[PatchSchema]]]) + + Partially updates the details of a single service principal in the Databricks workspace. + + :param id: str + Unique ID in the Databricks workspace. + :param operations: List[:class:`Patch`] (optional) + :param schemas: List[:class:`PatchSchema`] (optional) + The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. + + + + + .. py:method:: update(id: str [, active: Optional[bool], application_id: Optional[str], display_name: Optional[str], entitlements: Optional[List[ComplexValue]], external_id: Optional[str], groups: Optional[List[ComplexValue]], roles: Optional[List[ComplexValue]], schemas: Optional[List[ServicePrincipalSchema]]]) + + Updates the details of a single service principal. + + This action replaces the existing service principal with the same name. + + :param id: str + Databricks service principal ID. + :param active: bool (optional) + If this user is active + :param application_id: str (optional) + UUID relating to the service principal + :param display_name: str (optional) + String that represents a concatenation of given and family names. + :param entitlements: List[:class:`ComplexValue`] (optional) + Entitlements assigned to the service principal. See [assigning entitlements] for a full list of + supported values. + + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements + :param external_id: str (optional) + :param groups: List[:class:`ComplexValue`] (optional) + :param roles: List[:class:`ComplexValue`] (optional) + Corresponds to AWS instance profile/arn role. + :param schemas: List[:class:`ServicePrincipalSchema`] (optional) + The schema of the List response. + + + \ No newline at end of file diff --git a/docs/workspace/iam/users_v2.rst b/docs/workspace/iam/users_v2.rst new file mode 100644 index 000000000..b49ada07a --- /dev/null +++ b/docs/workspace/iam/users_v2.rst @@ -0,0 +1,201 @@ +``w.users_v2``: Users +===================== +.. currentmodule:: databricks.sdk.service.iam + +.. py:class:: UsersV2API + + User identities recognized by Databricks and represented by email addresses. + + Databricks recommends using SCIM provisioning to sync users and groups automatically from your identity + provider to your Databricks workspace. SCIM streamlines onboarding a new employee or team by using your + identity provider to create users and groups in Databricks workspace and give them the proper level of + access. When a user leaves your organization or no longer needs access to Databricks workspace, admins can + terminate the user in your identity provider and that user’s account will also be removed from + Databricks workspace. This ensures a consistent offboarding process and prevents unauthorized users from + accessing sensitive data. + + .. py:method:: create( [, active: Optional[bool], display_name: Optional[str], emails: Optional[List[ComplexValue]], entitlements: Optional[List[ComplexValue]], external_id: Optional[str], groups: Optional[List[ComplexValue]], id: Optional[str], name: Optional[Name], roles: Optional[List[ComplexValue]], schemas: Optional[List[UserSchema]], user_name: Optional[str]]) -> User + + Creates a new user in the Databricks workspace. 
This new user will also be added to the Databricks + account. + + :param active: bool (optional) + If this user is active + :param display_name: str (optional) + String that represents a concatenation of given and family names. For example `John Smith`. This + field cannot be updated through the Workspace SCIM APIs when [identity federation is enabled]. Use + Account SCIM APIs to update `displayName`. + + [identity federation is enabled]: https://docs.databricks.com/administration-guide/users-groups/best-practices.html#enable-identity-federation + :param emails: List[:class:`ComplexValue`] (optional) + All the emails associated with the Databricks user. + :param entitlements: List[:class:`ComplexValue`] (optional) + Entitlements assigned to the user. See [assigning entitlements] for a full list of supported values. + + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements + :param external_id: str (optional) + External ID is not currently supported. It is reserved for future use. + :param groups: List[:class:`ComplexValue`] (optional) + :param id: str (optional) + Databricks user ID. + :param name: :class:`Name` (optional) + :param roles: List[:class:`ComplexValue`] (optional) + Corresponds to AWS instance profile/arn role. + :param schemas: List[:class:`UserSchema`] (optional) + The schema of the user. + :param user_name: str (optional) + Email address of the Databricks user. + + :returns: :class:`User` + + + .. py:method:: delete(id: str) + + Deletes a user. Deleting a user from a Databricks workspace also removes objects associated with the + user. + + :param id: str + Unique ID for a user in the Databricks workspace. + + + + + .. py:method:: get(id: str [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[GetSortOrder], start_index: Optional[int]]) -> User + + Gets information for a specific user in Databricks workspace. + + :param id: str + Unique ID for a user in the Databricks workspace. + :param attributes: str (optional) + Comma-separated list of attributes to return in response. + :param count: int (optional) + Desired number of results per page. + :param excluded_attributes: str (optional) + Comma-separated list of attributes to exclude in response. + :param filter: str (optional) + Query by which the results have to be filtered. Supported operators are equals(`eq`), + contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be + formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently + only support simple expressions. + + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + :param sort_by: str (optional) + Attribute to sort the results. Multi-part paths are supported. For example, `userName`, + `name.givenName`, and `emails`. + :param sort_order: :class:`GetSortOrder` (optional) + The order to sort the results. + :param start_index: int (optional) + Specifies the index of the first result. First item is number 1. + + :returns: :class:`User` + + + .. py:method:: get_permission_levels() -> GetPasswordPermissionLevelsResponse + + Gets the permission levels that a user can have on an object. + + + :returns: :class:`GetPasswordPermissionLevelsResponse` + + + .. py:method:: get_permissions() -> PasswordPermissions + + Gets the permissions of all passwords. Passwords can inherit permissions from their root object. 
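As a quick illustration of the user lifecycle described on this page (the ``w.users_v2`` accessor comes from the page title; the e-mail domain below is a placeholder):

.. code-block::

    import time

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # user_name is the user's email address; the domain here is a placeholder.
    user = w.users_v2.create(
        display_name=f"sdk-{time.time_ns()}",
        user_name=f"sdk-{time.time_ns()}@example.com",
    )

    # Fetch the user back, asking only for a few attributes.
    fetched = w.users_v2.get(id=user.id, attributes="id,userName,displayName")
    print(fetched.user_name)

    # cleanup
    w.users_v2.delete(id=user.id)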
+ + + :returns: :class:`PasswordPermissions` + + + .. py:method:: list( [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[ListSortOrder], start_index: Optional[int]]) -> Iterator[User] + + Gets details for all the users associated with a Databricks workspace. + + :param attributes: str (optional) + Comma-separated list of attributes to return in response. + :param count: int (optional) + Desired number of results per page. + :param excluded_attributes: str (optional) + Comma-separated list of attributes to exclude in response. + :param filter: str (optional) + Query by which the results have to be filtered. Supported operators are equals(`eq`), + contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be + formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently + only support simple expressions. + + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + :param sort_by: str (optional) + Attribute to sort the results. Multi-part paths are supported. For example, `userName`, + `name.givenName`, and `emails`. + :param sort_order: :class:`ListSortOrder` (optional) + The order to sort the results. + :param start_index: int (optional) + Specifies the index of the first result. First item is number 1. + + :returns: Iterator over :class:`User` + + + .. py:method:: patch(id: str [, operations: Optional[List[Patch]], schemas: Optional[List[PatchSchema]]]) + + Partially updates a user resource by applying the supplied operations on specific user attributes. + + :param id: str + Unique ID in the Databricks workspace. + :param operations: List[:class:`Patch`] (optional) + :param schemas: List[:class:`PatchSchema`] (optional) + The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. + + + + + .. py:method:: set_permissions( [, access_control_list: Optional[List[PasswordAccessControlRequest]]]) -> PasswordPermissions + + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. + + :param access_control_list: List[:class:`PasswordAccessControlRequest`] (optional) + + :returns: :class:`PasswordPermissions` + + + .. py:method:: update(id: str [, active: Optional[bool], display_name: Optional[str], emails: Optional[List[ComplexValue]], entitlements: Optional[List[ComplexValue]], external_id: Optional[str], groups: Optional[List[ComplexValue]], name: Optional[Name], roles: Optional[List[ComplexValue]], schemas: Optional[List[UserSchema]], user_name: Optional[str]]) + + Replaces a user's information with the data supplied in request. + + :param id: str + Databricks user ID. + :param active: bool (optional) + If this user is active + :param display_name: str (optional) + String that represents a concatenation of given and family names. For example `John Smith`. This + field cannot be updated through the Workspace SCIM APIs when [identity federation is enabled]. Use + Account SCIM APIs to update `displayName`. + + [identity federation is enabled]: https://docs.databricks.com/administration-guide/users-groups/best-practices.html#enable-identity-federation + :param emails: List[:class:`ComplexValue`] (optional) + All the emails associated with the Databricks user. 
+ :param entitlements: List[:class:`ComplexValue`] (optional) + Entitlements assigned to the user. See [assigning entitlements] for a full list of supported values. + + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements + :param external_id: str (optional) + External ID is not currently supported. It is reserved for future use. + :param groups: List[:class:`ComplexValue`] (optional) + :param name: :class:`Name` (optional) + :param roles: List[:class:`ComplexValue`] (optional) + Corresponds to AWS instance profile/arn role. + :param schemas: List[:class:`UserSchema`] (optional) + The schema of the user. + :param user_name: str (optional) + Email address of the Databricks user. + + + + + .. py:method:: update_permissions( [, access_control_list: Optional[List[PasswordAccessControlRequest]]]) -> PasswordPermissions + + Updates the permissions on all passwords. Passwords can inherit permissions from their root object. + + :param access_control_list: List[:class:`PasswordAccessControlRequest`] (optional) + + :returns: :class:`PasswordPermissions` + \ No newline at end of file diff --git a/docs/workspace/iamv2/index.rst b/docs/workspace/iamv2/index.rst new file mode 100644 index 000000000..9418cad26 --- /dev/null +++ b/docs/workspace/iamv2/index.rst @@ -0,0 +1,10 @@ + +Identity and Access Management +============================== + +Manage identities and workspace access. + +.. toctree:: + :maxdepth: 1 + + workspace_iam_v2 \ No newline at end of file diff --git a/docs/workspace/iamv2/workspace_iam_v2.rst b/docs/workspace/iamv2/workspace_iam_v2.rst new file mode 100644 index 000000000..4bc594c67 --- /dev/null +++ b/docs/workspace/iamv2/workspace_iam_v2.rst @@ -0,0 +1,60 @@ +``w.workspace_iam_v2``: workspace_iam.v2 +======================================== +.. currentmodule:: databricks.sdk.service.iamv2 + +.. py:class:: WorkspaceIamV2API + + These APIs are used to manage identities and the workspace access of these identities in . + + .. py:method:: get_workspace_access_detail_local(principal_id: int [, view: Optional[WorkspaceAccessDetailView]]) -> WorkspaceAccessDetail + + Returns the access details for a principal in the current workspace. Allows for checking access + details for any provisioned principal (user, service principal, or group) in the current workspace. * + Provisioned principal here refers to one that has been synced into Databricks from the customer's IdP + or added explicitly to Databricks via SCIM/UI. Allows for passing in a "view" parameter to control + what fields are returned (BASIC by default or FULL). + + :param principal_id: int + Required. The internal ID of the principal (user/sp/group) for which the access details are being + requested. + :param view: :class:`WorkspaceAccessDetailView` (optional) + Controls what fields are returned. + + :returns: :class:`WorkspaceAccessDetail` + + + .. py:method:: resolve_group_proxy(external_id: str) -> ResolveGroupResponse + + Resolves a group with the given external ID from the customer's IdP. If the group does not exist, it + will be created in the account. If the customer is not onboarded onto Automatic Identity Management + (AIM), this will return an error. + + :param external_id: str + Required. The external ID of the group in the customer's IdP. + + :returns: :class:`ResolveGroupResponse` + + + .. 
py:method:: resolve_service_principal_proxy(external_id: str) -> ResolveServicePrincipalResponse + + Resolves an SP with the given external ID from the customer's IdP. If the SP does not exist, it will + be created. If the customer is not onboarded onto Automatic Identity Management (AIM), this will + return an error. + + :param external_id: str + Required. The external ID of the service principal in the customer's IdP. + + :returns: :class:`ResolveServicePrincipalResponse` + + + .. py:method:: resolve_user_proxy(external_id: str) -> ResolveUserResponse + + Resolves a user with the given external ID from the customer's IdP. If the user does not exist, it + will be created. If the customer is not onboarded onto Automatic Identity Management (AIM), this will + return an error. + + :param external_id: str + Required. The external ID of the user in the customer's IdP. + + :returns: :class:`ResolveUserResponse` + \ No newline at end of file diff --git a/docs/workspace/index.rst b/docs/workspace/index.rst index ec174a770..7f69ccba9 100644 --- a/docs/workspace/index.rst +++ b/docs/workspace/index.rst @@ -14,8 +14,10 @@ These APIs are available from WorkspaceClient compute/index dashboards/index database/index + dataquality/index files/index iam/index + iamv2/index jobs/index marketplace/index ml/index diff --git a/docs/workspace/ml/feature_engineering.rst b/docs/workspace/ml/feature_engineering.rst new file mode 100644 index 000000000..8d6988638 --- /dev/null +++ b/docs/workspace/ml/feature_engineering.rst @@ -0,0 +1,63 @@ +``w.feature_engineering``: Feature Engineering +============================================== +.. currentmodule:: databricks.sdk.service.ml + +.. py:class:: FeatureEngineeringAPI + + [description] + + .. py:method:: create_feature(feature: Feature) -> Feature + + Create a Feature. + + :param feature: :class:`Feature` + Feature to create. + + :returns: :class:`Feature` + + + .. py:method:: delete_feature(full_name: str) + + Delete a Feature. + + :param full_name: str + Name of the feature to delete. + + + + + .. py:method:: get_feature(full_name: str) -> Feature + + Get a Feature. + + :param full_name: str + Name of the feature to get. + + :returns: :class:`Feature` + + + .. py:method:: list_features( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[Feature] + + List Features. + + :param page_size: int (optional) + The maximum number of results to return. + :param page_token: str (optional) + Pagination token to go to the next page based on a previous query. + + :returns: Iterator over :class:`Feature` + + + .. py:method:: update_feature(full_name: str, feature: Feature, update_mask: str) -> Feature + + Update a Feature. + + :param full_name: str + The full three-part name (catalog, schema, name) of the feature. + :param feature: :class:`Feature` + Feature to update. + :param update_mask: str + The list of fields to update. 
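Stepping back to the workspace IAM v2 methods above, a small sketch of resolving an IdP group and checking a principal's workspace access. The external ID and principal ID are placeholders, and the resolve calls only succeed when Automatic Identity Management is enabled:

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Resolve a group by its external (IdP) ID; the value below is a placeholder.
    group = w.workspace_iam_v2.resolve_group_proxy(
        external_id="00000000-0000-0000-0000-000000000000"
    )
    print(group)

    # Check the access details for a provisioned principal in this workspace.
    # The principal_id below is a placeholder for an internal principal ID.
    detail = w.workspace_iam_v2.get_workspace_access_detail_local(principal_id=1234567890)
    print(detail)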
+ + :returns: :class:`Feature` + \ No newline at end of file diff --git a/docs/workspace/ml/index.rst b/docs/workspace/ml/index.rst index 44b2dbb0f..04c803603 100644 --- a/docs/workspace/ml/index.rst +++ b/docs/workspace/ml/index.rst @@ -8,6 +8,7 @@ Create and manage experiments, features, and other machine learning artifacts :maxdepth: 1 experiments + feature_engineering feature_store forecasting materialized_features diff --git a/docs/workspace/ml/model_registry.rst b/docs/workspace/ml/model_registry.rst index 2d34256e4..9a6c8f286 100644 --- a/docs/workspace/ml/model_registry.rst +++ b/docs/workspace/ml/model_registry.rst @@ -120,7 +120,7 @@ model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") - mv = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") + created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") Creates a model version. @@ -734,14 +734,13 @@ w = WorkspaceClient() - model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + created = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") - created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") + model = w.model_registry.get_model(name=created.registered_model.name) - w.model_registry.update_model_version( + w.model_registry.update_model( + name=model.registered_model_databricks.name, description=f"sdk-{time.time_ns()}", - name=created.model_version.name, - version=created.model_version.version, ) Updates a registered model. diff --git a/docs/workspace/serving/serving_endpoints.rst b/docs/workspace/serving/serving_endpoints.rst index c96f3fbbe..5946109ce 100644 --- a/docs/workspace/serving/serving_endpoints.rst +++ b/docs/workspace/serving/serving_endpoints.rst @@ -376,6 +376,19 @@ .. py:method:: update_config_and_wait(name: str [, auto_capture_config: Optional[AutoCaptureConfigInput], served_entities: Optional[List[ServedEntityInput]], served_models: Optional[List[ServedModelInput]], traffic_config: Optional[TrafficConfig], timeout: datetime.timedelta = 0:20:00]) -> ServingEndpointDetailed + .. py:method:: update_notifications(name: str [, email_notifications: Optional[EmailNotifications]]) -> UpdateInferenceEndpointNotificationsResponse + + Updates the email and webhook notification settings for an endpoint. + + :param name: str + The name of the serving endpoint whose notifications are being updated. This field is required. + :param email_notifications: :class:`EmailNotifications` (optional) + The email notification settings to update. Specify email addresses to notify when endpoint state + changes occur. + + :returns: :class:`UpdateInferenceEndpointNotificationsResponse` + + .. py:method:: update_permissions(serving_endpoint_id: str [, access_control_list: Optional[List[ServingEndpointAccessControlRequest]]]) -> ServingEndpointPermissions Updates the permissions on a serving endpoint. Serving endpoints can inherit permissions from their diff --git a/docs/workspace/settingsv2/workspace_settings_v2.rst b/docs/workspace/settingsv2/workspace_settings_v2.rst index da8557baf..2d6d379df 100644 --- a/docs/workspace/settingsv2/workspace_settings_v2.rst +++ b/docs/workspace/settingsv2/workspace_settings_v2.rst @@ -8,7 +8,8 @@ .. py:method:: get_public_workspace_setting(name: str) -> Setting - Get a setting value at workspace level + Get a setting value at workspace level. See :method:settingsv2/listworkspacesettingsmetadata for list + of setting available via public APIs. 
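A sketch of the workspace-settings flow documented here: first discover which setting keys are available, then read one of them. The ``w.settings_v2`` accessor name is an assumption of this sketch, and the setting name is a placeholder:

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Discover which setting keys are exposed through the public API.
    # The `settings_v2` accessor name is assumed for this sketch.
    for meta in w.settings_v2.list_workspace_settings_metadata(page_size=50):
        print(meta)

    # Read a single setting by name; the name below is a placeholder.
    setting = w.settings_v2.get_public_workspace_setting(name="some_setting_name")
    print(setting)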
:param name: str @@ -17,9 +18,9 @@ .. py:method:: list_workspace_settings_metadata( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[SettingsMetadata] - List valid setting keys and metadata. These settings are available to referenced via [GET - /api/2.1/settings/{name}](#~1api~1workspace~1settingsv2~1getpublicworkspacesetting) and [PATCH - /api/2.1/settings/{name}](#~1api~1workspace~1settingsv2~patchpublicworkspacesetting) APIs + List valid setting keys and metadata. These settings are available to be referenced via GET + :method:settingsv2/getpublicworkspacesetting and PATCH :method:settingsv2/patchpublicworkspacesetting + APIs :param page_size: int (optional) The maximum number of settings to return. The service may return fewer than this value. If @@ -37,7 +38,8 @@ .. py:method:: patch_public_workspace_setting(name: str, setting: Setting) -> Setting - Patch a setting value at workspace level + Patch a setting value at workspace level. See :method:settingsv2/listworkspacesettingsmetadata for + list of setting available via public APIs at workspace level. :param name: str :param setting: :class:`Setting` diff --git a/docs/workspace/sharing/providers.rst b/docs/workspace/sharing/providers.rst index fd81e1b24..1a7c88de9 100644 --- a/docs/workspace/sharing/providers.rst +++ b/docs/workspace/sharing/providers.rst @@ -101,12 +101,25 @@ .. code-block:: + import time + from databricks.sdk import WorkspaceClient - from databricks.sdk.service import sharing w = WorkspaceClient() - all = w.providers.list(sharing.ListProvidersRequest()) + public_share_recipient = """{ + "shareCredentialsVersion":1, + "bearerToken":"dapiabcdefghijklmonpqrstuvwxyz", + "endpoint":"https://sharing.delta.io/delta-sharing/" + } + """ + + created = w.providers.create(name=f"sdk-{time.time_ns()}", recipient_profile_str=public_share_recipient) + + shares = w.providers.list_shares(name=created.name) + + # cleanup + w.providers.delete(name=created.name) Gets an array of available authentication providers. The caller must either be a metastore admin or the owner of the providers. Providers not owned by the caller are not included in the response. There diff --git a/docs/workspace/sharing/shares.rst b/docs/workspace/sharing/shares.rst index 1bf63fdf7..d749bf458 100644 --- a/docs/workspace/sharing/shares.rst +++ b/docs/workspace/sharing/shares.rst @@ -81,7 +81,7 @@ :returns: :class:`ShareInfo` - .. py:method:: list( [, max_results: Optional[int], page_token: Optional[str]]) -> Iterator[ShareInfo] + .. py:method:: list_shares( [, max_results: Optional[int], page_token: Optional[str]]) -> Iterator[ShareInfo] Usage: diff --git a/docs/workspace/sql/queries.rst b/docs/workspace/sql/queries.rst index f0081b3f2..0dfb63fbf 100644 --- a/docs/workspace/sql/queries.rst +++ b/docs/workspace/sql/queries.rst @@ -29,7 +29,7 @@ display_name=f"sdk-{time.time_ns()}", warehouse_id=srcs[0].warehouse_id, description="test query from Go SDK", - query_text="SHOW TABLES", + query_text="SELECT 1", ) ) diff --git a/docs/workspace/sql/statement_execution.rst b/docs/workspace/sql/statement_execution.rst index 12bd8ba7a..7823048b2 100644 --- a/docs/workspace/sql/statement_execution.rst +++ b/docs/workspace/sql/statement_execution.rst @@ -29,17 +29,17 @@ the statement execution has not yet finished. This can be set to either `CONTINUE`, to fallback to asynchronous mode, or it can be set to `CANCEL`, which cancels the statement. 
- In summary: - Synchronous mode - `wait_timeout=30s` and `on_wait_timeout=CANCEL` - The call waits up to 30 - seconds; if the statement execution finishes within this time, the result data is returned directly in the - response. If the execution takes longer than 30 seconds, the execution is canceled and the call returns - with a `CANCELED` state. - Asynchronous mode - `wait_timeout=0s` (`on_wait_timeout` is ignored) - The call - doesn't wait for the statement to finish but returns directly with a statement ID. The status of the - statement execution can be polled by issuing :method:statementexecution/getStatement with the statement - ID. Once the execution has succeeded, this call also returns the result and metadata in the response. - - Hybrid mode (default) - `wait_timeout=10s` and `on_wait_timeout=CONTINUE` - The call waits for up to 10 - seconds; if the statement execution finishes within this time, the result data is returned directly in the - response. If the execution takes longer than 10 seconds, a statement ID is returned. The statement ID can - be used to fetch status and results in the same way as in the asynchronous mode. + In summary: - **Synchronous mode** (`wait_timeout=30s` and `on_wait_timeout=CANCEL`): The call waits up to + 30 seconds; if the statement execution finishes within this time, the result data is returned directly in + the response. If the execution takes longer than 30 seconds, the execution is canceled and the call + returns with a `CANCELED` state. - **Asynchronous mode** (`wait_timeout=0s` and `on_wait_timeout` is + ignored): The call doesn't wait for the statement to finish but returns directly with a statement ID. The + status of the statement execution can be polled by issuing :method:statementexecution/getStatement with + the statement ID. Once the execution has succeeded, this call also returns the result and metadata in the + response. - **[Default] Hybrid mode** (`wait_timeout=10s` and `on_wait_timeout=CONTINUE`): The call waits + for up to 10 seconds; if the statement execution finishes within this time, the result data is returned + directly in the response. If the execution takes longer than 10 seconds, a statement ID is returned. The + statement ID can be used to fetch status and results in the same way as in the asynchronous mode. Depending on the size, the result can be split into multiple chunks. If the statement execution is successful, the statement response contains a manifest and the first chunk of the result. The manifest @@ -92,7 +92,7 @@ .. py:method:: cancel_execution(statement_id: str) Requests that an executing statement be canceled. Callers must poll for status to see the terminal - state. + state. Cancel response is empty; receiving response indicates successful receipt. :param statement_id: str The statement ID is returned upon successfully submitting a SQL statement, and is a required @@ -103,7 +103,52 @@ .. py:method:: execute_statement(statement: str, warehouse_id: str [, byte_limit: Optional[int], catalog: Optional[str], disposition: Optional[Disposition], format: Optional[Format], on_wait_timeout: Optional[ExecuteStatementRequestOnWaitTimeout], parameters: Optional[List[StatementParameterListItem]], row_limit: Optional[int], schema: Optional[str], wait_timeout: Optional[str]]) -> StatementResponse - Execute a SQL statement + Execute a SQL statement and optionally await its results for a specified time. 
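The modes summarized here map directly onto the `wait_timeout` and `on_wait_timeout` arguments. A sketch of the synchronous and asynchronous variants; the warehouse ID is a placeholder, and the enum member names are assumed to follow the SDK's usual naming:

.. code-block::

    import time

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import sql

    w = WorkspaceClient()
    warehouse_id = "<warehouse-id>"  # placeholder

    # Synchronous mode: wait up to 30s, cancel the statement if it takes longer.
    resp = w.statement_execution.execute_statement(
        statement="SELECT 1",
        warehouse_id=warehouse_id,
        wait_timeout="30s",
        on_wait_timeout=sql.ExecuteStatementRequestOnWaitTimeout.CANCEL,
    )
    print(resp.status.state)

    # Asynchronous mode: return immediately with a statement ID, then poll.
    resp = w.statement_execution.execute_statement(
        statement="SELECT 1",
        warehouse_id=warehouse_id,
        wait_timeout="0s",
    )
    while w.statement_execution.get_statement(resp.statement_id).status.state in (
        sql.StatementState.PENDING,
        sql.StatementState.RUNNING,
    ):
        time.sleep(2)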
+ + **Use case: small result sets with INLINE + JSON_ARRAY** + + For flows that generate small and predictable result sets (<= 25 MiB), `INLINE` responses of + `JSON_ARRAY` result data are typically the simplest way to execute and fetch result data. + + **Use case: large result sets with EXTERNAL_LINKS** + + Using `EXTERNAL_LINKS` to fetch result data allows you to fetch large result sets efficiently. The + main differences from using `INLINE` disposition are that the result data is accessed with URLs, and + that there are 3 supported formats: `JSON_ARRAY`, `ARROW_STREAM` and `CSV` compared to only + `JSON_ARRAY` with `INLINE`. + + ** URLs** + + External links point to data stored within your workspace's internal storage, in the form of a URL. + The URLs are valid for only a short period, <= 15 minutes. Alongside each `external_link` is an + expiration field indicating the time at which the URL is no longer valid. In `EXTERNAL_LINKS` mode, + chunks can be resolved and fetched multiple times and in parallel. + + ---- + + ### **Warning: Databricks strongly recommends that you protect the URLs that are returned by the + `EXTERNAL_LINKS` disposition.** + + When you use the `EXTERNAL_LINKS` disposition, a short-lived, URL is generated, which can be used to + download the results directly from . As a short-lived is embedded in this URL, you should protect the + URL. + + Because URLs are already generated with embedded temporary s, you must not set an `Authorization` + header in the download requests. + + The `EXTERNAL_LINKS` disposition can be disabled upon request by creating a support case. + + See also [Security best practices]. + + ---- + + StatementResponse contains `statement_id` and `status`; other fields might be absent or present + depending on context. If the SQL warehouse fails to execute the provided statement, a 200 response is + returned with `status.state` set to `FAILED` (in contrast to a failure when accepting the request, + which results in a non-200 response). Details of the error can be found at `status.error` in case of + execution failures. + + [Security best practices]: https://docs.databricks.com/sql/admin/sql-execution-tutorial.html#security-best-practices :param statement: str The SQL statement to execute. The statement can optionally be parameterized, see `parameters`. The @@ -117,12 +162,32 @@ representations and might not match the final size in the requested `format`. If the result was truncated due to the byte limit, then `truncated` in the response is set to `true`. When using `EXTERNAL_LINKS` disposition, a default `byte_limit` of 100 GiB is applied if `byte_limit` is not - explcitly set. + explicitly set. :param catalog: str (optional) Sets default catalog for statement execution, similar to [`USE CATALOG`] in SQL. [`USE CATALOG`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html :param disposition: :class:`Disposition` (optional) + The fetch disposition provides two modes of fetching results: `INLINE` and `EXTERNAL_LINKS`. + + Statements executed with `INLINE` disposition will return result data inline, in `JSON_ARRAY` + format, in a series of chunks. If a given statement produces a result set with a size larger than 25 + MiB, that statement execution is aborted, and no result set will be available. + + **NOTE** Byte limits are computed based upon internal representations of the result set data, and + might not match the sizes visible in JSON responses. 
+ + Statements executed with `EXTERNAL_LINKS` disposition will return result data as external links: + URLs that point to cloud storage internal to the workspace. Using `EXTERNAL_LINKS` disposition + allows statements to generate arbitrarily sized result sets for fetching up to 100 GiB. The + resulting links have two important properties: + + 1. They point to resources _external_ to the Databricks compute; therefore any associated + authentication information (typically a personal access token, OAuth token, or similar) _must be + removed_ when fetching from these links. + + 2. These are URLs with a specific expiration, indicated in the response. The behavior when + attempting to use an expired link is cloud specific. :param format: :class:`Format` (optional) Statement execution supports three result formats: `JSON_ARRAY` (default), `ARROW_STREAM`, and `CSV`. @@ -173,13 +238,13 @@ For example, the following statement contains two parameters, `my_name` and `my_date`: - SELECT * FROM my_table WHERE name = :my_name AND date = :my_date + ``` SELECT * FROM my_table WHERE name = :my_name AND date = :my_date ``` The parameters can be passed in the request body as follows: - { ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND date = :my_date", + ` { ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND date = :my_date", "parameters": [ { "name": "my_name", "value": "the name" }, { "name": "my_date", "value": - "2020-01-01", "type": "DATE" } ] } + "2020-01-01", "type": "DATE" } ] } ` Currently, positional parameters denoted by a `?` marker are not supported by the Databricks SQL Statement Execution API. @@ -215,11 +280,12 @@ .. py:method:: get_statement(statement_id: str) -> StatementResponse - This request can be used to poll for the statement's status. When the `status.state` field is - `SUCCEEDED` it will also return the result manifest and the first chunk of the result data. When the - statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 with the - state set. After at least 12 hours in terminal state, the statement is removed from the warehouse and - further calls will receive an HTTP 404 response. + This request can be used to poll for the statement's status. StatementResponse contains `statement_id` + and `status`; other fields might be absent or present depending on context. When the `status.state` + field is `SUCCEEDED` it will also return the result manifest and the first chunk of the result data. + When the statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 + with the state set. After at least 12 hours in terminal state, the statement is removed from the + warehouse and further calls will receive an HTTP 404 response. **NOTE** This call currently might take up to 5 seconds to get the latest status and result. @@ -238,6 +304,7 @@ can be used to fetch subsequent chunks. The response structure is identical to the nested `result` element described in the :method:statementexecution/getStatement request, and similarly includes the `next_chunk_index` and `next_chunk_internal_link` fields for simple iteration through the result set. + Depending on `disposition`, the response returns chunks of data either inline, or as links. 
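The named-parameter example above translates to the `parameters` argument as follows; the table name and warehouse ID are placeholders:

.. code-block::

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import sql

    w = WorkspaceClient()

    # Named parameters are bound by name; `my_table` and the warehouse ID are placeholders.
    resp = w.statement_execution.execute_statement(
        statement="SELECT * FROM my_table WHERE name = :my_name AND date = :my_date",
        warehouse_id="<warehouse-id>",
        parameters=[
            sql.StatementParameterListItem(name="my_name", value="the name"),
            sql.StatementParameterListItem(name="my_date", value="2020-01-01", type="DATE"),
        ],
    )
    print(resp.status.state)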
:param statement_id: str The statement ID is returned upon successfully submitting a SQL statement, and is a required diff --git a/docs/workspace/sql/warehouses.rst b/docs/workspace/sql/warehouses.rst index 94911bc1e..d5826ebe2 100644 --- a/docs/workspace/sql/warehouses.rst +++ b/docs/workspace/sql/warehouses.rst @@ -40,8 +40,7 @@ The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped. - Supported values: - Must be >= 0 mins for serverless warehouses - Must be == 0 or >= 10 mins for - non-serverless warehouses - 0 indicates no autostop. + Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop. Defaults to 120 mins :param channel: :class:`Channel` (optional) @@ -66,7 +65,7 @@ :param max_num_clusters: int (optional) Maximum number of clusters that the autoscaler will create to handle concurrent queries. - Supported values: - Must be >= min_num_clusters - Must be <= 30. + Supported values: - Must be >= min_num_clusters - Must be <= 40. Defaults to min_clusters if unset. :param min_num_clusters: int (optional) @@ -82,12 +81,15 @@ Supported values: - Must be unique within an org. - Must be less than 100 characters. :param spot_instance_policy: :class:`SpotInstancePolicy` (optional) + Configurations whether the endpoint should use spot instances. :param tags: :class:`EndpointTags` (optional) A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. Supported values: - Number of tags < 45. :param warehouse_type: :class:`CreateWarehouseRequestWarehouseType` (optional) + Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and + also set the field `enable_serverless_compute` to `true`. :returns: Long-running operation waiter for :class:`GetWarehouseResponse`. @@ -169,13 +171,13 @@ Defaults to false. :param enable_serverless_compute: bool (optional) - Configures whether the warehouse should use serverless compute. + Configures whether the warehouse should use serverless compute :param instance_profile_arn: str (optional) Deprecated. Instance profile used to pass IAM role to the cluster :param max_num_clusters: int (optional) Maximum number of clusters that the autoscaler will create to handle concurrent queries. - Supported values: - Must be >= min_num_clusters - Must be <= 30. + Supported values: - Must be >= min_num_clusters - Must be <= 40. Defaults to min_clusters if unset. :param min_num_clusters: int (optional) @@ -191,12 +193,15 @@ Supported values: - Must be unique within an org. - Must be less than 100 characters. :param spot_instance_policy: :class:`SpotInstancePolicy` (optional) + Configurations whether the endpoint should use spot instances. :param tags: :class:`EndpointTags` (optional) A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. Supported values: - Number of tags < 45. :param warehouse_type: :class:`EditWarehouseRequestWarehouseType` (optional) + Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO` and + also set the field `enable_serverless_compute` to `true`. :returns: Long-running operation waiter for :class:`GetWarehouseResponse`. @@ -272,7 +277,7 @@ :returns: :class:`GetWorkspaceWarehouseConfigResponse` - .. py:method:: list( [, run_as_user_id: Optional[int]]) -> Iterator[EndpointInfo] + .. 
py:method:: list( [, page_size: Optional[int], page_token: Optional[str], run_as_user_id: Optional[int]]) -> Iterator[EndpointInfo] Usage: @@ -286,11 +291,19 @@ all = w.warehouses.list(sql.ListWarehousesRequest()) - Lists all SQL warehouses that a user has manager permissions on. + Lists all SQL warehouses that a user has access to. + :param page_size: int (optional) + The max number of warehouses to return. + :param page_token: str (optional) + A page token, received from a previous `ListWarehouses` call. Provide this to retrieve the + subsequent page; otherwise the first will be retrieved. + + When paginating, all other parameters provided to `ListWarehouses` must match the call that provided + the page token. :param run_as_user_id: int (optional) - Service Principal which will be used to fetch the list of warehouses. If not specified, the user - from the session header is used. + Service Principal which will be used to fetch the list of endpoints. If not specified, SQL Gateway + will use the user from the session header. :returns: Iterator over :class:`EndpointInfo` @@ -307,7 +320,7 @@ :returns: :class:`WarehousePermissions` - .. py:method:: set_workspace_warehouse_config( [, channel: Optional[Channel], config_param: Optional[RepeatedEndpointConfPairs], data_access_config: Optional[List[EndpointConfPair]], enabled_warehouse_types: Optional[List[WarehouseTypePair]], global_param: Optional[RepeatedEndpointConfPairs], google_service_account: Optional[str], instance_profile_arn: Optional[str], security_policy: Optional[SetWorkspaceWarehouseConfigRequestSecurityPolicy], sql_configuration_parameters: Optional[RepeatedEndpointConfPairs]]) + .. py:method:: set_workspace_warehouse_config( [, channel: Optional[Channel], config_param: Optional[RepeatedEndpointConfPairs], data_access_config: Optional[List[EndpointConfPair]], enable_serverless_compute: Optional[bool], enabled_warehouse_types: Optional[List[WarehouseTypePair]], global_param: Optional[RepeatedEndpointConfPairs], google_service_account: Optional[str], instance_profile_arn: Optional[str], security_policy: Optional[SetWorkspaceWarehouseConfigRequestSecurityPolicy], sql_configuration_parameters: Optional[RepeatedEndpointConfPairs]]) Sets the workspace level configuration that is shared by all SQL warehouses in a workspace. @@ -317,6 +330,8 @@ Deprecated: Use sql_configuration_parameters :param data_access_config: List[:class:`EndpointConfPair`] (optional) Spark confs for external hive metastore configuration JSON serialized size must be less than <= 512K + :param enable_serverless_compute: bool (optional) + Enable Serverless compute for SQL warehouses :param enabled_warehouse_types: List[:class:`WarehouseTypePair`] (optional) List of Warehouse Types allowed in this workspace (limits allowed value of the type field in CreateWarehouse and EditWarehouse). Note: Some types cannot be disabled, they don't need to be @@ -328,7 +343,8 @@ :param google_service_account: str (optional) GCP only: Google Service Account used to pass to cluster to access Google Cloud Storage :param instance_profile_arn: str (optional) - AWS Only: Instance profile used to pass IAM role to the cluster + AWS Only: The instance profile used to pass an IAM role to the SQL warehouses. This configuration is + also applied to the workspace's serverless compute for notebooks and jobs. 
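Looping back to the paginated ``list`` call above: the returned iterator follows `page_token` internally, so a usage sketch only needs `page_size`:

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Pages of 10 warehouses are requested; subsequent page tokens are handled by the iterator.
    for wh in w.warehouses.list(page_size=10):
        print(wh.name, wh.state)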
:param security_policy: :class:`SetWorkspaceWarehouseConfigRequestSecurityPolicy` (optional) Security policy for warehouses :param sql_configuration_parameters: :class:`RepeatedEndpointConfPairs` (optional) diff --git a/docs/workspace/tags/tag_policies.rst b/docs/workspace/tags/tag_policies.rst index 2be7f5360..35f893b2e 100644 --- a/docs/workspace/tags/tag_policies.rst +++ b/docs/workspace/tags/tag_policies.rst @@ -4,11 +4,14 @@ .. py:class:: TagPoliciesAPI - The Tag Policy API allows you to manage tag policies in Databricks. + The Tag Policy API allows you to manage policies for governed tags in Databricks. Permissions for tag + policies can be managed using the [Account Access Control Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy .. py:method:: create_tag_policy(tag_policy: TagPolicy) -> TagPolicy - Creates a new tag policy. + Creates a new tag policy, making the associated tag key governed. :param tag_policy: :class:`TagPolicy` @@ -17,7 +20,7 @@ .. py:method:: delete_tag_policy(tag_key: str) - Deletes a tag policy by its key. + Deletes a tag policy by its associated governed tag's key, leaving that tag key ungoverned. :param tag_key: str @@ -26,7 +29,7 @@ .. py:method:: get_tag_policy(tag_key: str) -> TagPolicy - Gets a single tag policy by its key. + Gets a single tag policy by its associated governed tag's key. :param tag_key: str @@ -35,7 +38,7 @@ .. py:method:: list_tag_policies( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[TagPolicy] - Lists all tag policies in the account. + Lists the tag policies for all governed tags in the account. :param page_size: int (optional) The maximum number of results to return in this request. Fewer results may be returned than @@ -49,7 +52,7 @@ .. py:method:: update_tag_policy(tag_key: str, tag_policy: TagPolicy, update_mask: str) -> TagPolicy - Updates an existing tag policy. + Updates an existing tag policy for a single governed tag. :param tag_key: str :param tag_policy: :class:`TagPolicy` diff --git a/docs/workspace/workspace/git_credentials.rst b/docs/workspace/workspace/git_credentials.rst index 2dd9451c2..a5d8e8bef 100644 --- a/docs/workspace/workspace/git_credentials.rst +++ b/docs/workspace/workspace/git_credentials.rst @@ -10,7 +10,7 @@ [more info]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html - .. py:method:: create(git_provider: str [, git_username: Optional[str], is_default_for_provider: Optional[bool], name: Optional[str], personal_access_token: Optional[str]]) -> CreateCredentialsResponse + .. py:method:: create(git_provider: str [, git_email: Optional[str], git_username: Optional[str], is_default_for_provider: Optional[bool], name: Optional[str], personal_access_token: Optional[str]]) -> CreateCredentialsResponse Usage: @@ -34,12 +34,16 @@ Git provider. This field is case-insensitive. The available Git providers are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, `gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and `awsCodeCommit`. + :param git_email: str (optional) + The authenticating email associated with your Git provider user account. Used for authentication + with the remote repository and also sets the author & committer identity for commits. Required for + most Git providers except AWS CodeCommit. 
Learn more at + https://docs.databricks.com/aws/en/repos/get-access-tokens-from-git-provider :param git_username: str (optional) - The username or email provided with your Git provider account, depending on which provider you are - using. For GitHub, GitHub Enterprise Server, or Azure DevOps Services, either email or username may - be used. For GitLab, GitLab Enterprise Edition, email must be used. For AWS CodeCommit, BitBucket or - BitBucket Server, username must be used. For all other providers please see your provider's Personal - Access Token authentication documentation to see what is supported. + The username provided with your Git provider account and associated with the credential. For most + Git providers it is only used to set the Git committer & author names for commits, however it may be + required for authentication depending on your Git provider / token requirements. Required for AWS + CodeCommit. :param is_default_for_provider: bool (optional) if the credential is the default for the given provider :param name: str (optional) @@ -108,7 +112,7 @@ :returns: Iterator over :class:`CredentialInfo` - .. py:method:: update(credential_id: int, git_provider: str [, git_username: Optional[str], is_default_for_provider: Optional[bool], name: Optional[str], personal_access_token: Optional[str]]) + .. py:method:: update(credential_id: int, git_provider: str [, git_email: Optional[str], git_username: Optional[str], is_default_for_provider: Optional[bool], name: Optional[str], personal_access_token: Optional[str]]) Usage: @@ -141,12 +145,16 @@ Git provider. This field is case-insensitive. The available Git providers are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, `gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and `awsCodeCommit`. + :param git_email: str (optional) + The authenticating email associated with your Git provider user account. Used for authentication + with the remote repository and also sets the author & committer identity for commits. Required for + most Git providers except AWS CodeCommit. Learn more at + https://docs.databricks.com/aws/en/repos/get-access-tokens-from-git-provider :param git_username: str (optional) - The username or email provided with your Git provider account, depending on which provider you are - using. For GitHub, GitHub Enterprise Server, or Azure DevOps Services, either email or username may - be used. For GitLab, GitLab Enterprise Edition, email must be used. For AWS CodeCommit, BitBucket or - BitBucket Server, username must be used. For all other providers please see your provider's Personal - Access Token authentication documentation to see what is supported. + The username provided with your Git provider account and associated with the credential. For most + Git providers it is only used to set the Git committer & author names for commits, however it may be + required for authentication depending on your Git provider / token requirements. Required for AWS + CodeCommit. 
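A sketch of creating a credential that sets the new ``git_email`` field alongside the username and token; all values below are placeholders:

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Token, username and email are placeholders; git_email also sets the commit author identity.
    cred = w.git_credentials.create(
        git_provider="gitHub",
        git_username="octocat",
        git_email="octocat@example.com",
        personal_access_token="<personal-access-token>",
    )
    print(cred.credential_id)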
:param is_default_for_provider: bool (optional) if the credential is the default for the given provider :param name: str (optional) diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst index 4fba581e8..fbcb5374b 100644 --- a/docs/workspace/workspace/workspace.rst +++ b/docs/workspace/workspace/workspace.rst @@ -175,11 +175,18 @@ notebook_path = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" w.workspace.import_( - content=base64.b64encode(("CREATE LIVE TABLE dlt_sample AS SELECT 1").encode()).decode(), - format=workspace.ImportFormat.SOURCE, - language=workspace.Language.SQL, - overwrite=True, path=notebook_path, + overwrite=True, + format=workspace.ImportFormat.SOURCE, + language=workspace.Language.PYTHON, + content=base64.b64encode( + ( + """import time + time.sleep(10) + dbutils.notebook.exit('hello') + """ + ).encode() + ).decode(), ) Imports a workspace object (for example, a notebook or file) or the contents of an entire directory. diff --git a/tests/databricks/sdk/service/common.py b/tests/databricks/sdk/service/common.py new file mode 100755 index 000000000..2fcd455c5 --- /dev/null +++ b/tests/databricks/sdk/service/common.py @@ -0,0 +1,232 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +from __future__ import annotations + +import logging +from dataclasses import dataclass +from enum import Enum +from typing import Any, Dict, List, Optional + +from databricks.sdk.service._internal import _enum, _from_dict + +_LOG = logging.getLogger("databricks.sdk") + + +# all definitions in this file are in alphabetical order + + +@dataclass +class DatabricksServiceExceptionWithDetailsProto: + """Serialization format for DatabricksServiceException with error details. This message doesn't + work for ScalaPB-04 as google.protobuf.Any is only available to ScalaPB-09. 
Note the definition + of this message should be in sync with DatabricksServiceExceptionProto defined in + /api-base/proto/legacy/databricks.proto except the later one doesn't have the error details + field defined.""" + + details: Optional[List[dict]] = None + """@pbjson-skip""" + + error_code: Optional[ErrorCode] = None + + message: Optional[str] = None + + stack_trace: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the DatabricksServiceExceptionWithDetailsProto into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.details: + body["details"] = [v for v in self.details] + if self.error_code is not None: + body["error_code"] = self.error_code.value + if self.message is not None: + body["message"] = self.message + if self.stack_trace is not None: + body["stack_trace"] = self.stack_trace + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabricksServiceExceptionWithDetailsProto into a shallow dictionary of its immediate attributes.""" + body = {} + if self.details: + body["details"] = self.details + if self.error_code is not None: + body["error_code"] = self.error_code + if self.message is not None: + body["message"] = self.message + if self.stack_trace is not None: + body["stack_trace"] = self.stack_trace + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabricksServiceExceptionWithDetailsProto: + """Deserializes the DatabricksServiceExceptionWithDetailsProto from a dictionary.""" + return cls( + details=d.get("details", None), + error_code=_enum(d, "error_code", ErrorCode), + message=d.get("message", None), + stack_trace=d.get("stack_trace", None), + ) + + +class ErrorCode(Enum): + """Legacy definition of the ErrorCode enum. Please keep in sync with + api-base/proto/error_code.proto (except status code mapping annotations as this file doesn't + have them). 
Will be removed eventually, pending the ScalaPB 0.4 cleanup.""" + + ABORTED = "ABORTED" + ALREADY_EXISTS = "ALREADY_EXISTS" + BAD_REQUEST = "BAD_REQUEST" + CANCELLED = "CANCELLED" + CATALOG_ALREADY_EXISTS = "CATALOG_ALREADY_EXISTS" + CATALOG_DOES_NOT_EXIST = "CATALOG_DOES_NOT_EXIST" + CATALOG_NOT_EMPTY = "CATALOG_NOT_EMPTY" + COULD_NOT_ACQUIRE_LOCK = "COULD_NOT_ACQUIRE_LOCK" + CUSTOMER_UNAUTHORIZED = "CUSTOMER_UNAUTHORIZED" + DAC_ALREADY_EXISTS = "DAC_ALREADY_EXISTS" + DAC_DOES_NOT_EXIST = "DAC_DOES_NOT_EXIST" + DATA_LOSS = "DATA_LOSS" + DEADLINE_EXCEEDED = "DEADLINE_EXCEEDED" + DEPLOYMENT_TIMEOUT = "DEPLOYMENT_TIMEOUT" + DIRECTORY_NOT_EMPTY = "DIRECTORY_NOT_EMPTY" + DIRECTORY_PROTECTED = "DIRECTORY_PROTECTED" + DRY_RUN_FAILED = "DRY_RUN_FAILED" + ENDPOINT_NOT_FOUND = "ENDPOINT_NOT_FOUND" + EXTERNAL_LOCATION_ALREADY_EXISTS = "EXTERNAL_LOCATION_ALREADY_EXISTS" + EXTERNAL_LOCATION_DOES_NOT_EXIST = "EXTERNAL_LOCATION_DOES_NOT_EXIST" + FEATURE_DISABLED = "FEATURE_DISABLED" + GIT_CONFLICT = "GIT_CONFLICT" + GIT_REMOTE_ERROR = "GIT_REMOTE_ERROR" + GIT_SENSITIVE_TOKEN_DETECTED = "GIT_SENSITIVE_TOKEN_DETECTED" + GIT_UNKNOWN_REF = "GIT_UNKNOWN_REF" + GIT_URL_NOT_ON_ALLOW_LIST = "GIT_URL_NOT_ON_ALLOW_LIST" + INSECURE_PARTNER_RESPONSE = "INSECURE_PARTNER_RESPONSE" + INTERNAL_ERROR = "INTERNAL_ERROR" + INVALID_PARAMETER_VALUE = "INVALID_PARAMETER_VALUE" + INVALID_STATE = "INVALID_STATE" + INVALID_STATE_TRANSITION = "INVALID_STATE_TRANSITION" + IO_ERROR = "IO_ERROR" + IPYNB_FILE_IN_REPO = "IPYNB_FILE_IN_REPO" + MALFORMED_PARTNER_RESPONSE = "MALFORMED_PARTNER_RESPONSE" + MALFORMED_REQUEST = "MALFORMED_REQUEST" + MANAGED_RESOURCE_GROUP_DOES_NOT_EXIST = "MANAGED_RESOURCE_GROUP_DOES_NOT_EXIST" + MAX_BLOCK_SIZE_EXCEEDED = "MAX_BLOCK_SIZE_EXCEEDED" + MAX_CHILD_NODE_SIZE_EXCEEDED = "MAX_CHILD_NODE_SIZE_EXCEEDED" + MAX_LIST_SIZE_EXCEEDED = "MAX_LIST_SIZE_EXCEEDED" + MAX_NOTEBOOK_SIZE_EXCEEDED = "MAX_NOTEBOOK_SIZE_EXCEEDED" + MAX_READ_SIZE_EXCEEDED = "MAX_READ_SIZE_EXCEEDED" + METASTORE_ALREADY_EXISTS = "METASTORE_ALREADY_EXISTS" + METASTORE_DOES_NOT_EXIST = "METASTORE_DOES_NOT_EXIST" + METASTORE_NOT_EMPTY = "METASTORE_NOT_EMPTY" + NOT_FOUND = "NOT_FOUND" + NOT_IMPLEMENTED = "NOT_IMPLEMENTED" + PARTIAL_DELETE = "PARTIAL_DELETE" + PERMISSION_DENIED = "PERMISSION_DENIED" + PERMISSION_NOT_PROPAGATED = "PERMISSION_NOT_PROPAGATED" + PRINCIPAL_DOES_NOT_EXIST = "PRINCIPAL_DOES_NOT_EXIST" + PROJECTS_OPERATION_TIMEOUT = "PROJECTS_OPERATION_TIMEOUT" + PROVIDER_ALREADY_EXISTS = "PROVIDER_ALREADY_EXISTS" + PROVIDER_DOES_NOT_EXIST = "PROVIDER_DOES_NOT_EXIST" + PROVIDER_SHARE_NOT_ACCESSIBLE = "PROVIDER_SHARE_NOT_ACCESSIBLE" + QUOTA_EXCEEDED = "QUOTA_EXCEEDED" + RECIPIENT_ALREADY_EXISTS = "RECIPIENT_ALREADY_EXISTS" + RECIPIENT_DOES_NOT_EXIST = "RECIPIENT_DOES_NOT_EXIST" + REQUEST_LIMIT_EXCEEDED = "REQUEST_LIMIT_EXCEEDED" + RESOURCE_ALREADY_EXISTS = "RESOURCE_ALREADY_EXISTS" + RESOURCE_CONFLICT = "RESOURCE_CONFLICT" + RESOURCE_DOES_NOT_EXIST = "RESOURCE_DOES_NOT_EXIST" + RESOURCE_EXHAUSTED = "RESOURCE_EXHAUSTED" + RESOURCE_LIMIT_EXCEEDED = "RESOURCE_LIMIT_EXCEEDED" + SCHEMA_ALREADY_EXISTS = "SCHEMA_ALREADY_EXISTS" + SCHEMA_DOES_NOT_EXIST = "SCHEMA_DOES_NOT_EXIST" + SCHEMA_NOT_EMPTY = "SCHEMA_NOT_EMPTY" + SEARCH_QUERY_TOO_LONG = "SEARCH_QUERY_TOO_LONG" + SEARCH_QUERY_TOO_SHORT = "SEARCH_QUERY_TOO_SHORT" + SERVICE_UNDER_MAINTENANCE = "SERVICE_UNDER_MAINTENANCE" + SHARE_ALREADY_EXISTS = "SHARE_ALREADY_EXISTS" + SHARE_DOES_NOT_EXIST = "SHARE_DOES_NOT_EXIST" + STORAGE_CREDENTIAL_ALREADY_EXISTS = "STORAGE_CREDENTIAL_ALREADY_EXISTS" 
+ STORAGE_CREDENTIAL_DOES_NOT_EXIST = "STORAGE_CREDENTIAL_DOES_NOT_EXIST" + TABLE_ALREADY_EXISTS = "TABLE_ALREADY_EXISTS" + TABLE_DOES_NOT_EXIST = "TABLE_DOES_NOT_EXIST" + TEMPORARILY_UNAVAILABLE = "TEMPORARILY_UNAVAILABLE" + UNAUTHENTICATED = "UNAUTHENTICATED" + UNAVAILABLE = "UNAVAILABLE" + UNKNOWN = "UNKNOWN" + UNPARSEABLE_HTTP_ERROR = "UNPARSEABLE_HTTP_ERROR" + WORKSPACE_TEMPORARILY_UNAVAILABLE = "WORKSPACE_TEMPORARILY_UNAVAILABLE" + + +@dataclass +class Operation: + """This resource represents a long-running operation that is the result of a network API call.""" + + done: Optional[bool] = None + """If the value is `false`, it means the operation is still in progress. If `true`, the operation + is completed, and either `error` or `response` is available.""" + + error: Optional[DatabricksServiceExceptionWithDetailsProto] = None + """The error result of the operation in case of failure or cancellation.""" + + metadata: Optional[dict] = None + """Service-specific metadata associated with the operation. It typically contains progress + information and common metadata such as create time. Some services might not provide such + metadata. Any method that returns a long-running operation should document the metadata type, if + any.""" + + name: Optional[str] = None + """The server-assigned name, which is only unique within the same service that originally returns + it. If you use the default HTTP mapping, the `name` should be a resource name ending with + `operations/{unique_id}`. + + Note: multi-segment resource names are not yet supported in the RPC framework and SDK/TF. Until + that support is added, `name` must be string without internal `/` separators.""" + + response: Optional[dict] = None + """The normal, successful response of the operation. If the original method returns no data on + success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is + standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the + response should have the type `XxxResponse`, where `Xxx` is the original method name. 
For + example, if the original method name is `TakeSnapshot()`, the inferred response type is + `TakeSnapshotResponse`.""" + + def as_dict(self) -> dict: + """Serializes the Operation into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.done is not None: + body["done"] = self.done + if self.error: + body["error"] = self.error.as_dict() + if self.metadata: + body["metadata"] = self.metadata + if self.name is not None: + body["name"] = self.name + if self.response: + body["response"] = self.response + return body + + def as_shallow_dict(self) -> dict: + """Serializes the Operation into a shallow dictionary of its immediate attributes.""" + body = {} + if self.done is not None: + body["done"] = self.done + if self.error: + body["error"] = self.error + if self.metadata: + body["metadata"] = self.metadata + if self.name is not None: + body["name"] = self.name + if self.response: + body["response"] = self.response + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Operation: + """Deserializes the Operation from a dictionary.""" + return cls( + done=d.get("done", None), + error=_from_dict(d, "error", DatabricksServiceExceptionWithDetailsProto), + metadata=d.get("metadata", None), + name=d.get("name", None), + response=d.get("response", None), + ) diff --git a/tests/databricks/sdk/service/httpcallv2.py b/tests/databricks/sdk/service/httpcallv2.py new file mode 100755 index 000000000..60693ac41 --- /dev/null +++ b/tests/databricks/sdk/service/httpcallv2.py @@ -0,0 +1,246 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +from __future__ import annotations + +import logging +from dataclasses import dataclass +from typing import Any, Dict, List, Optional + +from databricks.sdk.common.types.fieldmask import FieldMask + +_LOG = logging.getLogger("databricks.sdk") + + +# all definitions in this file are in alphabetical order + + +@dataclass +class ComplexQueryParam: + nested_optional_query_param: Optional[str] = None + + nested_repeated_query_param: Optional[List[str]] = None + + def as_dict(self) -> dict: + """Serializes the ComplexQueryParam into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.nested_optional_query_param is not None: + body["nested_optional_query_param"] = self.nested_optional_query_param + if self.nested_repeated_query_param: + body["nested_repeated_query_param"] = [v for v in self.nested_repeated_query_param] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ComplexQueryParam into a shallow dictionary of its immediate attributes.""" + body = {} + if self.nested_optional_query_param is not None: + body["nested_optional_query_param"] = self.nested_optional_query_param + if self.nested_repeated_query_param: + body["nested_repeated_query_param"] = self.nested_repeated_query_param + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ComplexQueryParam: + """Deserializes the ComplexQueryParam from a dictionary.""" + return cls( + nested_optional_query_param=d.get("nested_optional_query_param", None), + nested_repeated_query_param=d.get("nested_repeated_query_param", None), + ) + + +@dataclass +class Resource: + any_field: Optional[dict] = None + + body_field: Optional[str] = None + + nested_path_param_bool: Optional[bool] = None + + nested_path_param_int: Optional[int] = None + + nested_path_param_string: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the Resource into a dictionary suitable for use as a JSON 
request body.""" + body = {} + if self.any_field: + body["any_field"] = self.any_field + if self.body_field is not None: + body["body_field"] = self.body_field + if self.nested_path_param_bool is not None: + body["nested_path_param_bool"] = self.nested_path_param_bool + if self.nested_path_param_int is not None: + body["nested_path_param_int"] = self.nested_path_param_int + if self.nested_path_param_string is not None: + body["nested_path_param_string"] = self.nested_path_param_string + return body + + def as_shallow_dict(self) -> dict: + """Serializes the Resource into a shallow dictionary of its immediate attributes.""" + body = {} + if self.any_field: + body["any_field"] = self.any_field + if self.body_field is not None: + body["body_field"] = self.body_field + if self.nested_path_param_bool is not None: + body["nested_path_param_bool"] = self.nested_path_param_bool + if self.nested_path_param_int is not None: + body["nested_path_param_int"] = self.nested_path_param_int + if self.nested_path_param_string is not None: + body["nested_path_param_string"] = self.nested_path_param_string + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Resource: + """Deserializes the Resource from a dictionary.""" + return cls( + any_field=d.get("any_field", None), + body_field=d.get("body_field", None), + nested_path_param_bool=d.get("nested_path_param_bool", None), + nested_path_param_int=d.get("nested_path_param_int", None), + nested_path_param_string=d.get("nested_path_param_string", None), + ) + + +class HttpCallV2API: + """Lorem Ipsum""" + + def __init__(self, api_client): + self._api = api_client + + def create_resource( + self, path_param_string: str, path_param_int: int, path_param_bool: bool, *, body_field: Optional[str] = None + ) -> Resource: + """This mimics "old" style post requests which have the resource inlined. 
+ + :param path_param_string: str + :param path_param_int: int + :param path_param_bool: bool + :param body_field: str (optional) + Body element + + :returns: :class:`Resource` + """ + body = {} + if body_field is not None: + body["body_field"] = body_field + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "POST", + f"/api/2.0/http-call/{path_param_string}/{path_param_int}/{path_param_bool}", + body=body, + headers=headers, + ) + return Resource.from_dict(res) + + def get_resource( + self, + path_param_string: str, + path_param_int: int, + path_param_bool: bool, + *, + field_mask: Optional[FieldMask] = None, + optional_complex_query_param: Optional[ComplexQueryParam] = None, + query_param_bool: Optional[bool] = None, + query_param_int: Optional[int] = None, + query_param_string: Optional[str] = None, + repeated_complex_query_param: Optional[List[ComplexQueryParam]] = None, + repeated_query_param: Optional[List[str]] = None, + ) -> Resource: + + query = {} + if field_mask is not None: + query["field_mask"] = field_mask.ToJsonString() + if optional_complex_query_param is not None: + query["optional_complex_query_param"] = optional_complex_query_param.as_dict() + if query_param_bool is not None: + query["query_param_bool"] = query_param_bool + if query_param_int is not None: + query["query_param_int"] = query_param_int + if query_param_string is not None: + query["query_param_string"] = query_param_string + if repeated_complex_query_param is not None: + query["repeated_complex_query_param"] = [v.as_dict() for v in repeated_complex_query_param] + if repeated_query_param is not None: + query["repeated_query_param"] = [v for v in repeated_query_param] + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", + f"/api/2.0/http-call/{path_param_string}/{path_param_int}/{path_param_bool}", + query=query, + headers=headers, + ) + return Resource.from_dict(res) + + def update_resource( + self, + nested_path_param_string: str, + nested_path_param_int: int, + nested_path_param_bool: bool, + resource: Resource, + *, + field_mask: Optional[FieldMask] = None, + optional_complex_query_param: Optional[ComplexQueryParam] = None, + query_param_bool: Optional[bool] = None, + query_param_int: Optional[int] = None, + query_param_string: Optional[str] = None, + repeated_complex_query_param: Optional[List[ComplexQueryParam]] = None, + repeated_query_param: Optional[List[str]] = None, + ) -> Resource: + """This mimics "new" style post requests which have a body field. + + :param nested_path_param_string: str + :param nested_path_param_int: int + :param nested_path_param_bool: bool + :param resource: :class:`Resource` + Body element + :param field_mask: FieldMask (optional) + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. 
+ :param optional_complex_query_param: :class:`ComplexQueryParam` (optional) + :param query_param_bool: bool (optional) + :param query_param_int: int (optional) + :param query_param_string: str (optional) + :param repeated_complex_query_param: List[:class:`ComplexQueryParam`] (optional) + :param repeated_query_param: List[str] (optional) + + :returns: :class:`Resource` + """ + body = resource.as_dict() + query = {} + if field_mask is not None: + query["field_mask"] = field_mask.ToJsonString() + if optional_complex_query_param is not None: + query["optional_complex_query_param"] = optional_complex_query_param.as_dict() + if query_param_bool is not None: + query["query_param_bool"] = query_param_bool + if query_param_int is not None: + query["query_param_int"] = query_param_int + if query_param_string is not None: + query["query_param_string"] = query_param_string + if repeated_complex_query_param is not None: + query["repeated_complex_query_param"] = [v.as_dict() for v in repeated_complex_query_param] + if repeated_query_param is not None: + query["repeated_query_param"] = [v for v in repeated_query_param] + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/2.0/http-call/{nested_path_param_string}/{nested_path_param_int}/{nested_path_param_bool}", + query=query, + body=body, + headers=headers, + ) + return Resource.from_dict(res) diff --git a/tests/databricks/sdk/service/idempotencytesting.py b/tests/databricks/sdk/service/idempotencytesting.py new file mode 100755 index 000000000..550795aa2 --- /dev/null +++ b/tests/databricks/sdk/service/idempotencytesting.py @@ -0,0 +1,63 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +from __future__ import annotations + +import logging +from dataclasses import dataclass +from typing import Any, Dict, Optional + +_LOG = logging.getLogger("databricks.sdk") + + +# all definitions in this file are in alphabetical order + + +@dataclass +class TestResource: + id: Optional[str] = None + + name: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the TestResource into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.id is not None: + body["id"] = self.id + if self.name is not None: + body["name"] = self.name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the TestResource into a shallow dictionary of its immediate attributes.""" + body = {} + if self.id is not None: + body["id"] = self.id + if self.name is not None: + body["name"] = self.name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> TestResource: + """Deserializes the TestResource from a dictionary.""" + return cls(id=d.get("id", None), name=d.get("name", None)) + + +class IdempotencyTestingAPI: + """Test service for Idempotency of Operations""" + + def __init__(self, api_client): + self._api = api_client + + def create_test_resource(self, test_resource: TestResource, *, request_id: Optional[str] = None) -> TestResource: + + body = test_resource.as_dict() + query = {} + if request_id is not None: + query["request_id"] = request_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/idempotency-testing/resources", query=query, body=body, headers=headers) + return TestResource.from_dict(res) diff --git a/tests/databricks/sdk/service/jsonmarshallv2.py b/tests/databricks/sdk/service/jsonmarshallv2.py new file mode 100755 
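The `field_mask` convention documented for `update_resource` and `get_resource` above is the protobuf FieldMask JSON form: a single comma-separated string with no spaces, with dots navigating nested fields, serialized into the query string via `ToJsonString()`. A minimal usage sketch, assuming a configured `databricks.sdk.core.ApiClient`; the path and body values below are illustrative only:

from databricks.sdk.common.types.fieldmask import FieldMask
from tests.databricks.sdk.service.httpcallv2 import HttpCallV2API, Resource

def patch_body_field(api_client) -> Resource:
    # Comma-separated, no spaces; dots would navigate nested fields.
    fm = FieldMask()
    fm.FromJsonString("body_field")
    api = HttpCallV2API(api_client)
    return api.update_resource(
        nested_path_param_string="example",  # illustrative path params
        nested_path_param_int=1,
        nested_path_param_bool=True,
        resource=Resource(body_field="updated"),
        field_mask=fm,  # sent as ?field_mask=body_field
    )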
index 000000000..60714b1c1 --- /dev/null +++ b/tests/databricks/sdk/service/jsonmarshallv2.py @@ -0,0 +1,486 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +from __future__ import annotations + +import logging +from dataclasses import dataclass +from enum import Enum +from typing import Any, Dict, List, Optional + +from google.protobuf.duration_pb2 import Duration +from google.protobuf.timestamp_pb2 import Timestamp + +from databricks.sdk.common.types.fieldmask import FieldMask +from databricks.sdk.service._internal import (_duration, _enum, _fieldmask, + _from_dict, _repeated_dict, + _repeated_duration, + _repeated_enum, + _repeated_fieldmask, + _repeated_timestamp, _timestamp) + +_LOG = logging.getLogger("databricks.sdk") + + +# all definitions in this file are in alphabetical order + + +@dataclass +class NestedMessage: + optional_duration: Optional[Duration] = None + + optional_string: Optional[str] = None + + optional_timestamp: Optional[Timestamp] = None + + def as_dict(self) -> dict: + """Serializes the NestedMessage into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.optional_duration is not None: + body["optional_duration"] = self.optional_duration.ToJsonString() + if self.optional_string is not None: + body["optional_string"] = self.optional_string + if self.optional_timestamp is not None: + body["optional_timestamp"] = self.optional_timestamp.ToJsonString() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the NestedMessage into a shallow dictionary of its immediate attributes.""" + body = {} + if self.optional_duration is not None: + body["optional_duration"] = self.optional_duration + if self.optional_string is not None: + body["optional_string"] = self.optional_string + if self.optional_timestamp is not None: + body["optional_timestamp"] = self.optional_timestamp + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> NestedMessage: + """Deserializes the NestedMessage from a dictionary.""" + return cls( + optional_duration=_duration(d, "optional_duration"), + optional_string=d.get("optional_string", None), + optional_timestamp=_timestamp(d, "optional_timestamp"), + ) + + +@dataclass +class OptionalFields: + duration: Optional[Duration] = None + + field_mask: Optional[FieldMask] = None + """The field mask must be a single string, with multiple fields separated by commas (no spaces). + The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields + (e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed, + as only the entire collection field can be specified. Field names must exactly match the + resource field names.""" + + legacy_duration: Optional[str] = None + """Legacy Well Known types""" + + legacy_field_mask: Optional[str] = None + """The field mask must be a single string, with multiple fields separated by commas (no spaces). + The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields + (e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed, + as only the entire collection field can be specified. 
Field names must exactly match the + resource field names.""" + + legacy_timestamp: Optional[str] = None + + list_value: Optional[List[any]] = None + + map: Optional[Dict[str, str]] = None + """Lint disable reason: This is a dummy field used to test SDK Generation logic.""" + + optional_bool: Optional[bool] = None + + optional_int32: Optional[int] = None + + optional_int64: Optional[int] = None + + optional_message: Optional[NestedMessage] = None + + optional_string: Optional[str] = None + + struct: Optional[Dict[str, any]] = None + + test_enum: Optional[TestEnum] = None + + timestamp: Optional[Timestamp] = None + + value: Optional[any] = None + + def as_dict(self) -> dict: + """Serializes the OptionalFields into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.duration is not None: + body["duration"] = self.duration.ToJsonString() + if self.field_mask is not None: + body["field_mask"] = self.field_mask.ToJsonString() + if self.legacy_duration is not None: + body["legacy_duration"] = self.legacy_duration + if self.legacy_field_mask is not None: + body["legacy_field_mask"] = self.legacy_field_mask + if self.legacy_timestamp is not None: + body["legacy_timestamp"] = self.legacy_timestamp + if self.list_value: + body["list_value"] = [v for v in self.list_value] + if self.map: + body["map"] = self.map + if self.optional_bool is not None: + body["optional_bool"] = self.optional_bool + if self.optional_int32 is not None: + body["optional_int32"] = self.optional_int32 + if self.optional_int64 is not None: + body["optional_int64"] = self.optional_int64 + if self.optional_message: + body["optional_message"] = self.optional_message.as_dict() + if self.optional_string is not None: + body["optional_string"] = self.optional_string + if self.struct: + body["struct"] = self.struct + if self.test_enum is not None: + body["test_enum"] = self.test_enum.value + if self.timestamp is not None: + body["timestamp"] = self.timestamp.ToJsonString() + if self.value: + body["value"] = self.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the OptionalFields into a shallow dictionary of its immediate attributes.""" + body = {} + if self.duration is not None: + body["duration"] = self.duration + if self.field_mask is not None: + body["field_mask"] = self.field_mask + if self.legacy_duration is not None: + body["legacy_duration"] = self.legacy_duration + if self.legacy_field_mask is not None: + body["legacy_field_mask"] = self.legacy_field_mask + if self.legacy_timestamp is not None: + body["legacy_timestamp"] = self.legacy_timestamp + if self.list_value: + body["list_value"] = self.list_value + if self.map: + body["map"] = self.map + if self.optional_bool is not None: + body["optional_bool"] = self.optional_bool + if self.optional_int32 is not None: + body["optional_int32"] = self.optional_int32 + if self.optional_int64 is not None: + body["optional_int64"] = self.optional_int64 + if self.optional_message: + body["optional_message"] = self.optional_message + if self.optional_string is not None: + body["optional_string"] = self.optional_string + if self.struct: + body["struct"] = self.struct + if self.test_enum is not None: + body["test_enum"] = self.test_enum + if self.timestamp is not None: + body["timestamp"] = self.timestamp + if self.value: + body["value"] = self.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> OptionalFields: + """Deserializes the OptionalFields from a dictionary.""" + return cls( + duration=_duration(d, 
"duration"), + field_mask=_fieldmask(d, "field_mask"), + legacy_duration=d.get("legacy_duration", None), + legacy_field_mask=d.get("legacy_field_mask", None), + legacy_timestamp=d.get("legacy_timestamp", None), + list_value=d.get("list_value", None), + map=d.get("map", None), + optional_bool=d.get("optional_bool", None), + optional_int32=d.get("optional_int32", None), + optional_int64=d.get("optional_int64", None), + optional_message=_from_dict(d, "optional_message", NestedMessage), + optional_string=d.get("optional_string", None), + struct=d.get("struct", None), + test_enum=_enum(d, "test_enum", TestEnum), + timestamp=_timestamp(d, "timestamp"), + value=d.get("value", None), + ) + + +@dataclass +class RepeatedFields: + repeated_bool: Optional[List[bool]] = None + + repeated_duration: Optional[List[Duration]] = None + + repeated_field_mask: Optional[List[FieldMask]] = None + + repeated_int32: Optional[List[int]] = None + + repeated_int64: Optional[List[int]] = None + + repeated_list_value: Optional[List[List[any]]] = None + + repeated_message: Optional[List[NestedMessage]] = None + + repeated_string: Optional[List[str]] = None + + repeated_struct: Optional[List[Dict[str, any]]] = None + + repeated_timestamp: Optional[List[Timestamp]] = None + + repeated_value: Optional[List[any]] = None + + test_repeated_enum: Optional[List[TestEnum]] = None + + def as_dict(self) -> dict: + """Serializes the RepeatedFields into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.repeated_bool: + body["repeated_bool"] = [v for v in self.repeated_bool] + if self.repeated_duration: + body["repeated_duration"] = [v.ToJsonString() for v in self.repeated_duration] + if self.repeated_field_mask: + body["repeated_field_mask"] = [v.ToJsonString() for v in self.repeated_field_mask] + if self.repeated_int32: + body["repeated_int32"] = [v for v in self.repeated_int32] + if self.repeated_int64: + body["repeated_int64"] = [v for v in self.repeated_int64] + if self.repeated_list_value: + body["repeated_list_value"] = [v for v in self.repeated_list_value] + if self.repeated_message: + body["repeated_message"] = [v.as_dict() for v in self.repeated_message] + if self.repeated_string: + body["repeated_string"] = [v for v in self.repeated_string] + if self.repeated_struct: + body["repeated_struct"] = [v for v in self.repeated_struct] + if self.repeated_timestamp: + body["repeated_timestamp"] = [v.ToJsonString() for v in self.repeated_timestamp] + if self.repeated_value: + body["repeated_value"] = [v for v in self.repeated_value] + if self.test_repeated_enum: + body["test_repeated_enum"] = [v.value for v in self.test_repeated_enum] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the RepeatedFields into a shallow dictionary of its immediate attributes.""" + body = {} + if self.repeated_bool: + body["repeated_bool"] = self.repeated_bool + if self.repeated_duration: + body["repeated_duration"] = self.repeated_duration + if self.repeated_field_mask: + body["repeated_field_mask"] = self.repeated_field_mask + if self.repeated_int32: + body["repeated_int32"] = self.repeated_int32 + if self.repeated_int64: + body["repeated_int64"] = self.repeated_int64 + if self.repeated_list_value: + body["repeated_list_value"] = self.repeated_list_value + if self.repeated_message: + body["repeated_message"] = self.repeated_message + if self.repeated_string: + body["repeated_string"] = self.repeated_string + if self.repeated_struct: + body["repeated_struct"] = self.repeated_struct + if 
self.repeated_timestamp: + body["repeated_timestamp"] = self.repeated_timestamp + if self.repeated_value: + body["repeated_value"] = self.repeated_value + if self.test_repeated_enum: + body["test_repeated_enum"] = self.test_repeated_enum + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> RepeatedFields: + """Deserializes the RepeatedFields from a dictionary.""" + return cls( + repeated_bool=d.get("repeated_bool", None), + repeated_duration=_repeated_duration(d, "repeated_duration"), + repeated_field_mask=_repeated_fieldmask(d, "repeated_field_mask"), + repeated_int32=d.get("repeated_int32", None), + repeated_int64=d.get("repeated_int64", None), + repeated_list_value=d.get("repeated_list_value", None), + repeated_message=_repeated_dict(d, "repeated_message", NestedMessage), + repeated_string=d.get("repeated_string", None), + repeated_struct=d.get("repeated_struct", None), + repeated_timestamp=_repeated_timestamp(d, "repeated_timestamp"), + repeated_value=d.get("repeated_value", None), + test_repeated_enum=_repeated_enum(d, "test_repeated_enum", TestEnum), + ) + + +@dataclass +class RequiredFields: + required_string: str + + required_int32: int + + required_int64: int + + required_bool: bool + + required_message: NestedMessage + + test_required_enum: TestEnum + + required_duration: Duration + + required_field_mask: FieldMask + """The field mask must be a single string, with multiple fields separated by commas (no spaces). + The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields + (e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed, + as only the entire collection field can be specified. Field names must exactly match the + resource field names.""" + + required_timestamp: Timestamp + + required_value: any + + required_list_value: List[any] + + required_struct: Dict[str, any] + + def as_dict(self) -> dict: + """Serializes the RequiredFields into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.required_bool is not None: + body["required_bool"] = self.required_bool + if self.required_duration is not None: + body["required_duration"] = self.required_duration.ToJsonString() + if self.required_field_mask is not None: + body["required_field_mask"] = self.required_field_mask.ToJsonString() + if self.required_int32 is not None: + body["required_int32"] = self.required_int32 + if self.required_int64 is not None: + body["required_int64"] = self.required_int64 + if self.required_list_value: + body["required_list_value"] = [v for v in self.required_list_value] + if self.required_message: + body["required_message"] = self.required_message.as_dict() + if self.required_string is not None: + body["required_string"] = self.required_string + if self.required_struct: + body["required_struct"] = self.required_struct + if self.required_timestamp is not None: + body["required_timestamp"] = self.required_timestamp.ToJsonString() + if self.required_value: + body["required_value"] = self.required_value + if self.test_required_enum is not None: + body["test_required_enum"] = self.test_required_enum.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the RequiredFields into a shallow dictionary of its immediate attributes.""" + body = {} + if self.required_bool is not None: + body["required_bool"] = self.required_bool + if self.required_duration is not None: + body["required_duration"] = self.required_duration + if self.required_field_mask is not None: + 
body["required_field_mask"] = self.required_field_mask + if self.required_int32 is not None: + body["required_int32"] = self.required_int32 + if self.required_int64 is not None: + body["required_int64"] = self.required_int64 + if self.required_list_value: + body["required_list_value"] = self.required_list_value + if self.required_message: + body["required_message"] = self.required_message + if self.required_string is not None: + body["required_string"] = self.required_string + if self.required_struct: + body["required_struct"] = self.required_struct + if self.required_timestamp is not None: + body["required_timestamp"] = self.required_timestamp + if self.required_value: + body["required_value"] = self.required_value + if self.test_required_enum is not None: + body["test_required_enum"] = self.test_required_enum + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> RequiredFields: + """Deserializes the RequiredFields from a dictionary.""" + return cls( + required_bool=d.get("required_bool", None), + required_duration=_duration(d, "required_duration"), + required_field_mask=_fieldmask(d, "required_field_mask"), + required_int32=d.get("required_int32", None), + required_int64=d.get("required_int64", None), + required_list_value=d.get("required_list_value", None), + required_message=_from_dict(d, "required_message", NestedMessage), + required_string=d.get("required_string", None), + required_struct=d.get("required_struct", None), + required_timestamp=_timestamp(d, "required_timestamp"), + required_value=d.get("required_value", None), + test_required_enum=_enum(d, "test_required_enum", TestEnum), + ) + + +@dataclass +class Resource: + """We separate this into 3 submessages to simplify test cases. E.g., any required top level field + needs to be included in the expected json for each test case.""" + + optional_fields: Optional[OptionalFields] = None + + repeated_fields: Optional[RepeatedFields] = None + + required_fields: Optional[RequiredFields] = None + + def as_dict(self) -> dict: + """Serializes the Resource into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.optional_fields: + body["optional_fields"] = self.optional_fields.as_dict() + if self.repeated_fields: + body["repeated_fields"] = self.repeated_fields.as_dict() + if self.required_fields: + body["required_fields"] = self.required_fields.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the Resource into a shallow dictionary of its immediate attributes.""" + body = {} + if self.optional_fields: + body["optional_fields"] = self.optional_fields + if self.repeated_fields: + body["repeated_fields"] = self.repeated_fields + if self.required_fields: + body["required_fields"] = self.required_fields + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Resource: + """Deserializes the Resource from a dictionary.""" + return cls( + optional_fields=_from_dict(d, "optional_fields", OptionalFields), + repeated_fields=_from_dict(d, "repeated_fields", RepeatedFields), + required_fields=_from_dict(d, "required_fields", RequiredFields), + ) + + +class TestEnum(Enum): + + TEST_ENUM_ONE = "TEST_ENUM_ONE" + TEST_ENUM_TWO = "TEST_ENUM_TWO" + + +class JsonMarshallV2API: + """Lorem Ipsum""" + + def __init__(self, api_client): + self._api = api_client + + def get_resource(self, name: str, resource: Resource) -> Resource: + + query = {} + if resource is not None: + query["resource"] = resource.as_dict() + headers = { + "Accept": "application/json", + } + + res = 
self._api.do("GET", f"/api/2.0/json-marshall/{name}", query=query, headers=headers) + return Resource.from_dict(res) diff --git a/tests/databricks/sdk/service/lrotesting.py b/tests/databricks/sdk/service/lrotesting.py new file mode 100755 index 000000000..6a4325b53 --- /dev/null +++ b/tests/databricks/sdk/service/lrotesting.py @@ -0,0 +1,444 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +from __future__ import annotations + +import logging +from dataclasses import dataclass +from datetime import timedelta +from enum import Enum +from typing import Any, Dict, List, Optional + +from databricks.sdk.common import lro +from databricks.sdk.retries import RetryError, poll +from databricks.sdk.service._internal import _enum, _from_dict + +_LOG = logging.getLogger("databricks.sdk") + + +# all definitions in this file are in alphabetical order + + +@dataclass +class DatabricksServiceExceptionWithDetailsProto: + """Serialization format for DatabricksServiceException with error details. This message doesn't + work for ScalaPB-04 as google.protobuf.Any is only available to ScalaPB-09. Note the definition + of this message should be in sync with DatabricksServiceExceptionProto defined in + /api-base/proto/legacy/databricks.proto except the later one doesn't have the error details + field defined.""" + + details: Optional[List[dict]] = None + """@pbjson-skip""" + + error_code: Optional[ErrorCode] = None + + message: Optional[str] = None + + stack_trace: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the DatabricksServiceExceptionWithDetailsProto into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.details: + body["details"] = [v for v in self.details] + if self.error_code is not None: + body["error_code"] = self.error_code.value + if self.message is not None: + body["message"] = self.message + if self.stack_trace is not None: + body["stack_trace"] = self.stack_trace + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabricksServiceExceptionWithDetailsProto into a shallow dictionary of its immediate attributes.""" + body = {} + if self.details: + body["details"] = self.details + if self.error_code is not None: + body["error_code"] = self.error_code + if self.message is not None: + body["message"] = self.message + if self.stack_trace is not None: + body["stack_trace"] = self.stack_trace + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabricksServiceExceptionWithDetailsProto: + """Deserializes the DatabricksServiceExceptionWithDetailsProto from a dictionary.""" + return cls( + details=d.get("details", None), + error_code=_enum(d, "error_code", ErrorCode), + message=d.get("message", None), + stack_trace=d.get("stack_trace", None), + ) + + +class ErrorCode(Enum): + """Legacy definition of the ErrorCode enum. Please keep in sync with + api-base/proto/error_code.proto (except status code mapping annotations as this file doesn't + have them). 
Will be removed eventually, pending the ScalaPB 0.4 cleanup.""" + + ABORTED = "ABORTED" + ALREADY_EXISTS = "ALREADY_EXISTS" + BAD_REQUEST = "BAD_REQUEST" + CANCELLED = "CANCELLED" + CATALOG_ALREADY_EXISTS = "CATALOG_ALREADY_EXISTS" + CATALOG_DOES_NOT_EXIST = "CATALOG_DOES_NOT_EXIST" + CATALOG_NOT_EMPTY = "CATALOG_NOT_EMPTY" + COULD_NOT_ACQUIRE_LOCK = "COULD_NOT_ACQUIRE_LOCK" + CUSTOMER_UNAUTHORIZED = "CUSTOMER_UNAUTHORIZED" + DAC_ALREADY_EXISTS = "DAC_ALREADY_EXISTS" + DAC_DOES_NOT_EXIST = "DAC_DOES_NOT_EXIST" + DATA_LOSS = "DATA_LOSS" + DEADLINE_EXCEEDED = "DEADLINE_EXCEEDED" + DEPLOYMENT_TIMEOUT = "DEPLOYMENT_TIMEOUT" + DIRECTORY_NOT_EMPTY = "DIRECTORY_NOT_EMPTY" + DIRECTORY_PROTECTED = "DIRECTORY_PROTECTED" + DRY_RUN_FAILED = "DRY_RUN_FAILED" + ENDPOINT_NOT_FOUND = "ENDPOINT_NOT_FOUND" + EXTERNAL_LOCATION_ALREADY_EXISTS = "EXTERNAL_LOCATION_ALREADY_EXISTS" + EXTERNAL_LOCATION_DOES_NOT_EXIST = "EXTERNAL_LOCATION_DOES_NOT_EXIST" + FEATURE_DISABLED = "FEATURE_DISABLED" + GIT_CONFLICT = "GIT_CONFLICT" + GIT_REMOTE_ERROR = "GIT_REMOTE_ERROR" + GIT_SENSITIVE_TOKEN_DETECTED = "GIT_SENSITIVE_TOKEN_DETECTED" + GIT_UNKNOWN_REF = "GIT_UNKNOWN_REF" + GIT_URL_NOT_ON_ALLOW_LIST = "GIT_URL_NOT_ON_ALLOW_LIST" + INSECURE_PARTNER_RESPONSE = "INSECURE_PARTNER_RESPONSE" + INTERNAL_ERROR = "INTERNAL_ERROR" + INVALID_PARAMETER_VALUE = "INVALID_PARAMETER_VALUE" + INVALID_STATE = "INVALID_STATE" + INVALID_STATE_TRANSITION = "INVALID_STATE_TRANSITION" + IO_ERROR = "IO_ERROR" + IPYNB_FILE_IN_REPO = "IPYNB_FILE_IN_REPO" + MALFORMED_PARTNER_RESPONSE = "MALFORMED_PARTNER_RESPONSE" + MALFORMED_REQUEST = "MALFORMED_REQUEST" + MANAGED_RESOURCE_GROUP_DOES_NOT_EXIST = "MANAGED_RESOURCE_GROUP_DOES_NOT_EXIST" + MAX_BLOCK_SIZE_EXCEEDED = "MAX_BLOCK_SIZE_EXCEEDED" + MAX_CHILD_NODE_SIZE_EXCEEDED = "MAX_CHILD_NODE_SIZE_EXCEEDED" + MAX_LIST_SIZE_EXCEEDED = "MAX_LIST_SIZE_EXCEEDED" + MAX_NOTEBOOK_SIZE_EXCEEDED = "MAX_NOTEBOOK_SIZE_EXCEEDED" + MAX_READ_SIZE_EXCEEDED = "MAX_READ_SIZE_EXCEEDED" + METASTORE_ALREADY_EXISTS = "METASTORE_ALREADY_EXISTS" + METASTORE_DOES_NOT_EXIST = "METASTORE_DOES_NOT_EXIST" + METASTORE_NOT_EMPTY = "METASTORE_NOT_EMPTY" + NOT_FOUND = "NOT_FOUND" + NOT_IMPLEMENTED = "NOT_IMPLEMENTED" + PARTIAL_DELETE = "PARTIAL_DELETE" + PERMISSION_DENIED = "PERMISSION_DENIED" + PERMISSION_NOT_PROPAGATED = "PERMISSION_NOT_PROPAGATED" + PRINCIPAL_DOES_NOT_EXIST = "PRINCIPAL_DOES_NOT_EXIST" + PROJECTS_OPERATION_TIMEOUT = "PROJECTS_OPERATION_TIMEOUT" + PROVIDER_ALREADY_EXISTS = "PROVIDER_ALREADY_EXISTS" + PROVIDER_DOES_NOT_EXIST = "PROVIDER_DOES_NOT_EXIST" + PROVIDER_SHARE_NOT_ACCESSIBLE = "PROVIDER_SHARE_NOT_ACCESSIBLE" + QUOTA_EXCEEDED = "QUOTA_EXCEEDED" + RECIPIENT_ALREADY_EXISTS = "RECIPIENT_ALREADY_EXISTS" + RECIPIENT_DOES_NOT_EXIST = "RECIPIENT_DOES_NOT_EXIST" + REQUEST_LIMIT_EXCEEDED = "REQUEST_LIMIT_EXCEEDED" + RESOURCE_ALREADY_EXISTS = "RESOURCE_ALREADY_EXISTS" + RESOURCE_CONFLICT = "RESOURCE_CONFLICT" + RESOURCE_DOES_NOT_EXIST = "RESOURCE_DOES_NOT_EXIST" + RESOURCE_EXHAUSTED = "RESOURCE_EXHAUSTED" + RESOURCE_LIMIT_EXCEEDED = "RESOURCE_LIMIT_EXCEEDED" + SCHEMA_ALREADY_EXISTS = "SCHEMA_ALREADY_EXISTS" + SCHEMA_DOES_NOT_EXIST = "SCHEMA_DOES_NOT_EXIST" + SCHEMA_NOT_EMPTY = "SCHEMA_NOT_EMPTY" + SEARCH_QUERY_TOO_LONG = "SEARCH_QUERY_TOO_LONG" + SEARCH_QUERY_TOO_SHORT = "SEARCH_QUERY_TOO_SHORT" + SERVICE_UNDER_MAINTENANCE = "SERVICE_UNDER_MAINTENANCE" + SHARE_ALREADY_EXISTS = "SHARE_ALREADY_EXISTS" + SHARE_DOES_NOT_EXIST = "SHARE_DOES_NOT_EXIST" + STORAGE_CREDENTIAL_ALREADY_EXISTS = "STORAGE_CREDENTIAL_ALREADY_EXISTS" 
+ STORAGE_CREDENTIAL_DOES_NOT_EXIST = "STORAGE_CREDENTIAL_DOES_NOT_EXIST" + TABLE_ALREADY_EXISTS = "TABLE_ALREADY_EXISTS" + TABLE_DOES_NOT_EXIST = "TABLE_DOES_NOT_EXIST" + TEMPORARILY_UNAVAILABLE = "TEMPORARILY_UNAVAILABLE" + UNAUTHENTICATED = "UNAUTHENTICATED" + UNAVAILABLE = "UNAVAILABLE" + UNKNOWN = "UNKNOWN" + UNPARSEABLE_HTTP_ERROR = "UNPARSEABLE_HTTP_ERROR" + WORKSPACE_TEMPORARILY_UNAVAILABLE = "WORKSPACE_TEMPORARILY_UNAVAILABLE" + + +@dataclass +class Operation: + """This resource represents a long-running operation that is the result of a network API call.""" + + done: Optional[bool] = None + """If the value is `false`, it means the operation is still in progress. If `true`, the operation + is completed, and either `error` or `response` is available.""" + + error: Optional[DatabricksServiceExceptionWithDetailsProto] = None + """The error result of the operation in case of failure or cancellation.""" + + metadata: Optional[dict] = None + """Service-specific metadata associated with the operation. It typically contains progress + information and common metadata such as create time. Some services might not provide such + metadata. Any method that returns a long-running operation should document the metadata type, if + any.""" + + name: Optional[str] = None + """The server-assigned name, which is only unique within the same service that originally returns + it. If you use the default HTTP mapping, the `name` should be a resource name ending with + `operations/{unique_id}`. + + Note: multi-segment resource names are not yet supported in the RPC framework and SDK/TF. Until + that support is added, `name` must be string without internal `/` separators.""" + + response: Optional[dict] = None + """The normal, successful response of the operation. If the original method returns no data on + success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is + standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the + response should have the type `XxxResponse`, where `Xxx` is the original method name. 
For + example, if the original method name is `TakeSnapshot()`, the inferred response type is + `TakeSnapshotResponse`.""" + + def as_dict(self) -> dict: + """Serializes the Operation into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.done is not None: + body["done"] = self.done + if self.error: + body["error"] = self.error.as_dict() + if self.metadata: + body["metadata"] = self.metadata + if self.name is not None: + body["name"] = self.name + if self.response: + body["response"] = self.response + return body + + def as_shallow_dict(self) -> dict: + """Serializes the Operation into a shallow dictionary of its immediate attributes.""" + body = {} + if self.done is not None: + body["done"] = self.done + if self.error: + body["error"] = self.error + if self.metadata: + body["metadata"] = self.metadata + if self.name is not None: + body["name"] = self.name + if self.response: + body["response"] = self.response + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Operation: + """Deserializes the Operation from a dictionary.""" + return cls( + done=d.get("done", None), + error=_from_dict(d, "error", DatabricksServiceExceptionWithDetailsProto), + metadata=d.get("metadata", None), + name=d.get("name", None), + response=d.get("response", None), + ) + + +@dataclass +class TestResource: + """Test resource for LRO operations""" + + id: Optional[str] = None + """Unique identifier for the resource""" + + name: Optional[str] = None + """Name of the resource""" + + def as_dict(self) -> dict: + """Serializes the TestResource into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.id is not None: + body["id"] = self.id + if self.name is not None: + body["name"] = self.name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the TestResource into a shallow dictionary of its immediate attributes.""" + body = {} + if self.id is not None: + body["id"] = self.id + if self.name is not None: + body["name"] = self.name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> TestResource: + """Deserializes the TestResource from a dictionary.""" + return cls(id=d.get("id", None), name=d.get("name", None)) + + +@dataclass +class TestResourceOperationMetadata: + """Metadata for test resource operations""" + + progress_percent: Optional[int] = None + """Progress percentage (0-100)""" + + resource_id: Optional[str] = None + """ID of the resource being operated on""" + + def as_dict(self) -> dict: + """Serializes the TestResourceOperationMetadata into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.progress_percent is not None: + body["progress_percent"] = self.progress_percent + if self.resource_id is not None: + body["resource_id"] = self.resource_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the TestResourceOperationMetadata into a shallow dictionary of its immediate attributes.""" + body = {} + if self.progress_percent is not None: + body["progress_percent"] = self.progress_percent + if self.resource_id is not None: + body["resource_id"] = self.resource_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> TestResourceOperationMetadata: + """Deserializes the TestResourceOperationMetadata from a dictionary.""" + return cls(progress_percent=d.get("progress_percent", None), resource_id=d.get("resource_id", None)) + + +class LroTestingAPI: + """Test service for Long Running Operations""" + + def __init__(self, api_client): + 
self._api = api_client + + def cancel_operation(self, name: str): + + headers = { + "Accept": "application/json", + } + + self._api.do("POST", f"/api/2.0/lro-testing/operations/{name}/cancel", headers=headers) + + def create_test_resource(self, resource: TestResource) -> CreateTestResourceOperation: + """Simple method to create test resource for LRO testing + + :param resource: :class:`TestResource` + The resource to create + + :returns: :class:`Operation` + """ + body = resource.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/lro-testing/resources", body=body, headers=headers) + operation = Operation.from_dict(res) + return CreateTestResourceOperation(self, operation) + + def get_operation(self, name: str) -> Operation: + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/lro-testing/operations/{name}", headers=headers) + return Operation.from_dict(res) + + def get_test_resource(self, resource_id: str) -> TestResource: + """Simple method to get test resource + + :param resource_id: str + Resource ID to get + + :returns: :class:`TestResource` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/lro-testing/resources/{resource_id}", headers=headers) + return TestResource.from_dict(res) + + +class CreateTestResourceOperation: + """Long-running operation for create_test_resource""" + + def __init__(self, impl: LroTestingAPI, operation: Operation): + self._impl = impl + self._operation = operation + + def wait(self, opts: Optional[lro.LroOptions] = None) -> TestResource: + """Wait blocks until the long-running operation is completed with default 20 min + timeout. If the operation didn't finish within the timeout, this function will + raise an error of type TimeoutError, otherwise returns successful response and + any errors encountered. + + :param opts: :class:`LroOptions` + Timeout options (default: 20 minutes) + + :returns: :class:`TestResource` + """ + + def poll_operation(): + operation = self._impl.get_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + if not operation.done: + return None, RetryError.continues("operation still in progress") + + if operation.error: + error_msg = operation.error.message if operation.error.message else "unknown error" + if operation.error.error_code: + error_msg = f"[{operation.error.error_code}] {error_msg}" + return None, RetryError.halt(Exception(f"operation failed: {error_msg}")) + + # Operation completed successfully, unmarshal response. + if operation.response is None: + return None, RetryError.halt(Exception("operation completed but no response available")) + + test_resource = TestResource.from_dict(operation.response) + + return test_resource, None + + return poll(poll_operation, timeout=opts.timeout if opts is not None else timedelta(minutes=20)) + + def cancel(self): + """Starts asynchronous cancellation on a long-running operation. The server + makes a best effort to cancel the operation, but success is not guaranteed. + """ + self._impl.cancel_operation(name=self._operation.name) + + def name(self) -> str: + """Name returns the name of the long-running operation. The name is assigned + by the server and is unique within the service from which the operation is created. 
+ + :returns: str + """ + return self._operation.name + + def metadata(self) -> TestResourceOperationMetadata: + """Metadata returns metadata associated with the long-running operation. + If the metadata is not available, the returned metadata is None. + + :returns: :class:`TestResourceOperationMetadata` or None + """ + if self._operation.metadata is None: + return None + + return TestResourceOperationMetadata.from_dict(self._operation.metadata) + + def done(self) -> bool: + """Done reports whether the long-running operation has completed. + + :returns: bool + """ + # Refresh the operation state first + operation = self._impl.get_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + return operation.done diff --git a/tests/generated/test_http_call.py b/tests/generated/test_http_call.py new file mode 100755 index 000000000..eecb6cf9c --- /dev/null +++ b/tests/generated/test_http_call.py @@ -0,0 +1,310 @@ +# Code generated by Databricks SDK Generator. DO NOT EDIT. + +import pytest +from google.protobuf.duration_pb2 import Duration +from google.protobuf.timestamp_pb2 import Timestamp + +import databricks.sdk.core as client +from databricks.sdk.common.types.fieldmask import FieldMask +from tests.databricks.sdk.service.httpcallv2 import (ComplexQueryParam, + HttpCallV2API, Resource) + + +# Helper methods for well known types +def _duration(d: str) -> Duration: + dur = Duration() + dur.FromJsonString(d) + return dur + + +def _timestamp(d: str) -> Timestamp: + ts = Timestamp() + ts.FromJsonString(d) + return ts + + +def _fieldmask(d: str) -> FieldMask: + fm = FieldMask() + fm.FromJsonString(d) + return fm + + +@pytest.mark.parametrize( + "apply_mock,make_call", + [ + ( + lambda requests_mock: requests_mock.post("http://localhost/api/2.0/http-call/string_val/123/true"), + lambda client: client.create_resource( + path_param_string="string_val", + path_param_int=123, + path_param_bool=True, + ), + ), + ( + lambda requests_mock: requests_mock.post("http://localhost/api/2.0/http-call/test_string/456/false"), + lambda client: client.create_resource( + body_field="request_body_content", + path_param_string="test_string", + path_param_int=456, + path_param_bool=False, + ), + ), + ( + lambda requests_mock: requests_mock.patch("http://localhost/api/2.0/http-call/update_string/789/true"), + lambda client: client.update_resource( + resource=Resource( + any_field={"key": "value"}, + nested_path_param_bool=True, + nested_path_param_int=789, + nested_path_param_string="update_string", + ), + nested_path_param_string="update_string", + nested_path_param_int=789, + nested_path_param_bool=True, + ), + ), + ( + lambda requests_mock: requests_mock.patch("http://localhost/api/2.0/http-call/update_string/789/true"), + lambda client: client.update_resource( + resource=Resource( + body_field="request_body_content", + nested_path_param_bool=True, + nested_path_param_int=789, + nested_path_param_string="update_string", + ), + nested_path_param_string="update_string", + nested_path_param_int=789, + nested_path_param_bool=True, + ), + ), + ( + lambda requests_mock: requests_mock.patch( + "http://localhost/api/2.0/http-call/update_string/789/true?field_mask=field.mask.value&query_param_bool=true&query_param_int=999&query_param_string=query_string_val" + ), + lambda client: client.update_resource( + resource=Resource( + nested_path_param_bool=True, + nested_path_param_int=789, + nested_path_param_string="update_string", + ), + nested_path_param_string="update_string", + 
nested_path_param_int=789, + nested_path_param_bool=True, + query_param_string="query_string_val", + query_param_int=999, + query_param_bool=True, + field_mask=_fieldmask("field.mask.value"), + ), + ), + ( + lambda requests_mock: requests_mock.patch( + "http://localhost/api/2.0/http-call/update_string/789/True?optional_complex_query_param.nested_optional_query_param=nested_optional" + ), + lambda client: client.update_resource( + resource=Resource( + nested_path_param_bool=True, + nested_path_param_int=789, + nested_path_param_string="update_string", + ), + nested_path_param_string="update_string", + nested_path_param_int=789, + nested_path_param_bool=True, + optional_complex_query_param=ComplexQueryParam( + nested_optional_query_param="nested_optional", + ), + ), + ), + ( + lambda requests_mock: requests_mock.patch( + "http://localhost/api/2.0/http-call/update_string/789/True?repeated_query_param=item1&repeated_query_param=item2&repeated_query_param=item3" + ), + lambda client: client.update_resource( + resource=Resource( + nested_path_param_bool=True, + nested_path_param_int=789, + nested_path_param_string="update_string", + ), + nested_path_param_string="update_string", + nested_path_param_int=789, + nested_path_param_bool=True, + repeated_query_param=[ + "item1", + "item2", + "item3", + ], + ), + ), + ( + lambda requests_mock: requests_mock.patch( + "http://localhost/api/2.0/http-call/update_string/789/True?optional_complex_query_param.nested_repeated_query_param=item1&optional_complex_query_param.nested_repeated_query_param=item2&optional_complex_query_param.nested_repeated_query_param=item3" + ), + lambda client: client.update_resource( + resource=Resource( + nested_path_param_bool=True, + nested_path_param_int=789, + nested_path_param_string="update_string", + ), + nested_path_param_string="update_string", + nested_path_param_int=789, + nested_path_param_bool=True, + optional_complex_query_param=ComplexQueryParam( + nested_repeated_query_param=[ + "item1", + "item2", + "item3", + ], + ), + ), + ), + ( + lambda requests_mock: requests_mock.patch( + "http://localhost/api/2.0/http-call/update_string/789/True?repeated_complex_query_param=nested_repeated_query_param&repeated_complex_query_param=nested_repeated_query_param" + ), + lambda client: client.update_resource( + resource=Resource( + nested_path_param_bool=True, + nested_path_param_int=789, + nested_path_param_string="update_string", + ), + nested_path_param_string="update_string", + nested_path_param_int=789, + nested_path_param_bool=True, + repeated_complex_query_param=[ + ComplexQueryParam( + nested_repeated_query_param=[ + "item1", + "item2", + "item3", + ], + ), + ComplexQueryParam( + nested_repeated_query_param=[ + "item4", + "item5", + "item6", + ], + ), + ], + ), + ), + ( + lambda requests_mock: requests_mock.get("http://localhost/api/2.0/http-call/get_string/123/true?"), + lambda client: client.get_resource( + path_param_string="get_string", + path_param_int=123, + path_param_bool=True, + ), + ), + ( + lambda requests_mock: requests_mock.get( + "http://localhost/api/2.0/http-call/get_string/456/false?field_mask=field.mask.value&query_param_bool=true&query_param_int=999&query_param_string=query_string_val" + ), + lambda client: client.get_resource( + path_param_string="get_string", + path_param_int=456, + path_param_bool=False, + query_param_string="query_string_val", + query_param_int=999, + query_param_bool=True, + field_mask=_fieldmask("field.mask.value"), + ), + ), + ( + lambda requests_mock: requests_mock.get( + 
"http://localhost/api/2.0/http-call/get_string/789/true?optional_complex_query_param.nested_optional_query_param=nested_optional" + ), + lambda client: client.get_resource( + path_param_string="get_string", + path_param_int=789, + path_param_bool=True, + optional_complex_query_param=ComplexQueryParam( + nested_optional_query_param="nested_optional", + ), + ), + ), + ( + lambda requests_mock: requests_mock.get( + "http://localhost/api/2.0/http-call/get_string/101/false?repeated_query_param=item1&repeated_query_param=item2&repeated_query_param=item3" + ), + lambda client: client.get_resource( + path_param_string="get_string", + path_param_int=101, + path_param_bool=False, + repeated_query_param=[ + "item1", + "item2", + "item3", + ], + ), + ), + ( + lambda requests_mock: requests_mock.get( + "http://localhost/api/2.0/http-call/get_string/202/true?optional_complex_query_param.nested_repeated_query_param=item1&optional_complex_query_param.nested_repeated_query_param=item2&optional_complex_query_param.nested_repeated_query_param=item3" + ), + lambda client: client.get_resource( + path_param_string="get_string", + path_param_int=202, + path_param_bool=True, + optional_complex_query_param=ComplexQueryParam( + nested_repeated_query_param=[ + "item1", + "item2", + "item3", + ], + ), + ), + ), + ( + lambda requests_mock: requests_mock.get( + "http://localhost/api/2.0/http-call/get_string/303/False?repeated_complex_query_param=nested_repeated_query_param&repeated_complex_query_param=nested_repeated_query_param" + ), + lambda client: client.get_resource( + path_param_string="get_string", + path_param_int=303, + path_param_bool=False, + repeated_complex_query_param=[ + ComplexQueryParam( + nested_repeated_query_param=[ + "item1", + "item2", + "item3", + ], + ), + ComplexQueryParam( + nested_repeated_query_param=[ + "item4", + "item5", + "item6", + ], + ), + ], + ), + ), + ], + ids=[ + "LegacyHttpPostNoQueryParamsNoBody", + "LegacyHttpPostWithBody", + "UpdateResourceNoQueryParamsNoBody", + "UpdateResourceWithBody", + "UpdateResourceWithSimpleQueryParams", + "UpdateResourceWithOneNestedQueryParam", + "UpdateResourceWithRepeatedQueryParam", + "UpdateResourceWithRepeatedNestedQueryParam", + "UpdateResourceWithDoubleRepeatedNestedQueryParam", + "GetResourceNoQueryParams", + "GetResourceWithSimpleQueryParams", + "GetResourceWithOneNestedQueryParam", + "GetResourceWithRepeatedQueryParam", + "GetResourceWithRepeatedNestedQueryParam", + "GetResourceWithDoubleRepeatedNestedQueryParam", + ], +) +def test_http_call(config, requests_mock, apply_mock, make_call): + apply_mock(requests_mock) + + api_client = client.ApiClient(config) + c = HttpCallV2API(api_client) + make_call(c) + + assert requests_mock.call_count == 1 + assert requests_mock.called diff --git a/tests/generated/test_json_marshall.py b/tests/generated/test_json_marshall.py new file mode 100755 index 000000000..16fc6fb26 --- /dev/null +++ b/tests/generated/test_json_marshall.py @@ -0,0 +1,440 @@ +# Code generated by Databricks SDK Generator. DO NOT EDIT. 
+ +import json +from typing import Any + +import pytest +from google.protobuf.duration_pb2 import Duration +from google.protobuf.timestamp_pb2 import Timestamp + +from databricks.sdk.common.types.fieldmask import FieldMask +from tests.databricks.sdk.service.jsonmarshallv2 import (NestedMessage, + OptionalFields, + RepeatedFields, + RequiredFields, + TestEnum) + + +# Helper methods for well known types +def _duration(d: str) -> Duration: + dur = Duration() + dur.FromJsonString(d) + return dur + + +def _timestamp(d: str) -> Timestamp: + ts = Timestamp() + ts.FromJsonString(d) + return ts + + +def _fieldmask(d: str) -> FieldMask: + fm = FieldMask() + fm.FromJsonString(d) + return fm + + +@pytest.mark.parametrize( + "from_dict_method,instance,expected_json", + [ + ( + OptionalFields.from_dict, + OptionalFields( + optional_string="test", + ), + """{ + "optional_string": "test" + }""", + ), + ( + OptionalFields.from_dict, + OptionalFields( + optional_int32=42, + ), + """{ + "optional_int32": 42 + }""", + ), + ( + OptionalFields.from_dict, + OptionalFields( + optional_int64=9223372036854775807, + ), + """{ + "optional_int64": 9223372036854775807 + }""", + ), + ( + OptionalFields.from_dict, + OptionalFields( + optional_bool=True, + ), + """{ + "optional_bool": true + }""", + ), + ( + OptionalFields.from_dict, + OptionalFields( + test_enum=TestEnum.TEST_ENUM_ONE, + ), + """{ + "test_enum": "TEST_ENUM_ONE" + }""", + ), + ( + OptionalFields.from_dict, + OptionalFields( + optional_message=NestedMessage( + optional_string="nested_value", + ), + ), + """{ + "optional_message": { + "optional_string": "nested_value" + } + }""", + ), + ( + OptionalFields.from_dict, + OptionalFields( + map={ + "key": "test_key", + "value": "test_value", + }, + ), + """{ + "map": { + "key": "test_key", + "value": "test_value" + } + }""", + ), + ( + OptionalFields.from_dict, + OptionalFields( + duration=_duration("3600s"), + ), + """{ + "duration": "3600s" + }""", + ), + ( + OptionalFields.from_dict, + OptionalFields( + field_mask=_fieldmask("optional_string,optional_int32"), + ), + """{ + "field_mask": "optional_string,optional_int32" + }""", + ), + ( + OptionalFields.from_dict, + OptionalFields( + timestamp=_timestamp("2023-01-01T00:00:00Z"), + ), + """{ + "timestamp": "2023-01-01T00:00:00Z" + }""", + ), + ( + OptionalFields.from_dict, + OptionalFields( + optional_bool=True, + optional_int32=42, + optional_string="test", + ), + """{ + "optional_string":"test", + "optional_int32":42, + "optional_bool":true + }""", + ), + ( + RequiredFields.from_dict, + RequiredFields( + required_bool=False, + required_duration=_duration("0s"), + required_field_mask=None, + required_int32=0, + required_int64=0, + required_list_value=[], + required_message=NestedMessage(), + required_string="", + required_struct={}, + required_timestamp=_timestamp("1970-01-01T00:00:00Z"), + required_value=json.loads("{}"), + test_required_enum=TestEnum.TEST_ENUM_ONE, + ), + """{ + "required_string": "", + "required_int32": 0, + "required_int64": 0, + "required_bool": false, + "required_message": {}, + "test_required_enum": "TEST_ENUM_ONE", + "required_duration": "0s", + "required_timestamp": "1970-01-01T00:00:00Z" + }""", + ), + ( + RequiredFields.from_dict, + RequiredFields( + required_bool=True, + required_duration=_duration("7200s"), + required_field_mask=_fieldmask("required_string,required_int32"), + required_int32=42, + required_int64=1234567890123456789, + required_list_value=[], + required_message=NestedMessage(), + 
required_string="non_default_string", + required_struct={}, + required_timestamp=_timestamp("2023-12-31T23:59:59Z"), + required_value=json.loads("{}"), + test_required_enum=TestEnum.TEST_ENUM_TWO, + ), + """{ + "required_string": "non_default_string", + "required_int32": 42, + "required_int64": 1234567890123456789, + "required_bool": true, + "required_message": {}, + "test_required_enum": "TEST_ENUM_TWO", + "required_duration": "7200s", + "required_field_mask": "required_string,required_int32", + "required_timestamp": "2023-12-31T23:59:59Z" + }""", + ), + ( + RepeatedFields.from_dict, + RepeatedFields( + repeated_string=[ + "item1", + "item2", + "item3", + ], + ), + """{ + "repeated_string": ["item1", "item2", "item3"] + }""", + ), + ( + RepeatedFields.from_dict, + RepeatedFields( + repeated_int32=[ + 1, + 2, + 3, + 4, + 5, + ], + ), + """{ + "repeated_int32": [1, 2, 3, 4, 5] + }""", + ), + ( + RepeatedFields.from_dict, + RepeatedFields( + repeated_int64=[ + 1000000000000000000, + 2000000000000000000, + ], + ), + """{ + "repeated_int64": [1000000000000000000, 2000000000000000000] + }""", + ), + ( + RepeatedFields.from_dict, + RepeatedFields( + repeated_bool=[ + True, + False, + True, + ], + ), + """{ + "repeated_bool": [true, false, true] + }""", + ), + ( + RepeatedFields.from_dict, + RepeatedFields( + test_repeated_enum=[ + TestEnum.TEST_ENUM_ONE, + TestEnum.TEST_ENUM_TWO, + ], + ), + """{ + "test_repeated_enum": ["TEST_ENUM_ONE", "TEST_ENUM_TWO"] + }""", + ), + ( + RepeatedFields.from_dict, + RepeatedFields( + repeated_message=[ + NestedMessage( + optional_string="nested1", + ), + NestedMessage( + optional_string="nested2", + ), + ], + ), + """{ + "repeated_message": [ + { + "optional_string": "nested1" + }, + { + "optional_string": "nested2" + } + ] + }""", + ), + ( + RepeatedFields.from_dict, + RepeatedFields( + repeated_duration=[ + _duration("60s"), + _duration("120s"), + _duration("180s"), + ], + ), + """{ + "repeated_duration": ["60s", "120s", "180s"] + }""", + ), + ( + RepeatedFields.from_dict, + RepeatedFields( + repeated_field_mask=[ + _fieldmask("field1"), + _fieldmask("field2,field3"), + ], + ), + """{ + "repeated_field_mask": ["field1", "field2,field3"] + }""", + ), + ( + RepeatedFields.from_dict, + RepeatedFields( + repeated_timestamp=[ + _timestamp("2023-01-01T00:00:00Z"), + _timestamp("2023-01-02T00:00:00Z"), + ], + ), + """{ + "repeated_timestamp": ["2023-01-01T00:00:00Z", "2023-01-02T00:00:00Z"] + }""", + ), + ( + RepeatedFields.from_dict, + RepeatedFields( + repeated_bool=[ + True, + False, + ], + repeated_int32=[ + 10, + 20, + 30, + ], + repeated_string=[ + "a", + "b", + "c", + ], + ), + """{ + "repeated_string": ["a", "b", "c"], + "repeated_int32": [10, 20, 30], + "repeated_bool": [true, false] + }""", + ), + ( + RepeatedFields.from_dict, + RepeatedFields( + repeated_string=[], + ), + """{}""", + ), + ( + OptionalFields.from_dict, + OptionalFields(), + """{}""", + ), + ( + OptionalFields.from_dict, + OptionalFields( + optional_bool=False, + optional_int32=0, + optional_int64=0, + optional_string="", + ), + """{ + "optional_int32": 0, + "optional_int64": 0, + "optional_bool": false, + "optional_string": "" + }""", + ), + ( + OptionalFields.from_dict, + OptionalFields( + legacy_duration="1s", + legacy_field_mask="legacy_duration,legacy_timestamp", + legacy_timestamp="2023-01-01T00:00:00Z", + ), + """{ + "legacy_duration": "1s", + "legacy_timestamp": "2023-01-01T00:00:00Z", + "legacy_field_mask": "legacy_duration,legacy_timestamp" + }""", + ), + ], + ids=[ + 
"OptionalString", + "OptionalInt32", + "OptionalInt64", + "OptionalBool", + "OptionalEnum", + "OptionalNestedMessage", + "OptionalMap", + "OptionalDuration", + "OptionalFieldMask", + "OptionalTimestamp", + "MultipleOptionalFields", + "RequiredFieldsExplicitDefaults", + "RequiredFieldsNonDefaults", + "RepeatedString", + "RepeatedInt32", + "RepeatedInt64", + "RepeatedBool", + "RepeatedEnum", + "RepeatedNestedMessage", + "RepeatedDuration", + "RepeatedFieldMask", + "RepeatedTimestamp", + "MultipleRepeatedFields", + "EmptyRepeatedFields", + "OptionalFieldsNoInput", + "OptionalFieldsZeroValues", + "LegacyWellKnownTypes", + ], +) +def test_python_marshall(from_dict_method: any, instance: Any, expected_json: str): + """Test Python object to dict conversion""" + + result = instance.as_dict() + expected_dict = json.loads(expected_json) + + assert result == expected_dict, f"Expected {expected_dict}, but got {result}" + + recreated = from_dict_method(result) + + final_dict = recreated.as_dict() + + assert final_dict == expected_dict, f"Expected {expected_dict}, but got {final_dict}" diff --git a/tests/generated/test_lro_call.py b/tests/generated/test_lro_call.py new file mode 100755 index 000000000..30a97b56a --- /dev/null +++ b/tests/generated/test_lro_call.py @@ -0,0 +1,298 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +from datetime import timedelta +from typing import Any, Dict + +import pytest + +import databricks.sdk.core as client +from databricks.sdk.common import lro +from tests.databricks.sdk.service.lrotesting import ( + DatabricksServiceExceptionWithDetailsProto, ErrorCode, LroTestingAPI, + Operation, TestResource, TestResourceOperationMetadata) + + +@pytest.mark.parametrize( + "fixtures,want_result,want_err", + [ + pytest.param( + [ + { + "method": "POST", + "url": "http://localhost/api/2.0/lro-testing/resources", + "response": Operation( + done=False, + metadata={"resource_id": "test-resource-123", "progress_percent": 5}, + name="operations/test-resource-create-12345", + ), + }, + { + "method": "GET", + "url": "http://localhost/api/2.0/lro-testing/operations/operations/test-resource-create-12345", + "response": Operation( + done=False, + metadata={"resource_id": "test-resource-123", "progress_percent": 75}, + name="operations/test-resource-create-12345", + ), + }, + { + "method": "GET", + "url": "http://localhost/api/2.0/lro-testing/operations/operations/test-resource-create-12345", + "response": Operation( + done=True, + metadata={"resource_id": "test-resource-123", "progress_percent": 100}, + name="operations/test-resource-create-12345", + response={"id": "test-resource-123", "name": "test-resource"}, + ), + }, + ], + TestResource( + id="test-resource-123", + name="test-resource", + ), + False, + id="Success", + ), + pytest.param( + [ + { + "method": "POST", + "url": "http://localhost/api/2.0/lro-testing/resources", + "response": Operation( + done=False, + metadata={"resource_id": "test-resource-123", "progress_percent": 5}, + name="operations/test-resource-create-12345", + ), + }, + { + "method": "GET", + "url": "http://localhost/api/2.0/lro-testing/operations/operations/test-resource-create-12345", + "response": Operation( + done=True, + error=DatabricksServiceExceptionWithDetailsProto( + error_code=ErrorCode.INTERNAL_ERROR, + message="Test error message", + ), + name="operations/test-resource-create-12345", + ), + }, + ], + None, + True, + id="Error", + ), + ], +) +def test_lro_create_test_resource_wait( + config, requests_mock, fixtures: 
Dict[str, Any], want_result: TestResource, want_err: bool +): + for fixture in fixtures: + method = getattr(requests_mock, fixture["method"].lower()) + assert isinstance(fixture["response"], Operation) + method(fixture["url"], json=fixture["response"].as_dict()) + + api_client = client.ApiClient(config) + service = LroTestingAPI(api_client) + lro_op = service.create_test_resource(resource=TestResource()) + + if want_err: + with pytest.raises(Exception): + lro_op.wait(opts=lro.LroOptions(timeout=timedelta(minutes=1))) + else: + result = lro_op.wait(opts=lro.LroOptions(timeout=timedelta(minutes=1))) + assert result == want_result + + +@pytest.mark.parametrize( + "fixtures,want_err", + [ + pytest.param( + [ + { + "method": "POST", + "url": "http://localhost/api/2.0/lro-testing/resources", + "response": Operation( + done=False, + metadata={"resource_id": "test-resource-123", "progress_percent": 5}, + name="operations/test-resource-create-12345", + ), + }, + { + "method": "POST", + "url": "http://localhost/api/2.0/lro-testing/operations/operations/test-resource-create-12345/cancel", + "response": Operation( + done=True, + name="operations/test-resource-create-12345", + ), + }, + ], + False, + id="Success", + ), + ], +) +def test_lro_cancel_test_resource_cancel(config, requests_mock, fixtures: Dict[str, Any], want_err: bool): + for fixture in fixtures: + method = getattr(requests_mock, fixture["method"].lower()) + method(fixture["url"], json=fixture["response"].as_dict()) + + api_client = client.ApiClient(config) + service = LroTestingAPI(api_client) + lro_op = service.create_test_resource(resource=TestResource()) + + if want_err: + with pytest.raises(Exception): + lro_op.cancel() + else: + lro_op.cancel() + + +@pytest.mark.parametrize( + "fixtures,want_name", + [ + pytest.param( + [ + { + "method": "POST", + "url": "http://localhost/api/2.0/lro-testing/resources", + "response": Operation( + done=False, + metadata={"resource_id": "test-resource-123", "progress_percent": 5}, + name="operations/test-resource-create-12345", + ), + }, + ], + "operations/test-resource-create-12345", + id="Success", + ), + ], +) +def test_lro_create_test_resource_name(config, requests_mock, fixtures: Dict[str, Any], want_name: str): + for fixture in fixtures: + method = getattr(requests_mock, fixture["method"].lower()) + method(fixture["url"], json=fixture["response"].as_dict()) + + api_client = client.ApiClient(config) + service = LroTestingAPI(api_client) + lro_op = service.create_test_resource(resource=TestResource()) + + name = lro_op.name() + assert name == want_name + + +@pytest.mark.parametrize( + "fixtures,want_metadata,want_err", + [ + pytest.param( + [ + { + "method": "POST", + "url": "http://localhost/api/2.0/lro-testing/resources", + "response": Operation( + done=False, + metadata={"resource_id": "test-resource-123", "progress_percent": 5}, + name="operations/test-resource-create-12345", + ), + }, + ], + TestResourceOperationMetadata( + progress_percent=5, + resource_id="test-resource-123", + ), + False, + id="Success", + ), + ], +) +def test_lro_create_test_resource_metadata( + config, requests_mock, fixtures: Dict[str, Any], want_metadata: TestResourceOperationMetadata, want_err: bool +): + for fixture in fixtures: + method = getattr(requests_mock, fixture["method"].lower()) + method(fixture["url"], json=fixture["response"].as_dict()) + + api_client = client.ApiClient(config) + service = LroTestingAPI(api_client) + lro_op = service.create_test_resource(resource=TestResource()) + + if want_err: + with 
pytest.raises(Exception): + lro_op.metadata() + else: + metadata = lro_op.metadata() + assert metadata == want_metadata + + +@pytest.mark.parametrize( + "fixtures,want_done,want_err", + [ + pytest.param( + [ + { + "method": "POST", + "url": "http://localhost/api/2.0/lro-testing/resources", + "response": Operation( + done=False, + metadata={"resource_id": "test-resource-123", "progress_percent": 5}, + name="operations/test-resource-create-12345", + ), + }, + { + "method": "GET", + "url": "http://localhost/api/2.0/lro-testing/operations/operations/test-resource-create-12345", + "response": Operation( + done=True, + metadata={"resource_id": "test-resource-123", "progress_percent": 100}, + name="operations/test-resource-create-12345", + response={"id": "test-resource-123", "name": "test-resource"}, + ), + }, + ], + True, + False, + id="True", + ), + pytest.param( + [ + { + "method": "POST", + "url": "http://localhost/api/2.0/lro-testing/resources", + "response": Operation( + done=False, + metadata={"resource_id": "test-resource-123", "progress_percent": 5}, + name="operations/test-resource-create-12345", + ), + }, + { + "method": "GET", + "url": "http://localhost/api/2.0/lro-testing/operations/operations/test-resource-create-12345", + "response": Operation( + done=False, + metadata={"resource_id": "test-resource-123", "progress_percent": 75}, + name="operations/test-resource-create-12345", + ), + }, + ], + False, + False, + id="False", + ), + ], +) +def test_lro_create_test_resource_done( + config, requests_mock, fixtures: Dict[str, Any], want_done: bool, want_err: bool +): + for fixture in fixtures: + method = getattr(requests_mock, fixture["method"].lower()) + method(fixture["url"], json=fixture["response"].as_dict()) + + api_client = client.ApiClient(config) + service = LroTestingAPI(api_client) + lro_op = service.create_test_resource(resource=TestResource()) + + if want_err: + with pytest.raises(Exception): + lro_op.done() + else: + done = lro_op.done() + assert done == want_done
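
The generated LRO tests above all drive the same caller-side pattern: the create call returns an operation handle that exposes name(), metadata(), done(), cancel() and wait(). A minimal usage sketch under the same assumptions as the tests; the Config below is illustrative only (in the tests, `config` comes from a pytest fixture and all requests are served by requests_mock, so no real workspace is implied):

    from datetime import timedelta

    import databricks.sdk.core as client
    from databricks.sdk.common import lro
    from tests.databricks.sdk.service.lrotesting import LroTestingAPI, TestResource

    # Illustrative config; the tests receive `config` from a fixture instead.
    config = client.Config(host="http://localhost", token="dummy")
    service = LroTestingAPI(client.ApiClient(config))

    # Kick off the long-running create; this returns an operation handle, not the resource.
    lro_op = service.create_test_resource(resource=TestResource())

    print(lro_op.name())      # server-assigned operation name
    print(lro_op.metadata())  # TestResourceOperationMetadata (resource_id, progress_percent)

    if not lro_op.done():
        # Poll until the operation completes, raising on server error or timeout.
        resource = lro_op.wait(opts=lro.LroOptions(timeout=timedelta(minutes=1)))

cancel() follows the same shape and is exercised by test_lro_cancel_test_resource_cancel above.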