From 4d5651527d05d72aacd48aaec1bf5614d88c0692 Mon Sep 17 00:00:00 2001 From: Omer Lachish Date: Mon, 8 Sep 2025 13:40:28 +0200 Subject: [PATCH] update sdk --- .codegen/_openapi_sha | 2 +- .gitattributes | 1 + NEXT_CHANGELOG.md | 9 + databricks/sdk/__init__.py | 16 +- databricks/sdk/service/catalog.py | 9 +- databricks/sdk/service/compute.py | 2 + databricks/sdk/service/dashboards.py | 46 +- databricks/sdk/service/iam.py | 5 + databricks/sdk/service/iamv2.py | 643 ++++++++++++++++++ databricks/sdk/service/jobs.py | 17 +- databricks/sdk/service/oauth2.py | 6 +- databricks/sdk/service/pipelines.py | 1 + databricks/sdk/service/sharing.py | 9 + databricks/sdk/service/sql.py | 16 +- databricks/sdk/service/tags.py | 12 +- docs/account/billing/billable_usage.rst | 12 +- docs/account/billing/budget_policy.rst | 24 +- docs/account/billing/budgets.rst | 22 +- docs/account/billing/log_delivery.rst | 42 +- docs/account/billing/usage_dashboards.rst | 8 +- .../account/catalog/metastore_assignments.rst | 26 +- docs/account/catalog/metastores.rst | 22 +- docs/account/catalog/storage_credentials.rst | 26 +- docs/account/iam/access_control.rst | 18 +- docs/account/iam/groups.rst | 40 +- docs/account/iam/service_principals.rst | 38 +- docs/account/iam/users.rst | 44 +- docs/account/iam/workspace_assignment.rst | 18 +- .../account/oauth2/custom_app_integration.rst | 26 +- docs/account/oauth2/federation_policy.rst | 40 +- docs/account/oauth2/o_auth_published_apps.rst | 4 +- .../oauth2/published_app_integration.rst | 26 +- .../service_principal_federation_policy.rst | 38 +- .../oauth2/service_principal_secrets.rst | 21 +- docs/account/provisioning/credentials.rst | 24 +- docs/account/provisioning/encryption_keys.rst | 34 +- docs/account/provisioning/networks.rst | 22 +- docs/account/provisioning/private_access.rst | 58 +- docs/account/provisioning/storage.rst | 22 +- docs/account/provisioning/vpc_endpoints.rst | 34 +- docs/account/provisioning/workspaces.rst | 70 +- .../settings/csp_enablement_account.rst | 12 +- .../settings/disable_legacy_features.rst | 16 +- .../settings/enable_ip_access_lists.rst | 14 +- .../settings/esm_enablement_account.rst | 10 +- docs/account/settings/ip_access_lists.rst | 58 +- .../llm_proxy_partner_powered_account.rst | 10 +- .../llm_proxy_partner_powered_enforce.rst | 10 +- .../account/settings/network_connectivity.rst | 49 +- docs/account/settings/network_policies.rst | 22 +- docs/account/settings/personal_compute.rst | 16 +- docs/account/settings/settings.rst | 6 +- .../workspace_network_configuration.rst | 8 +- docs/account/settingsv2/settings_v2.rst | 14 +- docs/dbdataclasses/agentbricks.rst | 4 + docs/dbdataclasses/apps.rst | 4 + docs/dbdataclasses/billing.rst | 4 + docs/dbdataclasses/catalog.rst | 4 + docs/dbdataclasses/cleanrooms.rst | 4 + docs/dbdataclasses/compute.rst | 10 + docs/dbdataclasses/dashboards.rst | 8 + docs/dbdataclasses/database.rst | 4 + docs/dbdataclasses/files.rst | 4 + docs/dbdataclasses/iam.rst | 4 + docs/dbdataclasses/index.rst | 1 + docs/dbdataclasses/jobs.rst | 4 + docs/dbdataclasses/marketplace.rst | 4 + docs/dbdataclasses/ml.rst | 4 + docs/dbdataclasses/oauth2.rst | 4 + docs/dbdataclasses/pipelines.rst | 7 + docs/dbdataclasses/provisioning.rst | 4 + docs/dbdataclasses/qualitymonitorv2.rst | 4 + docs/dbdataclasses/serving.rst | 4 + docs/dbdataclasses/settings.rst | 4 + docs/dbdataclasses/settingsv2.rst | 4 + docs/dbdataclasses/sharing.rst | 7 + docs/dbdataclasses/sql.rst | 4 + docs/dbdataclasses/tags.rst | 4 + docs/dbdataclasses/vectorsearch.rst | 4 + 
docs/dbdataclasses/workspace.rst | 4 + docs/workspace/agentbricks/agent_bricks.rst | 32 +- docs/workspace/apps/apps.rst | 56 +- docs/workspace/apps/apps_settings.rst | 20 +- .../workspace/catalog/artifact_allowlists.rst | 8 +- docs/workspace/catalog/catalogs.rst | 26 +- docs/workspace/catalog/connections.rst | 26 +- docs/workspace/catalog/credentials.rst | 44 +- .../catalog/entity_tag_assignments.rst | 42 +- docs/workspace/catalog/external_lineage.rst | 22 +- docs/workspace/catalog/external_locations.rst | 32 +- docs/workspace/catalog/external_metadata.rst | 26 +- docs/workspace/catalog/functions.rst | 28 +- docs/workspace/catalog/grants.rst | 18 +- docs/workspace/catalog/metastores.rst | 52 +- docs/workspace/catalog/model_versions.rst | 38 +- docs/workspace/catalog/online_tables.rst | 14 +- docs/workspace/catalog/policies.rst | 20 +- docs/workspace/catalog/quality_monitors.rst | 74 +- docs/workspace/catalog/registered_models.rst | 64 +- docs/workspace/catalog/resource_quotas.rst | 11 +- docs/workspace/catalog/rfa.rst | 24 +- docs/workspace/catalog/schemas.rst | 22 +- .../workspace/catalog/storage_credentials.rst | 38 +- docs/workspace/catalog/system_schemas.rst | 16 +- docs/workspace/catalog/table_constraints.rst | 18 +- docs/workspace/catalog/tables.rst | 46 +- .../catalog/temporary_path_credentials.rst | 10 +- .../catalog/temporary_table_credentials.rst | 6 +- docs/workspace/catalog/volumes.rst | 44 +- docs/workspace/catalog/workspace_bindings.rst | 24 +- .../cleanrooms/clean_room_asset_revisions.rst | 8 +- .../cleanrooms/clean_room_assets.rst | 30 +- .../clean_room_auto_approval_rules.rst | 22 +- .../cleanrooms/clean_room_task_runs.rst | 4 +- docs/workspace/cleanrooms/clean_rooms.rst | 30 +- docs/workspace/compute/cluster_policies.rst | 62 +- docs/workspace/compute/clusters.rst | 166 ++--- docs/workspace/compute/command_execution.rst | 34 +- .../workspace/compute/global_init_scripts.rst | 32 +- docs/workspace/compute/instance_pools.rst | 50 +- docs/workspace/compute/instance_profiles.rst | 47 +- docs/workspace/compute/libraries.rst | 26 +- .../policy_compliance_for_clusters.rst | 22 +- docs/workspace/compute/policy_families.rst | 12 +- docs/workspace/dashboards/genie.rst | 76 +-- docs/workspace/dashboards/lakeview.rst | 80 +-- .../dashboards/lakeview_embedded.rst | 4 +- docs/workspace/database/database.rst | 106 +-- docs/workspace/files/dbfs.rst | 50 +- docs/workspace/files/files.rst | 62 +- docs/workspace/iam/access_control.rst | 4 +- .../iam/account_access_control_proxy.rst | 18 +- docs/workspace/iam/current_user.rst | 4 +- docs/workspace/iam/groups.rst | 40 +- docs/workspace/iam/permission_migration.rst | 4 +- docs/workspace/iam/permissions.rst | 18 +- docs/workspace/iam/service_principals.rst | 38 +- docs/workspace/iam/users.rst | 60 +- docs/workspace/jobs/jobs.rst | 182 ++--- .../jobs/policy_compliance_for_jobs.rst | 16 +- .../marketplace/consumer_fulfillments.rst | 8 +- .../marketplace/consumer_installations.rst | 22 +- .../marketplace/consumer_listings.rst | 16 +- .../consumer_personalization_requests.rst | 12 +- .../marketplace/consumer_providers.rst | 12 +- .../marketplace/provider_exchange_filters.rst | 18 +- .../marketplace/provider_exchanges.rst | 40 +- docs/workspace/marketplace/provider_files.rst | 18 +- .../marketplace/provider_listings.rst | 22 +- .../provider_personalization_requests.rst | 8 +- ...provider_provider_analytics_dashboards.rst | 16 +- .../marketplace/provider_providers.rst | 22 +- docs/workspace/ml/experiments.rst | 248 +++---- 
docs/workspace/ml/feature_store.rst | 28 +- docs/workspace/ml/forecasting.rst | 8 +- docs/workspace/ml/materialized_features.rst | 26 +- docs/workspace/ml/model_registry.rst | 278 ++++---- .../service_principal_secrets_proxy.rst | 21 +- docs/workspace/pipelines/pipelines.rst | 70 +- .../qualitymonitorv2/quality_monitor_v2.rst | 22 +- docs/workspace/serving/serving_endpoints.rst | 80 +-- .../serving/serving_endpoints_data_plane.rst | 4 +- ...aibi_dashboard_embedding_access_policy.rst | 14 +- ...i_dashboard_embedding_approved_domains.rst | 14 +- .../settings/automatic_cluster_update.rst | 10 +- .../settings/compliance_security_profile.rst | 12 +- .../settings/credentials_manager.rst | 4 +- .../dashboard_email_subscriptions.rst | 14 +- docs/workspace/settings/default_namespace.rst | 18 +- .../settings/default_warehouse_id.rst | 14 +- .../settings/disable_legacy_access.rst | 16 +- .../settings/disable_legacy_dbfs.rst | 18 +- .../settings/enable_export_notebook.rst | 10 +- .../enable_notebook_table_clipboard.rst | 10 +- .../settings/enable_results_downloading.rst | 10 +- .../settings/enhanced_security_monitoring.rst | 12 +- docs/workspace/settings/ip_access_lists.rst | 58 +- .../llm_proxy_partner_powered_workspace.rst | 14 +- .../settings/notification_destinations.rst | 22 +- .../settings/restrict_workspace_admins.rst | 14 +- docs/workspace/settings/settings.rst | 14 +- .../settings/sql_results_download.rst | 14 +- docs/workspace/settings/token_management.rst | 34 +- docs/workspace/settings/tokens.rst | 18 +- docs/workspace/settings/workspace_conf.rst | 10 +- .../settingsv2/workspace_settings_v2.rst | 14 +- docs/workspace/sharing/providers.rst | 32 +- .../sharing/recipient_activation.rst | 12 +- .../sharing/recipient_federation_policies.rst | 30 +- docs/workspace/sharing/recipients.rst | 38 +- docs/workspace/sharing/shares.rst | 42 +- docs/workspace/sql/alerts.rst | 24 +- docs/workspace/sql/alerts_legacy.rst | 48 +- docs/workspace/sql/alerts_v2.rst | 24 +- docs/workspace/sql/dashboard_widgets.rst | 14 +- docs/workspace/sql/dashboards.rst | 28 +- docs/workspace/sql/data_sources.rst | 14 +- docs/workspace/sql/dbsql_permissions.rst | 36 +- docs/workspace/sql/queries.rst | 28 +- docs/workspace/sql/queries_legacy.rst | 78 +-- docs/workspace/sql/query_history.rst | 6 +- docs/workspace/sql/query_visualizations.rst | 16 +- .../sql/query_visualizations_legacy.rst | 30 +- docs/workspace/sql/redash_config.rst | 4 +- docs/workspace/sql/statement_execution.rst | 99 ++- docs/workspace/sql/warehouses.rst | 96 +-- docs/workspace/tags/tag_policies.rst | 36 +- .../vectorsearch/vector_search_endpoints.rst | 26 +- .../vectorsearch/vector_search_indexes.rst | 50 +- docs/workspace/workspace/git_credentials.rst | 32 +- docs/workspace/workspace/repos.rst | 44 +- docs/workspace/workspace/secrets.rst | 180 ++--- docs/workspace/workspace/workspace.rst | 62 +- 213 files changed, 3751 insertions(+), 2888 deletions(-) create mode 100755 databricks/sdk/service/iamv2.py diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 8af35ea49..128e29bee 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -b95c2c6e21bec9551ec7d7d51ddf2dfe390b4522 \ No newline at end of file +8182e896c6797f3724042b048d466a78052b59d9 diff --git a/.gitattributes b/.gitattributes index e26b931e3..e68ce1aca 100755 --- a/.gitattributes +++ b/.gitattributes @@ -11,6 +11,7 @@ databricks/sdk/service/dashboards.py linguist-generated=true databricks/sdk/service/database.py linguist-generated=true databricks/sdk/service/files.py 
linguist-generated=true databricks/sdk/service/iam.py linguist-generated=true +databricks/sdk/service/iamv2.py linguist-generated=true databricks/sdk/service/jobs.py linguist-generated=true databricks/sdk/service/marketplace.py linguist-generated=true databricks/sdk/service/ml.py linguist-generated=true diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index b9a5380af..c96a835d4 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -13,3 +13,12 @@ ### Internal Changes ### API Changes +* Added `databricks.sdk.service.iamv2` package. +* Added `feedback` field for `databricks.sdk.service.dashboards.GenieMessage`. +* Added `disabled` field for `databricks.sdk.service.jobs.Task`. +* Added `auxiliary_managed_location` field for `databricks.sdk.service.sharing.TableInternalAttributes`. +* Added `alerts` field for `databricks.sdk.service.sql.ListAlertsV2Response`. +* Added `no_activated_k8s` and `usage_policy_entitlement_denied` enum values for `databricks.sdk.service.compute.TerminationReasonCode`. +* Added `foreign_catalog` enum value for `databricks.sdk.service.pipelines.IngestionSourceType`. +* Added `foreign_iceberg_table` enum value for `databricks.sdk.service.sharing.TableInternalAttributesSharedTableType`. +* [Breaking] Removed `disabled` field for `databricks.sdk.service.jobs.RunTask`. diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index a426eebfb..ff834e5b5 100755 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -23,6 +23,7 @@ from databricks.sdk.service import database as pkg_database from databricks.sdk.service import files as pkg_files from databricks.sdk.service import iam as pkg_iam +from databricks.sdk.service import iamv2 as pkg_iamv2 from databricks.sdk.service import jobs as pkg_jobs from databricks.sdk.service import marketplace as pkg_marketplace from databricks.sdk.service import ml as pkg_ml @@ -88,6 +89,7 @@ GroupsAPI, PermissionMigrationAPI, PermissionsAPI, ServicePrincipalsAPI, UsersAPI, WorkspaceAssignmentAPI) +from databricks.sdk.service.iamv2 import AccountIamV2API, WorkspaceIamV2API from databricks.sdk.service.jobs import JobsAPI, PolicyComplianceForJobsAPI from databricks.sdk.service.marketplace import ( ConsumerFulfillmentsAPI, ConsumerInstallationsAPI, ConsumerListingsAPI, @@ -371,6 +373,7 @@ def __init__( self._workspace_conf = pkg_settings.WorkspaceConfAPI(self._api_client) self._workspace_settings_v2 = pkg_settingsv2.WorkspaceSettingsV2API(self._api_client) self._forecasting = pkg_ml.ForecastingAPI(self._api_client) + self._workspace_iam_v2 = pkg_iamv2.WorkspaceIamV2API(self._api_client) @property def config(self) -> client.Config: @@ -896,7 +899,7 @@ def tables(self) -> pkg_catalog.TablesAPI: @property def tag_policies(self) -> pkg_tags.TagPoliciesAPI: - """The Tag Policy API allows you to manage tag policies in Databricks.""" + """The Tag Policy API allows you to manage policies for governed tags in Databricks.""" return self._tag_policies @property @@ -969,6 +972,11 @@ def forecasting(self) -> pkg_ml.ForecastingAPI: """The Forecasting API allows you to create and get serverless forecasting experiments.""" return self._forecasting + @property + def workspace_iam_v2(self) -> pkg_iamv2.WorkspaceIamV2API: + """These APIs are used to manage identities and the workspace access of these identities in Databricks.""" + return self._workspace_iam_v2 + def get_workspace_id(self) -> int: """Get the workspace ID of the workspace that this client is connected to.""" response = self._api_client.do("GET", "/api/2.0/preview/scim/v2/Me", 
response_headers=["X-Databricks-Org-Id"]) @@ -1074,6 +1082,7 @@ def __init__( self._workspace_assignment = pkg_iam.WorkspaceAssignmentAPI(self._api_client) self._workspace_network_configuration = pkg_settings.WorkspaceNetworkConfigurationAPI(self._api_client) self._workspaces = pkg_provisioning.WorkspacesAPI(self._api_client) + self._iam_v2 = pkg_iamv2.AccountIamV2API(self._api_client) self._budgets = pkg_billing.BudgetsAPI(self._api_client) @property @@ -1239,6 +1248,11 @@ def workspaces(self) -> pkg_provisioning.WorkspacesAPI: """These APIs manage workspaces for this account.""" return self._workspaces + @property + def iam_v2(self) -> pkg_iamv2.AccountIamV2API: + """These APIs are used to manage identities and the workspace access of these identities in .""" + return self._iam_v2 + @property def budgets(self) -> pkg_billing.BudgetsAPI: """These APIs manage budget configurations for this account.""" diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py index 3db28a23e..7ab843217 100755 --- a/databricks/sdk/service/catalog.py +++ b/databricks/sdk/service/catalog.py @@ -3652,7 +3652,8 @@ class ExternalLocationInfo: sufficient.""" file_event_queue: Optional[FileEventQueue] = None - """File event queue settings.""" + """File event queue settings. If `enable_file_events` is `true`, must be defined and have exactly + one of the documented properties.""" isolation_mode: Optional[IsolationMode] = None @@ -11945,7 +11946,8 @@ def create( enabled, the access to the location falls back to cluster credentials if UC credentials are not sufficient. :param file_event_queue: :class:`FileEventQueue` (optional) - File event queue settings. + File event queue settings. If `enable_file_events` is `true`, must be defined and have exactly one + of the documented properties. :param read_only: bool (optional) Indicates whether the external location is read-only. :param skip_validation: bool (optional) @@ -12107,7 +12109,8 @@ def update( enabled, the access to the location falls back to cluster credentials if UC credentials are not sufficient. :param file_event_queue: :class:`FileEventQueue` (optional) - File event queue settings. + File event queue settings. If `enable_file_events` is `true`, must be defined and have exactly one + of the documented properties. :param force: bool (optional) Force update even if changing url invalidates dependent external tables or mounts. 
:param isolation_mode: :class:`IsolationMode` (optional) diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py index c7ca04416..34ec07465 100755 --- a/databricks/sdk/service/compute.py +++ b/databricks/sdk/service/compute.py @@ -7151,6 +7151,7 @@ class TerminationReasonCode(Enum): NETWORK_CHECK_STORAGE_FAILURE = "NETWORK_CHECK_STORAGE_FAILURE" NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE" NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE" + NO_ACTIVATED_K8S = "NO_ACTIVATED_K8S" NO_MATCHED_K8S = "NO_MATCHED_K8S" NO_MATCHED_K8S_TESTING_TAG = "NO_MATCHED_K8S_TESTING_TAG" NPIP_TUNNEL_SETUP_FAILURE = "NPIP_TUNNEL_SETUP_FAILURE" @@ -7189,6 +7190,7 @@ class TerminationReasonCode(Enum): UNKNOWN = "UNKNOWN" UNSUPPORTED_INSTANCE_TYPE = "UNSUPPORTED_INSTANCE_TYPE" UPDATE_INSTANCE_PROFILE_FAILURE = "UPDATE_INSTANCE_PROFILE_FAILURE" + USAGE_POLICY_ENTITLEMENT_DENIED = "USAGE_POLICY_ENTITLEMENT_DENIED" USER_INITIATED_VM_TERMINATION = "USER_INITIATED_VM_TERMINATION" USER_REQUEST = "USER_REQUEST" WORKER_SETUP_FAILURE = "WORKER_SETUP_FAILURE" diff --git a/databricks/sdk/service/dashboards.py b/databricks/sdk/service/dashboards.py index b71f2c866..f1b0ea9ac 100755 --- a/databricks/sdk/service/dashboards.py +++ b/databricks/sdk/service/dashboards.py @@ -413,6 +413,40 @@ def from_dict(cls, d: Dict[str, Any]) -> GenieConversationSummary: ) +@dataclass +class GenieFeedback: + """Feedback containing rating and optional comment""" + + comment: Optional[str] = None + """Optional feedback comment text""" + + rating: Optional[GenieFeedbackRating] = None + """The feedback rating""" + + def as_dict(self) -> dict: + """Serializes the GenieFeedback into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.comment is not None: + body["comment"] = self.comment + if self.rating is not None: + body["rating"] = self.rating.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the GenieFeedback into a shallow dictionary of its immediate attributes.""" + body = {} + if self.comment is not None: + body["comment"] = self.comment + if self.rating is not None: + body["rating"] = self.rating + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> GenieFeedback: + """Deserializes the GenieFeedback from a dictionary.""" + return cls(comment=d.get("comment", None), rating=_enum(d, "rating", GenieFeedbackRating)) + + class GenieFeedbackRating(Enum): """Feedback rating for Genie messages""" @@ -572,6 +606,9 @@ class GenieMessage: error: Optional[MessageError] = None """Error message if Genie failed to respond to the message""" + feedback: Optional[GenieFeedback] = None + """User feedback for the message if provided""" + last_updated_timestamp: Optional[int] = None """Timestamp when the message was last updated""" @@ -597,6 +634,8 @@ def as_dict(self) -> dict: body["created_timestamp"] = self.created_timestamp if self.error: body["error"] = self.error.as_dict() + if self.feedback: + body["feedback"] = self.feedback.as_dict() if self.id is not None: body["id"] = self.id if self.last_updated_timestamp is not None: @@ -626,6 +665,8 @@ def as_shallow_dict(self) -> dict: body["created_timestamp"] = self.created_timestamp if self.error: body["error"] = self.error + if self.feedback: + body["feedback"] = self.feedback if self.id is not None: body["id"] = self.id if self.last_updated_timestamp is not None: @@ -651,6 +692,7 @@ def from_dict(cls, d: Dict[str, Any]) -> GenieMessage: conversation_id=d.get("conversation_id", None), 
created_timestamp=d.get("created_timestamp", None), error=_from_dict(d, "error", MessageError), + feedback=_from_dict(d, "feedback", GenieFeedback), id=d.get("id", None), last_updated_timestamp=d.get("last_updated_timestamp", None), message_id=d.get("message_id", None), @@ -1921,8 +1963,8 @@ def list_conversations( :param space_id: str The ID of the Genie space to retrieve conversations from. :param include_all: bool (optional) - Include all conversations in the space across all users. Requires "Can Manage" permission on the - space. + Include all conversations in the space across all users. Requires at least CAN MANAGE permission on + the space. :param page_size: int (optional) Maximum number of conversations to return per page :param page_token: str (optional) diff --git a/databricks/sdk/service/iam.py b/databricks/sdk/service/iam.py index 2991a890f..e5f0d3f68 100755 --- a/databricks/sdk/service/iam.py +++ b/databricks/sdk/service/iam.py @@ -425,6 +425,7 @@ class Group: [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements""" external_id: Optional[str] = None + """external_id should be unique for identifying groups""" groups: Optional[List[ComplexValue]] = None @@ -2111,6 +2112,7 @@ def create( [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) + external_id should be unique for identifying groups :param groups: List[:class:`ComplexValue`] (optional) :param id: str (optional) Databricks group ID @@ -2308,6 +2310,7 @@ def update( [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) + external_id should be unique for identifying groups :param groups: List[:class:`ComplexValue`] (optional) :param members: List[:class:`ComplexValue`] (optional) :param meta: :class:`ResourceMeta` (optional) @@ -3026,6 +3029,7 @@ def create( [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) + external_id should be unique for identifying groups :param groups: List[:class:`ComplexValue`] (optional) :param id: str (optional) Databricks group ID @@ -3215,6 +3219,7 @@ def update( [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) + external_id should be unique for identifying groups :param groups: List[:class:`ComplexValue`] (optional) :param members: List[:class:`ComplexValue`] (optional) :param meta: :class:`ResourceMeta` (optional) diff --git a/databricks/sdk/service/iamv2.py b/databricks/sdk/service/iamv2.py new file mode 100755 index 000000000..25cd2ad25 --- /dev/null +++ b/databricks/sdk/service/iamv2.py @@ -0,0 +1,643 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
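The new `iamv2` module generated below is exposed on the SDK clients wired up in `__init__.py` above, as `AccountClient.iam_v2` and `WorkspaceClient.workspace_iam_v2`. A minimal usage sketch of the resolve-by-external-ID endpoints defined later in this file; the external IDs are placeholders, and the calls return an error unless the account is onboarded onto Automatic Identity Management (AIM):

    from databricks.sdk import AccountClient, WorkspaceClient

    a = AccountClient()
    # Resolve a group from the customer's IdP; creates it in the account if absent.
    group = a.iam_v2.resolve_group(external_id="idp-group-42").group  # placeholder external ID
    print(group.group_name, group.internal_id)

    w = WorkspaceClient()
    # Workspace-level proxy of the same operation, plus an access-detail lookup.
    user = w.workspace_iam_v2.resolve_user_proxy(external_id="idp-user-7").user  # placeholder external ID
    detail = w.workspace_iam_v2.get_workspace_access_detail_local(principal_id=user.internal_id)
    print(detail.permissions)
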
+ +from __future__ import annotations + +import logging +from dataclasses import dataclass +from enum import Enum +from typing import Any, Dict, List, Optional + +from ._internal import _enum, _from_dict, _repeated_enum + +_LOG = logging.getLogger("databricks.sdk") + + +# all definitions in this file are in alphabetical order + + +@dataclass +class Group: + """The details of a Group resource.""" + + account_id: Optional[str] = None + """The parent account ID for group in Databricks.""" + + external_id: Optional[str] = None + """ExternalId of the group in the customer's IdP.""" + + group_name: Optional[str] = None + """Display name of the group.""" + + internal_id: Optional[int] = None + """Internal group ID of the group in Databricks.""" + + def as_dict(self) -> dict: + """Serializes the Group into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.account_id is not None: + body["account_id"] = self.account_id + if self.external_id is not None: + body["external_id"] = self.external_id + if self.group_name is not None: + body["group_name"] = self.group_name + if self.internal_id is not None: + body["internal_id"] = self.internal_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the Group into a shallow dictionary of its immediate attributes.""" + body = {} + if self.account_id is not None: + body["account_id"] = self.account_id + if self.external_id is not None: + body["external_id"] = self.external_id + if self.group_name is not None: + body["group_name"] = self.group_name + if self.internal_id is not None: + body["internal_id"] = self.internal_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Group: + """Deserializes the Group from a dictionary.""" + return cls( + account_id=d.get("account_id", None), + external_id=d.get("external_id", None), + group_name=d.get("group_name", None), + internal_id=d.get("internal_id", None), + ) + + +class PrincipalType(Enum): + """The type of the principal (user/sp/group).""" + + GROUP = "GROUP" + SERVICE_PRINCIPAL = "SERVICE_PRINCIPAL" + USER = "USER" + + +@dataclass +class ResolveGroupResponse: + group: Optional[Group] = None + """The group that was resolved.""" + + def as_dict(self) -> dict: + """Serializes the ResolveGroupResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.group: + body["group"] = self.group.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ResolveGroupResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.group: + body["group"] = self.group + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ResolveGroupResponse: + """Deserializes the ResolveGroupResponse from a dictionary.""" + return cls(group=_from_dict(d, "group", Group)) + + +@dataclass +class ResolveServicePrincipalResponse: + service_principal: Optional[ServicePrincipal] = None + """The service principal that was resolved.""" + + def as_dict(self) -> dict: + """Serializes the ResolveServicePrincipalResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.service_principal: + body["service_principal"] = self.service_principal.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ResolveServicePrincipalResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.service_principal: + body["service_principal"] = self.service_principal + return body + + @classmethod + def from_dict(cls, 
d: Dict[str, Any]) -> ResolveServicePrincipalResponse: + """Deserializes the ResolveServicePrincipalResponse from a dictionary.""" + return cls(service_principal=_from_dict(d, "service_principal", ServicePrincipal)) + + +@dataclass +class ResolveUserResponse: + user: Optional[User] = None + """The user that was resolved.""" + + def as_dict(self) -> dict: + """Serializes the ResolveUserResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.user: + body["user"] = self.user.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ResolveUserResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.user: + body["user"] = self.user + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ResolveUserResponse: + """Deserializes the ResolveUserResponse from a dictionary.""" + return cls(user=_from_dict(d, "user", User)) + + +@dataclass +class ServicePrincipal: + """The details of a ServicePrincipal resource.""" + + account_id: Optional[str] = None + """The parent account ID for the service principal in Databricks.""" + + account_sp_status: Optional[State] = None + """The activity status of a service principal in a Databricks account.""" + + application_id: Optional[str] = None + """Application ID of the service principal.""" + + display_name: Optional[str] = None + """Display name of the service principal.""" + + external_id: Optional[str] = None + """ExternalId of the service principal in the customer's IdP.""" + + internal_id: Optional[int] = None + """Internal service principal ID of the service principal in Databricks.""" + + def as_dict(self) -> dict: + """Serializes the ServicePrincipal into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.account_id is not None: + body["account_id"] = self.account_id + if self.account_sp_status is not None: + body["account_sp_status"] = self.account_sp_status.value + if self.application_id is not None: + body["application_id"] = self.application_id + if self.display_name is not None: + body["display_name"] = self.display_name + if self.external_id is not None: + body["external_id"] = self.external_id + if self.internal_id is not None: + body["internal_id"] = self.internal_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ServicePrincipal into a shallow dictionary of its immediate attributes.""" + body = {} + if self.account_id is not None: + body["account_id"] = self.account_id + if self.account_sp_status is not None: + body["account_sp_status"] = self.account_sp_status + if self.application_id is not None: + body["application_id"] = self.application_id + if self.display_name is not None: + body["display_name"] = self.display_name + if self.external_id is not None: + body["external_id"] = self.external_id + if self.internal_id is not None: + body["internal_id"] = self.internal_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ServicePrincipal: + """Deserializes the ServicePrincipal from a dictionary.""" + return cls( + account_id=d.get("account_id", None), + account_sp_status=_enum(d, "account_sp_status", State), + application_id=d.get("application_id", None), + display_name=d.get("display_name", None), + external_id=d.get("external_id", None), + internal_id=d.get("internal_id", None), + ) + + +class State(Enum): + """The activity status of a user or service principal in a Databricks account or workspace.""" + + ACTIVE = "ACTIVE" + INACTIVE = "INACTIVE" + + 
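These generated dataclasses share a uniform serialization contract: `as_dict()` collapses enum fields to their string values for JSON request bodies, and `from_dict()` re-hydrates them. A round-trip sketch using the `ServicePrincipal` and `State` definitions above:

    sp = ServicePrincipal(
        account_id="12345",  # placeholder account ID
        account_sp_status=State.ACTIVE,
        application_id="app-abc",  # placeholder application ID
        display_name="ingest-sp",
    )
    body = sp.as_dict()
    assert body["account_sp_status"] == "ACTIVE"  # enum serialized as its string value
    restored = ServicePrincipal.from_dict(body)
    assert restored.account_sp_status is State.ACTIVE  # re-hydrated to the enum member
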
+@dataclass +class User: + """The details of a User resource.""" + + username: str + """Username/email of the user.""" + + account_id: Optional[str] = None + """The accountId parent of the user in Databricks.""" + + account_user_status: Optional[State] = None + """The activity status of a user in a Databricks account.""" + + external_id: Optional[str] = None + """ExternalId of the user in the customer's IdP.""" + + internal_id: Optional[int] = None + """Internal userId of the user in Databricks.""" + + name: Optional[UserName] = None + + def as_dict(self) -> dict: + """Serializes the User into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.account_id is not None: + body["account_id"] = self.account_id + if self.account_user_status is not None: + body["account_user_status"] = self.account_user_status.value + if self.external_id is not None: + body["external_id"] = self.external_id + if self.internal_id is not None: + body["internal_id"] = self.internal_id + if self.name: + body["name"] = self.name.as_dict() + if self.username is not None: + body["username"] = self.username + return body + + def as_shallow_dict(self) -> dict: + """Serializes the User into a shallow dictionary of its immediate attributes.""" + body = {} + if self.account_id is not None: + body["account_id"] = self.account_id + if self.account_user_status is not None: + body["account_user_status"] = self.account_user_status + if self.external_id is not None: + body["external_id"] = self.external_id + if self.internal_id is not None: + body["internal_id"] = self.internal_id + if self.name: + body["name"] = self.name + if self.username is not None: + body["username"] = self.username + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> User: + """Deserializes the User from a dictionary.""" + return cls( + account_id=d.get("account_id", None), + account_user_status=_enum(d, "account_user_status", State), + external_id=d.get("external_id", None), + internal_id=d.get("internal_id", None), + name=_from_dict(d, "name", UserName), + username=d.get("username", None), + ) + + +@dataclass +class UserName: + family_name: Optional[str] = None + + given_name: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the UserName into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.family_name is not None: + body["family_name"] = self.family_name + if self.given_name is not None: + body["given_name"] = self.given_name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UserName into a shallow dictionary of its immediate attributes.""" + body = {} + if self.family_name is not None: + body["family_name"] = self.family_name + if self.given_name is not None: + body["given_name"] = self.given_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UserName: + """Deserializes the UserName from a dictionary.""" + return cls(family_name=d.get("family_name", None), given_name=d.get("given_name", None)) + + +@dataclass +class WorkspaceAccessDetail: + """The details of a principal's access to a workspace.""" + + access_type: Optional[WorkspaceAccessDetailAccessType] = None + + account_id: Optional[str] = None + """The account ID parent of the workspace where the principal has access.""" + + permissions: Optional[List[WorkspacePermission]] = None + """The permissions granted to the principal in the workspace.""" + + principal_id: Optional[int] = None + """The internal ID of the principal (user/sp/group) in 
Databricks.""" + + principal_type: Optional[PrincipalType] = None + + status: Optional[State] = None + """The activity status of the principal in the workspace. Not applicable for groups at the moment.""" + + workspace_id: Optional[int] = None + """The workspace ID where the principal has access.""" + + def as_dict(self) -> dict: + """Serializes the WorkspaceAccessDetail into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.access_type is not None: + body["access_type"] = self.access_type.value + if self.account_id is not None: + body["account_id"] = self.account_id + if self.permissions: + body["permissions"] = [v.value for v in self.permissions] + if self.principal_id is not None: + body["principal_id"] = self.principal_id + if self.principal_type is not None: + body["principal_type"] = self.principal_type.value + if self.status is not None: + body["status"] = self.status.value + if self.workspace_id is not None: + body["workspace_id"] = self.workspace_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the WorkspaceAccessDetail into a shallow dictionary of its immediate attributes.""" + body = {} + if self.access_type is not None: + body["access_type"] = self.access_type + if self.account_id is not None: + body["account_id"] = self.account_id + if self.permissions: + body["permissions"] = self.permissions + if self.principal_id is not None: + body["principal_id"] = self.principal_id + if self.principal_type is not None: + body["principal_type"] = self.principal_type + if self.status is not None: + body["status"] = self.status + if self.workspace_id is not None: + body["workspace_id"] = self.workspace_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> WorkspaceAccessDetail: + """Deserializes the WorkspaceAccessDetail from a dictionary.""" + return cls( + access_type=_enum(d, "access_type", WorkspaceAccessDetailAccessType), + account_id=d.get("account_id", None), + permissions=_repeated_enum(d, "permissions", WorkspacePermission), + principal_id=d.get("principal_id", None), + principal_type=_enum(d, "principal_type", PrincipalType), + status=_enum(d, "status", State), + workspace_id=d.get("workspace_id", None), + ) + + +class WorkspaceAccessDetailAccessType(Enum): + """The type of access the principal has to the workspace.""" + + DIRECT = "DIRECT" + INDIRECT = "INDIRECT" + + +class WorkspaceAccessDetailView(Enum): + """Controls what fields are returned in the GetWorkspaceAccessDetail response.""" + + BASIC = "BASIC" + FULL = "FULL" + + +class WorkspacePermission(Enum): + """The type of permission a principal has to a workspace (admin/user).""" + + ADMIN_PERMISSION = "ADMIN_PERMISSION" + USER_PERMISSION = "USER_PERMISSION" + + +class AccountIamV2API: + """These APIs are used to manage identities and the workspace access of these identities in .""" + + def __init__(self, api_client): + self._api = api_client + + def get_workspace_access_detail( + self, workspace_id: int, principal_id: int, *, view: Optional[WorkspaceAccessDetailView] = None + ) -> WorkspaceAccessDetail: + """Returns the access details for a principal in a workspace. Allows for checking access details for any + provisioned principal (user, service principal, or group) in a workspace. * Provisioned principal here + refers to one that has been synced into Databricks from the customer's IdP or added explicitly to + Databricks via SCIM/UI. Allows for passing in a "view" parameter to control what fields are returned + (BASIC by default or FULL). 
+ + :param workspace_id: int + Required. The workspace ID for which the access details are being requested. + :param principal_id: int + Required. The internal ID of the principal (user/sp/group) for which the access details are being + requested. + :param view: :class:`WorkspaceAccessDetailView` (optional) + Controls what fields are returned. + + :returns: :class:`WorkspaceAccessDetail` + """ + + query = {} + if view is not None: + query["view"] = view.value + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", + f"/api/2.0/identity/accounts/{self._api.account_id}/workspaces/{workspace_id}/workspaceAccessDetails/{principal_id}", + query=query, + headers=headers, + ) + return WorkspaceAccessDetail.from_dict(res) + + def resolve_group(self, external_id: str) -> ResolveGroupResponse: + """Resolves a group with the given external ID from the customer's IdP. If the group does not exist, it + will be created in the account. If the customer is not onboarded onto Automatic Identity Management + (AIM), this will return an error. + + :param external_id: str + Required. The external ID of the group in the customer's IdP. + + :returns: :class:`ResolveGroupResponse` + """ + body = {} + if external_id is not None: + body["external_id"] = external_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "POST", + f"/api/2.0/identity/accounts/{self._api.account_id}/groups/resolveByExternalId", + body=body, + headers=headers, + ) + return ResolveGroupResponse.from_dict(res) + + def resolve_service_principal(self, external_id: str) -> ResolveServicePrincipalResponse: + """Resolves an SP with the given external ID from the customer's IdP. If the SP does not exist, it will + be created. If the customer is not onboarded onto Automatic Identity Management (AIM), this will + return an error. + + :param external_id: str + Required. The external ID of the service principal in the customer's IdP. + + :returns: :class:`ResolveServicePrincipalResponse` + """ + body = {} + if external_id is not None: + body["external_id"] = external_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "POST", + f"/api/2.0/identity/accounts/{self._api.account_id}/servicePrincipals/resolveByExternalId", + body=body, + headers=headers, + ) + return ResolveServicePrincipalResponse.from_dict(res) + + def resolve_user(self, external_id: str) -> ResolveUserResponse: + """Resolves a user with the given external ID from the customer's IdP. If the user does not exist, it + will be created. If the customer is not onboarded onto Automatic Identity Management (AIM), this will + return an error. + + :param external_id: str + Required. The external ID of the user in the customer's IdP. 
+ + :returns: :class:`ResolveUserResponse` + """ + body = {} + if external_id is not None: + body["external_id"] = external_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "POST", + f"/api/2.0/identity/accounts/{self._api.account_id}/users/resolveByExternalId", + body=body, + headers=headers, + ) + return ResolveUserResponse.from_dict(res) + + +class WorkspaceIamV2API: + """These APIs are used to manage identities and the workspace access of these identities in Databricks.""" + + def __init__(self, api_client): + self._api = api_client + + def get_workspace_access_detail_local( + self, principal_id: int, *, view: Optional[WorkspaceAccessDetailView] = None + ) -> WorkspaceAccessDetail: + """Returns the access details for a principal in the current workspace. Allows for checking access + details for any provisioned principal (user, service principal, or group) in the current workspace. * + Provisioned principal here refers to one that has been synced into Databricks from the customer's IdP + or added explicitly to Databricks via SCIM/UI. Allows for passing in a "view" parameter to control + what fields are returned (BASIC by default or FULL). + + :param principal_id: int + Required. The internal ID of the principal (user/sp/group) for which the access details are being + requested. + :param view: :class:`WorkspaceAccessDetailView` (optional) + Controls what fields are returned. + + :returns: :class:`WorkspaceAccessDetail` + """ + + query = {} + if view is not None: + query["view"] = view.value + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", f"/api/2.0/identity/workspaceAccessDetails/{principal_id}", query=query, headers=headers + ) + return WorkspaceAccessDetail.from_dict(res) + + def resolve_group_proxy(self, external_id: str) -> ResolveGroupResponse: + """Resolves a group with the given external ID from the customer's IdP. If the group does not exist, it + will be created in the account. If the customer is not onboarded onto Automatic Identity Management + (AIM), this will return an error. + + :param external_id: str + Required. The external ID of the group in the customer's IdP. + + :returns: :class:`ResolveGroupResponse` + """ + body = {} + if external_id is not None: + body["external_id"] = external_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/identity/groups/resolveByExternalId", body=body, headers=headers) + return ResolveGroupResponse.from_dict(res) + + def resolve_service_principal_proxy(self, external_id: str) -> ResolveServicePrincipalResponse: + """Resolves an SP with the given external ID from the customer's IdP. If the SP does not exist, it will + be created. If the customer is not onboarded onto Automatic Identity Management (AIM), this will + return an error. + + :param external_id: str + Required. The external ID of the service principal in the customer's IdP. 
+ + :returns: :class:`ResolveServicePrincipalResponse` + """ + body = {} + if external_id is not None: + body["external_id"] = external_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "POST", "/api/2.0/identity/servicePrincipals/resolveByExternalId", body=body, headers=headers + ) + return ResolveServicePrincipalResponse.from_dict(res) + + def resolve_user_proxy(self, external_id: str) -> ResolveUserResponse: + """Resolves a user with the given external ID from the customer's IdP. If the user does not exist, it + will be created. If the customer is not onboarded onto Automatic Identity Management (AIM), this will + return an error. + + :param external_id: str + Required. The external ID of the user in the customer's IdP. + + :returns: :class:`ResolveUserResponse` + """ + body = {} + if external_id is not None: + body["external_id"] = external_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/identity/users/resolveByExternalId", body=body, headers=headers) + return ResolveUserResponse.from_dict(res) diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index 9f023448b..4445c2310 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -5696,9 +5696,6 @@ class RunTask: description: Optional[str] = None """An optional description for this task.""" - disabled: Optional[bool] = None - """Deprecated, field was never used in production.""" - effective_performance_target: Optional[PerformanceTarget] = None """The actual performance target used by the serverless run during execution. This can differ from the client-set performance target on the request depending on whether the performance mode is @@ -5873,8 +5870,6 @@ def as_dict(self) -> dict: body["depends_on"] = [v.as_dict() for v in self.depends_on] if self.description is not None: body["description"] = self.description - if self.disabled is not None: - body["disabled"] = self.disabled if self.effective_performance_target is not None: body["effective_performance_target"] = self.effective_performance_target.value if self.email_notifications: @@ -5972,8 +5967,6 @@ def as_shallow_dict(self) -> dict: body["depends_on"] = self.depends_on if self.description is not None: body["description"] = self.description - if self.disabled is not None: - body["disabled"] = self.disabled if self.effective_performance_target is not None: body["effective_performance_target"] = self.effective_performance_target if self.email_notifications: @@ -6061,7 +6054,6 @@ def from_dict(cls, d: Dict[str, Any]) -> RunTask: dbt_task=_from_dict(d, "dbt_task", DbtTask), depends_on=_repeated_dict(d, "depends_on", TaskDependency), description=d.get("description", None), - disabled=d.get("disabled", None), effective_performance_target=_enum(d, "effective_performance_target", PerformanceTarget), email_notifications=_from_dict(d, "email_notifications", JobEmailNotifications), end_time=d.get("end_time", None), @@ -7438,6 +7430,10 @@ class Task: disable_auto_optimization: Optional[bool] = None """An option to disable auto optimization in serverless""" + disabled: Optional[bool] = None + """An optional flag to disable the task. If set to true, the task will not run even if it is part + of a job.""" + email_notifications: Optional[TaskEmailNotifications] = None """An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. 
The default behavior is to not send any emails.""" @@ -7568,6 +7564,8 @@ def as_dict(self) -> dict: body["description"] = self.description if self.disable_auto_optimization is not None: body["disable_auto_optimization"] = self.disable_auto_optimization + if self.disabled is not None: + body["disabled"] = self.disabled if self.email_notifications: body["email_notifications"] = self.email_notifications.as_dict() if self.environment_key is not None: @@ -7643,6 +7641,8 @@ def as_shallow_dict(self) -> dict: body["description"] = self.description if self.disable_auto_optimization is not None: body["disable_auto_optimization"] = self.disable_auto_optimization + if self.disabled is not None: + body["disabled"] = self.disabled if self.email_notifications: body["email_notifications"] = self.email_notifications if self.environment_key is not None: @@ -7710,6 +7710,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Task: depends_on=_repeated_dict(d, "depends_on", TaskDependency), description=d.get("description", None), disable_auto_optimization=d.get("disable_auto_optimization", None), + disabled=d.get("disabled", None), email_notifications=_from_dict(d, "email_notifications", TaskEmailNotifications), environment_key=d.get("environment_key", None), existing_cluster_id=d.get("existing_cluster_id", None), diff --git a/databricks/sdk/service/oauth2.py b/databricks/sdk/service/oauth2.py index 4e762e763..58c57808d 100755 --- a/databricks/sdk/service/oauth2.py +++ b/databricks/sdk/service/oauth2.py @@ -232,11 +232,11 @@ class FederationPolicy: oidc_policy: Optional[OidcFederationPolicy] = None policy_id: Optional[str] = None - """The ID of the federation policy.""" + """The ID of the federation policy. Output only.""" service_principal_id: Optional[int] = None - """The service principal ID that this federation policy applies to. Only set for service principal - federation policies.""" + """The service principal ID that this federation policy applies to. Output only. 
Only set for + service principal federation policies.""" uid: Optional[str] = None """Unique, immutable id of the federation policy.""" diff --git a/databricks/sdk/service/pipelines.py b/databricks/sdk/service/pipelines.py index a9e8ee11b..56b73252e 100755 --- a/databricks/sdk/service/pipelines.py +++ b/databricks/sdk/service/pipelines.py @@ -735,6 +735,7 @@ class IngestionSourceType(Enum): BIGQUERY = "BIGQUERY" CONFLUENCE = "CONFLUENCE" DYNAMICS365 = "DYNAMICS365" + FOREIGN_CATALOG = "FOREIGN_CATALOG" GA4_RAW_DATA = "GA4_RAW_DATA" MANAGED_POSTGRESQL = "MANAGED_POSTGRESQL" META_MARKETING = "META_MARKETING" diff --git a/databricks/sdk/service/sharing.py b/databricks/sdk/service/sharing.py index fd8063c3d..92b5dac68 100755 --- a/databricks/sdk/service/sharing.py +++ b/databricks/sdk/service/sharing.py @@ -2307,6 +2307,9 @@ def from_dict(cls, d: Dict[str, Any]) -> Table: class TableInternalAttributes: """Internal information for D2D sharing that should not be disclosed to external users.""" + auxiliary_managed_location: Optional[str] = None + """Managed Delta Metadata location for foreign iceberg tables.""" + parent_storage_location: Optional[str] = None """Will be populated in the reconciliation response for VIEW and FOREIGN_TABLE, with the value of the parent UC entity's storage_location, following the same logic as getManagedEntityPath in @@ -2327,6 +2330,8 @@ class TableInternalAttributes: def as_dict(self) -> dict: """Serializes the TableInternalAttributes into a dictionary suitable for use as a JSON request body.""" body = {} + if self.auxiliary_managed_location is not None: + body["auxiliary_managed_location"] = self.auxiliary_managed_location if self.parent_storage_location is not None: body["parent_storage_location"] = self.parent_storage_location if self.storage_location is not None: @@ -2340,6 +2345,8 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the TableInternalAttributes into a shallow dictionary of its immediate attributes.""" body = {} + if self.auxiliary_managed_location is not None: + body["auxiliary_managed_location"] = self.auxiliary_managed_location if self.parent_storage_location is not None: body["parent_storage_location"] = self.parent_storage_location if self.storage_location is not None: @@ -2354,6 +2361,7 @@ def as_shallow_dict(self) -> dict: def from_dict(cls, d: Dict[str, Any]) -> TableInternalAttributes: """Deserializes the TableInternalAttributes from a dictionary.""" return cls( + auxiliary_managed_location=d.get("auxiliary_managed_location", None), parent_storage_location=d.get("parent_storage_location", None), storage_location=d.get("storage_location", None), type=_enum(d, "type", TableInternalAttributesSharedTableType), @@ -2366,6 +2374,7 @@ class TableInternalAttributesSharedTableType(Enum): DELTA_ICEBERG_TABLE = "DELTA_ICEBERG_TABLE" DIRECTORY_BASED_TABLE = "DIRECTORY_BASED_TABLE" FILE_BASED_TABLE = "FILE_BASED_TABLE" + FOREIGN_ICEBERG_TABLE = "FOREIGN_ICEBERG_TABLE" FOREIGN_TABLE = "FOREIGN_TABLE" MATERIALIZED_VIEW = "MATERIALIZED_VIEW" STREAMING_TABLE = "STREAMING_TABLE" diff --git a/databricks/sdk/service/sql.py b/databricks/sdk/service/sql.py index 9a80d1f5b..926413730 100755 --- a/databricks/sdk/service/sql.py +++ b/databricks/sdk/service/sql.py @@ -792,7 +792,8 @@ class AlertV2Evaluation: """Operator used for comparison in alert evaluation.""" empty_result_state: Optional[AlertEvaluationState] = None - """Alert state if result is empty.""" + """Alert state if result is empty. 
Please avoid setting this field to be `UNKNOWN` because + `UNKNOWN` state is planned to be deprecated.""" last_evaluated_at: Optional[str] = None """Timestamp of the last evaluation.""" @@ -3914,13 +3915,18 @@ def from_dict(cls, d: Dict[str, Any]) -> ListAlertsResponseAlert: @dataclass class ListAlertsV2Response: + alerts: Optional[List[AlertV2]] = None + next_page_token: Optional[str] = None results: Optional[List[AlertV2]] = None + """Deprecated. Use `alerts` instead.""" def as_dict(self) -> dict: """Serializes the ListAlertsV2Response into a dictionary suitable for use as a JSON request body.""" body = {} + if self.alerts: + body["alerts"] = [v.as_dict() for v in self.alerts] if self.next_page_token is not None: body["next_page_token"] = self.next_page_token if self.results: @@ -3930,6 +3936,8 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the ListAlertsV2Response into a shallow dictionary of its immediate attributes.""" body = {} + if self.alerts: + body["alerts"] = self.alerts if self.next_page_token is not None: body["next_page_token"] = self.next_page_token if self.results: @@ -3939,7 +3947,11 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> ListAlertsV2Response: """Deserializes the ListAlertsV2Response from a dictionary.""" - return cls(next_page_token=d.get("next_page_token", None), results=_repeated_dict(d, "results", AlertV2)) + return cls( + alerts=_repeated_dict(d, "alerts", AlertV2), + next_page_token=d.get("next_page_token", None), + results=_repeated_dict(d, "results", AlertV2), + ) class ListOrder(Enum): diff --git a/databricks/sdk/service/tags.py b/databricks/sdk/service/tags.py index 8962682fa..051a0d827 100755 --- a/databricks/sdk/service/tags.py +++ b/databricks/sdk/service/tags.py @@ -118,13 +118,13 @@ def from_dict(cls, d: Dict[str, Any]) -> Value: class TagPoliciesAPI: - """The Tag Policy API allows you to manage tag policies in Databricks.""" + """The Tag Policy API allows you to manage policies for governed tags in Databricks.""" def __init__(self, api_client): self._api = api_client def create_tag_policy(self, tag_policy: TagPolicy) -> TagPolicy: - """Creates a new tag policy. + """Creates a new tag policy, making the associated tag key governed. :param tag_policy: :class:`TagPolicy` @@ -140,7 +140,7 @@ def create_tag_policy(self, tag_policy: TagPolicy) -> TagPolicy: return TagPolicy.from_dict(res) def delete_tag_policy(self, tag_key: str): - """Deletes a tag policy by its key. + """Deletes a tag policy by its associated governed tag's key, leaving that tag key ungoverned. :param tag_key: str @@ -154,7 +154,7 @@ def delete_tag_policy(self, tag_key: str): self._api.do("DELETE", f"/api/2.1/tag-policies/{tag_key}", headers=headers) def get_tag_policy(self, tag_key: str) -> TagPolicy: - """Gets a single tag policy by its key. + """Gets a single tag policy by its associated governed tag's key. :param tag_key: str @@ -171,7 +171,7 @@ def get_tag_policy(self, tag_key: str) -> TagPolicy: def list_tag_policies( self, *, page_size: Optional[int] = None, page_token: Optional[str] = None ) -> Iterator[TagPolicy]: - """Lists all tag policies in the account. + """Lists the tag policies for all governed tags in the account. :param page_size: int (optional) The maximum number of results to return in this request. 
Fewer results may be returned than @@ -202,7 +202,7 @@ query["page_token"] = json["next_page_token"] def update_tag_policy(self, tag_key: str, tag_policy: TagPolicy, update_mask: str) -> TagPolicy: - """Updates an existing tag policy. + """Updates an existing tag policy for a single governed tag. :param tag_key: str :param tag_policy: :class:`TagPolicy` diff --git a/docs/account/billing/billable_usage.rst b/docs/account/billing/billable_usage.rst index baacbee84..93baf9294 100644 --- a/docs/account/billing/billable_usage.rst +++ b/docs/account/billing/billable_usage.rst @@ -22,17 +22,17 @@ Returns billable usage logs in CSV format for the specified account and date range. For the data schema, see: - + - AWS: [CSV file schema]. - GCP: [CSV file schema]. - + Note that this method might take multiple minutes to complete. - + **Warning**: Depending on the queried date range, the number of workspaces in the account, the size of the response and the internet speed of the caller, this API may hit a timeout after a few minutes. If you experience this, try to mitigate by calling the API with narrower date ranges. - + [CSV file schema]: https://docs.gcp.databricks.com/administration-guide/account-settings/usage-analysis.html#csv-file-schema - + :param start_month: str Format specification for month in the format `YYYY-MM`. This is used to specify billable usage `start_month` and `end_month` properties. **Note**: Billable usage logs are unavailable before March @@ -43,6 +43,6 @@ Specify whether to include personally identifiable information in the billable usage logs, for example the email addresses of cluster creators. Handle this information with care. Defaults to false. - + :returns: :class:`DownloadResponse` \ No newline at end of file diff --git a/docs/account/billing/budget_policy.rst b/docs/account/billing/budget_policy.rst index 3c2cbd92e..b5e773ac3 100644 --- a/docs/account/billing/budget_policy.rst +++ b/docs/account/billing/budget_policy.rst @@ -9,7 +9,7 @@ .. py:method:: create( [, policy: Optional[BudgetPolicy], request_id: Optional[str]]) -> BudgetPolicy Creates a new policy. - + :param policy: :class:`BudgetPolicy` (optional) The policy to create. `policy_id` needs to be empty as it will be generated `policy_name` must be provided, custom_tags may need to be provided depending on the cloud provider. All other fields are @@ -17,34 +17,34 @@ :param request_id: str (optional) A unique identifier for this request. Restricted to 36 ASCII characters. A random UUID is recommended. This request is only idempotent if a `request_id` is provided. - + :returns: :class:`BudgetPolicy` .. py:method:: delete(policy_id: str) Deletes a policy - + :param policy_id: str The Id of the policy. - - + + .. py:method:: get(policy_id: str) -> BudgetPolicy Retrieves a policy by its ID. - + :param policy_id: str The Id of the policy. - + :returns: :class:`BudgetPolicy` .. py:method:: list( [, filter_by: Optional[Filter], page_size: Optional[int], page_token: Optional[str], sort_spec: Optional[SortSpec]]) -> Iterator[BudgetPolicy] Lists all policies. Policies are returned in the alphabetically ascending order of their names. - + :param filter_by: :class:`Filter` (optional) A filter to apply to the list of policies. :param page_size: int (optional) @@ -53,19 +53,19 @@ :param page_token: str (optional) A page token, received from a previous `ListServerlessPolicies` call. Provide this to retrieve the subsequent page. If unspecified, the first page will be returned. 
- + When paginating, all other parameters provided to `ListServerlessPoliciesRequest` must match the call that provided the page token. :param sort_spec: :class:`SortSpec` (optional) The sort specification. - + :returns: Iterator over :class:`BudgetPolicy` .. py:method:: update(policy_id: str, policy: BudgetPolicy [, limit_config: Optional[LimitConfig]]) -> BudgetPolicy Updates a policy - + :param policy_id: str The Id of the policy. This field is generated by Databricks and globally unique. :param policy: :class:`BudgetPolicy` @@ -73,6 +73,6 @@ specified even if not changed. The `policy_id` is used to identify the policy to update. :param limit_config: :class:`LimitConfig` (optional) DEPRECATED. This is a redundant field as LimitConfig is part of the BudgetPolicy - + :returns: :class:`BudgetPolicy` \ No newline at end of file diff --git a/docs/account/billing/budgets.rst b/docs/account/billing/budgets.rst index acbfbfb68..3b1323ba1 100644 --- a/docs/account/billing/budgets.rst +++ b/docs/account/billing/budgets.rst @@ -58,10 +58,10 @@ Create a new budget configuration for an account. For full details, see https://docs.databricks.com/en/admin/account-settings/budgets.html. - + :param budget: :class:`CreateBudgetConfigurationBudget` Properties of the new budget configuration. - + :returns: :class:`CreateBudgetConfigurationResponse` @@ -69,11 +69,11 @@ Deletes a budget configuration for an account. Both account and budget configuration are specified by ID. This cannot be undone. - + :param budget_id: str The Databricks budget configuration ID. - - + + .. py:method:: get(budget_id: str) -> GetBudgetConfigurationResponse @@ -127,10 +127,10 @@ a.budgets.delete(budget_id=created.budget.budget_configuration_id) Gets a budget configuration for an account. Both account and budget configuration are specified by ID. - + :param budget_id: str The budget configuration ID - + :returns: :class:`GetBudgetConfigurationResponse` @@ -149,11 +149,11 @@ all = a.budgets.list(billing.ListBudgetConfigurationsRequest()) Gets all budgets associated with this account. - + :param page_token: str (optional) A page token received from a previous get all budget configurations call. This token can be used to retrieve the subsequent page. Requests first page if absent. - + :returns: Iterator over :class:`BudgetConfiguration` @@ -236,11 +236,11 @@ Updates a budget configuration for an account. Both account and budget configuration are specified by ID. - + :param budget_id: str The Databricks budget configuration ID. :param budget: :class:`UpdateBudgetConfigurationBudget` The updated budget. This will overwrite the budget specified by the budget ID. - + :returns: :class:`UpdateBudgetConfigurationResponse` \ No newline at end of file diff --git a/docs/account/billing/log_delivery.rst b/docs/account/billing/log_delivery.rst index ae0ea1f53..8fcdbb771 100644 --- a/docs/account/billing/log_delivery.rst +++ b/docs/account/billing/log_delivery.rst @@ -7,23 +7,23 @@ These APIs manage log delivery configurations for this account. The two supported log types for this API are _billable usage logs_ and _audit logs_. This feature is in Public Preview. This feature works with all account ID types. - + Log delivery works with all account types. However, if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account, you can optionally configure different storage destinations for each workspace.
Log delivery status is also provided so that you can check the latest status of log delivery attempts. - + The high-level flow of billable usage delivery: - + 1. **Create storage**: In AWS, [create a new AWS S3 bucket] with a specific bucket policy. Using Databricks APIs, call the Account API to create a [storage configuration object](:method:Storage/Create) that uses the bucket name. - + 2. **Create credentials**: In AWS, create the appropriate AWS IAM role. For full details, including the required IAM role policies and trust relationship, see [Billable usage log delivery]. Using Databricks APIs, call the Account API to create a [credential configuration object](:method:Credentials/Create) that uses the IAM role's ARN. - + 3. **Create log delivery configuration**: Using Databricks APIs, call the Account API to [create a log delivery configuration](:method:LogDelivery/Create) that uses the credential and storage configuration objects from previous steps. You can specify if the logs should include all events of that log type in @@ -31,7 +31,7 @@ delivery). Account level log delivery applies to all current and future workspaces plus account level logs, while workspace level log delivery solely delivers logs related to the specified workspaces. You can create multiple types of delivery configurations per account. - + For billable usage delivery: * For more information about billable usage logs, see [Billable usage log delivery]. For the CSV schema, see the [Usage page]. * The delivery location is `//billable-usage/csv/`, where `` is the name of the optional delivery path @@ -40,7 +40,7 @@ workspaces (_workspace level_ logs). You can aggregate usage for your entire account by creating an _account level_ delivery configuration that delivers logs for all current and future workspaces in your account. * The files are delivered daily by overwriting the month's CSV file for each workspace. - + For audit log delivery: * For more information about audit log delivery, see [Audit log delivery], which includes information about the JSON schema used. * The delivery location is `//workspaceId=/date=/auditlogs_.json`. @@ -50,7 +50,7 @@ level_ delivery configuration), the audit log delivery includes workspace-level audit logs for all workspaces in the account as well as account-level audit logs. See [Audit log delivery] for details. * Auditable events are typically available in logs within 15 minutes. - + [Audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html [Billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html [Usage page]: https://docs.databricks.com/administration-guide/account-settings/usage.html @@ -105,25 +105,25 @@ to your storage location. This requires that you already created a [credential object](:method:Credentials/Create) (which encapsulates a cross-account service IAM role) and a [storage configuration object](:method:Storage/Create) (which encapsulates an S3 bucket). - + For full details, including the required IAM role policies and bucket policies, see [Deliver and access billable usage logs] or [Configure audit logging]. - + **Note**: There is a limit on the number of log delivery configurations available per account (each limit applies separately to each log type including billable usage and audit logs). You can create a maximum of two enabled account-level delivery configurations (configurations without a workspace filter) per type.
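A rough sketch of step 3 for billable usage logs, assuming the storage and credential configuration objects from steps 1 and 2 already exist (the IDs are placeholders):

.. code-block:: python

    from databricks.sdk import AccountClient
    from databricks.sdk.service import billing

    a = AccountClient()

    created = a.log_delivery.create(
        log_delivery_configuration=billing.CreateLogDeliveryConfigurationParams(
            config_name="billable-usage-logs",
            credentials_id="<credentials-id>",
            storage_configuration_id="<storage-configuration-id>",
            log_type=billing.LogType.BILLABLE_USAGE,
            output_format=billing.OutputFormat.CSV,
        )
    )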
Additionally, you can create two enabled workspace-level delivery configurations per workspace for each log type, which means that the same workspace ID can occur in the workspace filter for no more than two delivery configurations per log type. - + You cannot delete a log delivery configuration, but you can disable it (see [Enable or disable log delivery configuration](:method:LogDelivery/PatchStatus)). - + [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html [Deliver and access billable usage logs]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html - + :param log_delivery_configuration: :class:`CreateLogDeliveryConfigurationParams` - + :returns: :class:`WrappedLogDeliveryConfiguration` @@ -175,10 +175,10 @@ ) Gets a Databricks log delivery configuration object for an account, both specified by ID. - + :param log_delivery_configuration_id: str The log delivery configuration id of customer - + :returns: :class:`GetLogDeliveryConfigurationResponse` @@ -197,7 +197,7 @@ all = a.log_delivery.list(billing.ListLogDeliveryRequest()) Gets all Databricks log delivery configurations associated with an account specified by ID. - + :param credentials_id: str (optional) The Credentials id to filter the search results with :param page_token: str (optional) @@ -207,7 +207,7 @@ The log delivery status to filter the search results with :param storage_configuration_id: str (optional) The Storage Configuration id to filter the search results with - + :returns: Iterator over :class:`LogDeliveryConfiguration` @@ -217,7 +217,7 @@ supported, so disable log delivery configurations that are no longer needed. Note that you can't re-enable a delivery configuration if this would violate the delivery configuration limits described under [Create log delivery](:method:LogDelivery/Create). - + :param log_delivery_configuration_id: str The log delivery configuration id of customer :param status: :class:`LogDeliveryConfigStatus` @@ -225,6 +225,6 @@ to `ENABLED`. You can [enable or disable the configuration](#operation/patch-log-delivery-config-status) later. Deletion of a configuration is not supported, so disable a log delivery configuration that is no longer needed. - - + + \ No newline at end of file diff --git a/docs/account/billing/usage_dashboards.rst b/docs/account/billing/usage_dashboards.rst index 4eef82411..60e9413e2 100644 --- a/docs/account/billing/usage_dashboards.rst +++ b/docs/account/billing/usage_dashboards.rst @@ -11,25 +11,25 @@ .. py:method:: create( [, dashboard_type: Optional[UsageDashboardType], workspace_id: Optional[int]]) -> CreateBillingUsageDashboardResponse Create a usage dashboard specified by workspaceId, accountId, and dashboard type. - + :param dashboard_type: :class:`UsageDashboardType` (optional) Workspace level usage dashboard shows usage data for the specified workspace ID. Global level usage dashboard shows usage data for all workspaces in the account. :param workspace_id: int (optional) The workspace ID of the workspace in which the usage dashboard is created. - + :returns: :class:`CreateBillingUsageDashboardResponse` .. py:method:: get( [, dashboard_type: Optional[UsageDashboardType], workspace_id: Optional[int]]) -> GetBillingUsageDashboardResponse Get a usage dashboard specified by workspaceId, accountId, and dashboard type. - + :param dashboard_type: :class:`UsageDashboardType` (optional) Workspace level usage dashboard shows usage data for the specified workspace ID. 
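A minimal sketch of creating and then fetching a workspace-level usage dashboard (the enum member name and workspace ID are assumptions/placeholders):

.. code-block:: python

    from databricks.sdk import AccountClient
    from databricks.sdk.service import billing

    a = AccountClient()

    a.usage_dashboards.create(
        dashboard_type=billing.UsageDashboardType.USAGE_DASHBOARD_TYPE_WORKSPACE,
        workspace_id=1234567890,
    )
    # Fetch it back with the same keys.
    dashboard = a.usage_dashboards.get(
        dashboard_type=billing.UsageDashboardType.USAGE_DASHBOARD_TYPE_WORKSPACE,
        workspace_id=1234567890,
    )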
Global level usage dashboard shows usage data for all workspaces in the account. :param workspace_id: int (optional) The workspace ID of the workspace in which the usage dashboard is created. - + :returns: :class:`GetBillingUsageDashboardResponse` \ No newline at end of file diff --git a/docs/account/catalog/metastore_assignments.rst b/docs/account/catalog/metastore_assignments.rst index 4463ef712..6fd25aaea 100644 --- a/docs/account/catalog/metastore_assignments.rst +++ b/docs/account/catalog/metastore_assignments.rst @@ -9,26 +9,26 @@ .. py:method:: create(workspace_id: int, metastore_id: str [, metastore_assignment: Optional[CreateMetastoreAssignment]]) Creates an assignment to a metastore for a workspace - + :param workspace_id: int Workspace ID. :param metastore_id: str Unity Catalog metastore ID :param metastore_assignment: :class:`CreateMetastoreAssignment` (optional) - - + + .. py:method:: delete(workspace_id: int, metastore_id: str) Deletes a metastore assignment to a workspace, leaving the workspace with no metastore. - + :param workspace_id: int Workspace ID. :param metastore_id: str Unity Catalog metastore ID - - + + .. py:method:: get(workspace_id: int) -> AccountsMetastoreAssignment @@ -36,10 +36,10 @@ Gets the metastore assignment, if any, for the workspace specified by ID. If the workspace is assigned a metastore, the mapping will be returned. If no metastore is assigned to the workspace, the assignment will not be found and a 404 is returned. - + :param workspace_id: int Workspace ID. - + :returns: :class:`AccountsMetastoreAssignment` @@ -59,10 +59,10 @@ ws = a.metastore_assignments.list(metastore_id=os.environ["TEST_METASTORE_ID"]) Gets a list of all Databricks workspace IDs that have been assigned to the given metastore. - + :param metastore_id: str Unity Catalog metastore ID - + :returns: Iterator over int @@ -70,12 +70,12 @@ Updates an assignment to a metastore for a workspace. Currently, only the default catalog may be updated. - + :param workspace_id: int Workspace ID. :param metastore_id: str Unity Catalog metastore ID :param metastore_assignment: :class:`UpdateMetastoreAssignment` (optional) - - + + \ No newline at end of file diff --git a/docs/account/catalog/metastores.rst b/docs/account/catalog/metastores.rst index bb34cf6eb..f12672c41 100644 --- a/docs/account/catalog/metastores.rst +++ b/docs/account/catalog/metastores.rst @@ -10,49 +10,49 @@ .. py:method:: create( [, metastore_info: Optional[CreateMetastore]]) -> AccountsMetastoreInfo Creates a Unity Catalog metastore. - + :param metastore_info: :class:`CreateMetastore` (optional) - + :returns: :class:`AccountsMetastoreInfo` .. py:method:: delete(metastore_id: str [, force: Optional[bool]]) Deletes a Unity Catalog metastore for an account, both specified by ID. - + :param metastore_id: str Unity Catalog metastore ID :param force: bool (optional) Force deletion even if the metastore is not empty. Default is false. - - + + .. py:method:: get(metastore_id: str) -> AccountsMetastoreInfo Gets a Unity Catalog metastore from an account, both specified by ID. - + :param metastore_id: str Unity Catalog metastore ID - + :returns: :class:`AccountsMetastoreInfo` .. py:method:: list() -> Iterator[MetastoreInfo] Gets all Unity Catalog metastores associated with an account specified by ID. - - + + :returns: Iterator over :class:`MetastoreInfo` .. py:method:: update(metastore_id: str [, metastore_info: Optional[UpdateMetastore]]) -> AccountsMetastoreInfo Updates an existing Unity Catalog metastore.
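Tying the assignment calls together, a minimal sketch (the environment variable is a placeholder, following the example already on this page):

.. code-block:: python

    import os

    from databricks.sdk import AccountClient

    a = AccountClient()

    # List every workspace assigned to the metastore, then look up each assignment.
    for workspace_id in a.metastore_assignments.list(metastore_id=os.environ["TEST_METASTORE_ID"]):
        assignment = a.metastore_assignments.get(workspace_id=workspace_id)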
- + :param metastore_id: str Unity Catalog metastore ID :param metastore_info: :class:`UpdateMetastore` (optional) - + :returns: :class:`AccountsMetastoreInfo` \ No newline at end of file diff --git a/docs/account/catalog/storage_credentials.rst b/docs/account/catalog/storage_credentials.rst index f62632e8f..eca5c17c9 100644 --- a/docs/account/catalog/storage_credentials.rst +++ b/docs/account/catalog/storage_credentials.rst @@ -9,17 +9,17 @@ .. py:method:: create(metastore_id: str [, credential_info: Optional[CreateStorageCredential]]) -> AccountsStorageCredentialInfo Creates a new storage credential. The request object is specific to the cloud: - + * **AwsIamRole** for AWS credentials * **AzureServicePrincipal** for Azure credentials * **GcpServiceAcountKey** for GCP credentials. - + The caller must be a metastore admin and have the **CREATE_STORAGE_CREDENTIAL** privilege on the metastore. - + :param metastore_id: str Unity Catalog metastore ID :param credential_info: :class:`CreateStorageCredential` (optional) - + :returns: :class:`AccountsStorageCredentialInfo` @@ -27,37 +27,37 @@ Deletes a storage credential from the metastore. The caller must be an owner of the storage credential. - + :param metastore_id: str Unity Catalog metastore ID :param storage_credential_name: str Name of the storage credential. :param force: bool (optional) Force deletion even if the Storage Credential is not empty. Default is false. - - + + .. py:method:: get(metastore_id: str, storage_credential_name: str) -> AccountsStorageCredentialInfo Gets a storage credential from the metastore. The caller must be a metastore admin, the owner of the storage credential, or have a level of privilege on the storage credential. - + :param metastore_id: str Unity Catalog metastore ID :param storage_credential_name: str Name of the storage credential. - + :returns: :class:`AccountsStorageCredentialInfo` .. py:method:: list(metastore_id: str) -> Iterator[StorageCredentialInfo] Gets a list of all storage credentials that have been assigned to given metastore. - + :param metastore_id: str Unity Catalog metastore ID - + :returns: Iterator over :class:`StorageCredentialInfo` @@ -65,12 +65,12 @@ Updates a storage credential on the metastore. The caller must be the owner of the storage credential. If the caller is a metastore admin, only the __owner__ credential can be changed. - + :param metastore_id: str Unity Catalog metastore ID :param storage_credential_name: str Name of the storage credential. :param credential_info: :class:`UpdateStorageCredential` (optional) - + :returns: :class:`AccountsStorageCredentialInfo` \ No newline at end of file diff --git a/docs/account/iam/access_control.rst b/docs/account/iam/access_control.rst index 0e271f62e..1d37b4631 100644 --- a/docs/account/iam/access_control.rst +++ b/docs/account/iam/access_control.rst @@ -12,15 +12,15 @@ Gets all the roles that can be granted on an account level resource. A role is grantable if the rule set on the resource can contain an access rule of the role. - + :param resource: str The resource name for which assignable roles will be listed. - + Examples | Summary :--- | :--- `resource=accounts/` | A resource name for the account. `resource=accounts//groups/` | A resource name for the group. `resource=accounts//servicePrincipals/` | A resource name for the service principal. - + :returns: :class:`GetAssignableRolesForResourceResponse` @@ -28,10 +28,10 @@ Get a rule set by its name. 
A rule set is always attached to a resource and contains a list of access rules on the said resource. Currently only a default rule set for each resource is supported. - + :param name: str The ruleset name associated with the request. - + Examples | Summary :--- | :--- `name=accounts//ruleSets/default` | A name for a rule set on the account. `name=accounts//groups//ruleSets/default` | A name for a rule set on the group. @@ -44,11 +44,11 @@ modify -> write pattern to perform rule set updates in order to avoid race conditions: that is, get an etag from a GET rule set request, and pass it with the PUT update request to identify the rule set version you are updating. - + Examples | Summary :--- | :--- `etag=` | An empty etag can only be used in GET to indicate no freshness requirements. `etag=RENUAAABhSweA4NvVmmUYdiU717H3Tgy0UJdor3gE4a+mq/oj9NjAf8ZsQ==` | An etag encoding a specific version of the rule set to get or to be updated. - + :returns: :class:`RuleSetResponse` @@ -56,10 +56,10 @@ Replace the rules of a rule set. First, use get to read the current version of the rule set before modifying it. This pattern helps prevent conflicts between concurrent updates. - + :param name: str Name of the rule set. :param rule_set: :class:`RuleSetUpdateRequest` - + :returns: :class:`RuleSetResponse` \ No newline at end of file diff --git a/docs/account/iam/groups.rst b/docs/account/iam/groups.rst index dbe024c2a..454da404a 100644 --- a/docs/account/iam/groups.rst +++ b/docs/account/iam/groups.rst @@ -6,7 +6,7 @@ Groups simplify identity management, making it easier to assign access to Databricks account, data, and other securable objects. - + It is best practice to assign access to workspaces and access-control policies in Unity Catalog to groups, instead of to users individually. All Databricks account identities can be assigned as members of groups, and members inherit permissions that are assigned to their group. @@ -14,15 +14,16 @@ .. py:method:: create( [, display_name: Optional[str], entitlements: Optional[List[ComplexValue]], external_id: Optional[str], groups: Optional[List[ComplexValue]], id: Optional[str], members: Optional[List[ComplexValue]], meta: Optional[ResourceMeta], roles: Optional[List[ComplexValue]], schemas: Optional[List[GroupSchema]]]) -> Group Creates a group in the Databricks account with a unique name, using the supplied group details. - + :param display_name: str (optional) String that represents a human-readable group name :param entitlements: List[:class:`ComplexValue`] (optional) Entitlements assigned to the group. See [assigning entitlements] for a full list of supported values. - + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) + external_id should be unique for identifying groups :param groups: List[:class:`ComplexValue`] (optional) :param id: str (optional) Databricks group ID @@ -33,27 +34,27 @@ Corresponds to AWS instance profile/arn role. :param schemas: List[:class:`GroupSchema`] (optional) The schema of the group. - + :returns: :class:`Group` .. py:method:: delete(id: str) Deletes a group from the Databricks account. - + :param id: str Unique ID for a group in the Databricks account. - - + + .. py:method:: get(id: str) -> Group Gets the information for a specific group in the Databricks account. - + :param id: str Unique ID for a group in the Databricks account.
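The read -> modify -> write pattern described above, as a sketch (the resource name, principal, and role are illustrative only):

.. code-block:: python

    from databricks.sdk import AccountClient
    from databricks.sdk.service import iam

    a = AccountClient()

    name = "accounts/<account-id>/groups/<group-id>/ruleSets/default"  # placeholder IDs
    # GET with an empty etag reads the current version of the rule set.
    current = a.access_control.get_rule_set(name=name, etag="")
    rules = (current.grant_rules or []) + [
        iam.GrantRule(principals=["users/someone@example.com"], role="roles/group.manager")
    ]
    # PUT back with the etag that was read, so concurrent updates are detected.
    a.access_control.update_rule_set(
        name=name,
        rule_set=iam.RuleSetUpdateRequest(name=name, etag=current.etag, grant_rules=rules),
    )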
- + :returns: :class:`Group` @@ -62,7 +63,7 @@ Gets all details of the groups associated with the Databricks account. As of 08/22/2025, this endpoint will not return members. Instead, members should be retrieved by iterating through `Get group details`. - + :param attributes: str (optional) Comma-separated list of attributes to return in response. :param count: int (optional) @@ -74,7 +75,7 @@ contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently only support simple expressions. - + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 :param sort_by: str (optional) Attribute to sort the results. @@ -82,27 +83,27 @@ The order to sort the results. :param start_index: int (optional) Specifies the index of the first result. First item is number 1. - + :returns: Iterator over :class:`Group` .. py:method:: patch(id: str [, operations: Optional[List[Patch]], schemas: Optional[List[PatchSchema]]]) Partially updates the details of a group. - + :param id: str Unique ID in the Databricks workspace. :param operations: List[:class:`Patch`] (optional) :param schemas: List[:class:`PatchSchema`] (optional) The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. - - + + .. py:method:: update(id: str [, display_name: Optional[str], entitlements: Optional[List[ComplexValue]], external_id: Optional[str], groups: Optional[List[ComplexValue]], members: Optional[List[ComplexValue]], meta: Optional[ResourceMeta], roles: Optional[List[ComplexValue]], schemas: Optional[List[GroupSchema]]]) Updates the details of a group by replacing the entire group entity. - + :param id: str Databricks group ID :param display_name: str (optional) @@ -110,9 +111,10 @@ :param entitlements: List[:class:`ComplexValue`] (optional) Entitlements assigned to the group. See [assigning entitlements] for a full list of supported values. - + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) + external_id should be unique for identifying groups :param groups: List[:class:`ComplexValue`] (optional) :param members: List[:class:`ComplexValue`] (optional) :param meta: :class:`ResourceMeta` (optional) @@ -121,6 +123,6 @@ Corresponds to AWS instance profile/arn role. :param schemas: List[:class:`GroupSchema`] (optional) The schema of the group. - - + + \ No newline at end of file diff --git a/docs/account/iam/service_principals.rst b/docs/account/iam/service_principals.rst index 6ec4fb814..df537b79f 100644 --- a/docs/account/iam/service_principals.rst +++ b/docs/account/iam/service_principals.rst @@ -29,7 +29,7 @@ a.service_principals.delete(id=sp_create.id) Creates a new service principal in the Databricks account. - + :param active: bool (optional) If this user is active :param application_id: str (optional) @@ -39,7 +39,7 @@ :param entitlements: List[:class:`ComplexValue`] (optional) Entitlements assigned to the service principal. See [assigning entitlements] for a full list of supported values. - + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) :param groups: List[:class:`ComplexValue`] (optional) @@ -49,18 +49,18 @@ Corresponds to AWS instance profile/arn role. 
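For the SCIM `patch` operations these identity APIs share, a minimal sketch of adding a member to a group (group and member IDs are placeholders; the `value` shape follows SCIM PatchOp conventions and should be verified):

.. code-block:: python

    from databricks.sdk import AccountClient
    from databricks.sdk.service import iam

    a = AccountClient()

    a.groups.patch(
        id="<group-id>",
        operations=[iam.Patch(op=iam.PatchOp.ADD, path="members", value=[{"value": "<user-id>"}])],
        schemas=[iam.PatchSchema.URN_IETF_PARAMS_SCIM_API_MESSAGES_2_0_PATCH_OP],
    )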
:param schemas: List[:class:`ServicePrincipalSchema`] (optional) The schema of the List response. - + :returns: :class:`ServicePrincipal` .. py:method:: delete(id: str) Delete a single service principal in the Databricks account. - + :param id: str Unique ID for a service principal in the Databricks account. - - + + .. py:method:: get(id: str) -> ServicePrincipal @@ -84,10 +84,10 @@ a.service_principals.delete(id=sp_create.id) Gets the details for a single service principal defined in the Databricks account. - + :param id: str Unique ID for a service principal in the Databricks account. - + :returns: :class:`ServicePrincipal` @@ -114,7 +114,7 @@ a.service_principals.delete(id=sp_create.id) Gets the set of service principals associated with a Databricks account. - + :param attributes: str (optional) Comma-separated list of attributes to return in response. :param count: int (optional) @@ -126,7 +126,7 @@ contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently only support simple expressions. - + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 :param sort_by: str (optional) Attribute to sort the results. @@ -134,7 +134,7 @@ The order to sort the results. :param start_index: int (optional) Specifies the index of the first result. First item is number 1. - + :returns: Iterator over :class:`ServicePrincipal` @@ -166,14 +166,14 @@ a.service_principals.delete(id=sp_create.id) Partially updates the details of a single service principal in the Databricks account. - + :param id: str Unique ID in the Databricks workspace. :param operations: List[:class:`Patch`] (optional) :param schemas: List[:class:`PatchSchema`] (optional) The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. - - + + .. py:method:: update(id: str [, active: Optional[bool], application_id: Optional[str], display_name: Optional[str], entitlements: Optional[List[ComplexValue]], external_id: Optional[str], groups: Optional[List[ComplexValue]], roles: Optional[List[ComplexValue]], schemas: Optional[List[ServicePrincipalSchema]]]) @@ -199,9 +199,9 @@ a.service_principals.delete(id=sp_create.id) Updates the details of a single service principal. - + This action replaces the existing service principal with the same name. - + :param id: str Databricks service principal ID. :param active: bool (optional) @@ -213,7 +213,7 @@ :param entitlements: List[:class:`ComplexValue`] (optional) Entitlements assigned to the service principal. See [assigning entitlements] for a full list of supported values. - + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) :param groups: List[:class:`ComplexValue`] (optional) @@ -221,6 +221,6 @@ Corresponds to AWS instance profile/arn role. :param schemas: List[:class:`ServicePrincipalSchema`] (optional) The schema of the List response. - - + + \ No newline at end of file diff --git a/docs/account/iam/users.rst b/docs/account/iam/users.rst index 4ddf58a71..0de9484ca 100644 --- a/docs/account/iam/users.rst +++ b/docs/account/iam/users.rst @@ -5,7 +5,7 @@ .. py:class:: AccountUsersAPI User identities recognized by Databricks and represented by email addresses. - + Databricks recommends using SCIM provisioning to sync users and groups automatically from your identity provider to your Databricks account.
SCIM streamlines onboarding a new employee or team by using your identity provider to create users and groups in Databricks account and give them the proper level of @@ -37,20 +37,20 @@ Creates a new user in the Databricks account. This new user will also be added to the Databricks account. - + :param active: bool (optional) If this user is active :param display_name: str (optional) String that represents a concatenation of given and family names. For example `John Smith`. This field cannot be updated through the Workspace SCIM APIs when [identity federation is enabled]. Use Account SCIM APIs to update `displayName`. - + [identity federation is enabled]: https://docs.databricks.com/administration-guide/users-groups/best-practices.html#enable-identity-federation :param emails: List[:class:`ComplexValue`] (optional) All the emails associated with the Databricks user. :param entitlements: List[:class:`ComplexValue`] (optional) Entitlements assigned to the user. See [assigning entitlements] for a full list of supported values. - + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) External ID is not currently supported. It is reserved for future use. @@ -64,7 +64,7 @@ The schema of the user. :param user_name: str (optional) Email address of the Databricks user. - + :returns: :class:`User` @@ -90,11 +90,11 @@ Deletes a user. Deleting a user from a Databricks account also removes objects associated with the user. - + :param id: str Unique ID for a user in the Databricks account. - - + + .. py:method:: get(id: str [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[GetSortOrder], start_index: Optional[int]]) -> User @@ -121,7 +121,7 @@ a.users.delete(id=user.id) Gets information for a specific user in Databricks account. - + :param id: str Unique ID for a user in the Databricks account. :param attributes: str (optional) @@ -135,7 +135,7 @@ contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently only support simple expressions. - + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 :param sort_by: str (optional) Attribute to sort the results. Multi-part paths are supported. For example, `userName`, @@ -144,14 +144,14 @@ The order to sort the results. :param start_index: int (optional) Specifies the index of the first result. First item is number 1. - + :returns: :class:`User` .. py:method:: list( [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[ListSortOrder], start_index: Optional[int]]) -> Iterator[User] Gets details for all the users associated with a Databricks account. - + :param attributes: str (optional) Comma-separated list of attributes to return in response. :param count: int (optional) @@ -163,7 +163,7 @@ contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently only support simple expressions. - + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 :param sort_by: str (optional) Attribute to sort the results. Multi-part paths are supported. 
For example, `userName`, @@ -172,7 +172,7 @@ The order to sort the results. :param start_index: int (optional) Specifies the index of the first result. First item is number 1. - + :returns: Iterator over :class:`User` @@ -210,20 +210,20 @@ a.users.delete(id=user.id) Partially updates a user resource by applying the supplied operations on specific user attributes. - + :param id: str Unique ID in the Databricks workspace. :param operations: List[:class:`Patch`] (optional) :param schemas: List[:class:`PatchSchema`] (optional) The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. - - + + .. py:method:: update(id: str [, active: Optional[bool], display_name: Optional[str], emails: Optional[List[ComplexValue]], entitlements: Optional[List[ComplexValue]], external_id: Optional[str], groups: Optional[List[ComplexValue]], name: Optional[Name], roles: Optional[List[ComplexValue]], schemas: Optional[List[UserSchema]], user_name: Optional[str]]) Replaces a user's information with the data supplied in request. - + :param id: str Databricks user ID. :param active: bool (optional) @@ -232,13 +232,13 @@ String that represents a concatenation of given and family names. For example `John Smith`. This field cannot be updated through the Workspace SCIM APIs when [identity federation is enabled]. Use Account SCIM APIs to update `displayName`. - + [identity federation is enabled]: https://docs.databricks.com/administration-guide/users-groups/best-practices.html#enable-identity-federation :param emails: List[:class:`ComplexValue`] (optional) All the emails associated with the Databricks user. :param entitlements: List[:class:`ComplexValue`] (optional) Entitlements assigned to the user. See [assigning entitlements] for a full list of supported values. - + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) External ID is not currently supported. It is reserved for future use. @@ -250,6 +250,6 @@ The schema of the user. :param user_name: str (optional) Email address of the Databricks user. - - + + \ No newline at end of file diff --git a/docs/account/iam/workspace_assignment.rst b/docs/account/iam/workspace_assignment.rst index 133b16f3d..9bb4c1420 100644 --- a/docs/account/iam/workspace_assignment.rst +++ b/docs/account/iam/workspace_assignment.rst @@ -11,22 +11,22 @@ Deletes the workspace permissions assignment in a given account and workspace for the specified principal. - + :param workspace_id: int The workspace ID for the account. :param principal_id: int The ID of the user, service principal, or group. - - + + .. py:method:: get(workspace_id: int) -> WorkspacePermissions Get an array of workspace permissions for the specified account and workspace. - + :param workspace_id: int The workspace ID. - + :returns: :class:`WorkspacePermissions` @@ -48,10 +48,10 @@ all = a.workspace_assignment.list(list=workspace_id) Get the permission assignments for the specified Databricks account and Databricks workspace. - + :param workspace_id: int The workspace ID for the account. - + :returns: Iterator over :class:`PermissionAssignment` @@ -84,7 +84,7 @@ Creates or updates the workspace permissions assignment in a given account and workspace for the specified principal. - + :param workspace_id: int The workspace ID. :param principal_id: int @@ -95,6 +95,6 @@ will be ignored. 
Note that excluding this field, or providing unsupported values, will have the same effect as providing an empty list, which will result in the deletion of all permissions for the principal. - + :returns: :class:`PermissionAssignment` \ No newline at end of file diff --git a/docs/account/oauth2/custom_app_integration.rst b/docs/account/oauth2/custom_app_integration.rst index 09a4ce463..5ec0faad2 100644 --- a/docs/account/oauth2/custom_app_integration.rst +++ b/docs/account/oauth2/custom_app_integration.rst @@ -10,9 +10,9 @@ .. py:method:: create( [, confidential: Optional[bool], name: Optional[str], redirect_urls: Optional[List[str]], scopes: Optional[List[str]], token_access_policy: Optional[TokenAccessPolicy], user_authorized_scopes: Optional[List[str]]]) -> CreateCustomAppIntegrationOutput Create Custom OAuth App Integration. - + You can retrieve the custom OAuth app integration via :method:CustomAppIntegration/get. - + :param confidential: bool (optional) This field indicates whether an OAuth client secret is required to authenticate this client. :param name: str (optional) @@ -27,7 +27,7 @@ :param user_authorized_scopes: List[str] (optional) Scopes that will need to be consented by end user to mint the access token. If the user does not authorize the access token will not be minted. Must be a subset of scopes. - + :returns: :class:`CreateCustomAppIntegrationOutput` @@ -35,30 +35,30 @@ Delete an existing Custom OAuth App Integration. You can retrieve the custom OAuth app integration via :method:CustomAppIntegration/get. - + :param integration_id: str - - + + .. py:method:: get(integration_id: str) -> GetCustomAppIntegrationOutput Gets the Custom OAuth App Integration for the given integration id. - + :param integration_id: str The OAuth app integration ID. - + :returns: :class:`GetCustomAppIntegrationOutput` .. py:method:: list( [, include_creator_username: Optional[bool], page_size: Optional[int], page_token: Optional[str]]) -> Iterator[GetCustomAppIntegrationOutput] Get the list of custom OAuth app integrations for the specified Databricks account - + :param include_creator_username: bool (optional) :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`GetCustomAppIntegrationOutput` @@ -66,7 +66,7 @@ Updates an existing custom OAuth App Integration. You can retrieve the custom OAuth app integration via :method:CustomAppIntegration/get. - + :param integration_id: str :param redirect_urls: List[str] (optional) List of OAuth redirect urls to be updated in the custom OAuth app integration @@ -78,6 +78,6 @@ :param user_authorized_scopes: List[str] (optional) Scopes that will need to be consented by end user to mint the access token. If the user does not authorize the access token will not be minted. Must be a subset of scopes. - - + + \ No newline at end of file diff --git a/docs/account/oauth2/federation_policy.rst b/docs/account/oauth2/federation_policy.rst index a8957e5f2..9eed9396b 100644 --- a/docs/account/oauth2/federation_policy.rst +++ b/docs/account/oauth2/federation_policy.rst @@ -5,20 +5,20 @@ .. py:class:: AccountFederationPolicyAPI These APIs manage account federation policies. - + Account federation policies allow users and service principals in your Databricks account to securely access Databricks APIs using tokens from your trusted identity providers (IdPs). - + With token federation, your users and service principals can exchange tokens from your IdP for Databricks OAuth tokens, which can be used to access Databricks APIs. 
Token federation eliminates the need to manage Databricks secrets, and allows you to centralize management of token issuance policies in your IdP. Databricks token federation is typically used in combination with [SCIM], so users in your IdP are synchronized into your Databricks account. - + Token federation is configured in your Databricks account using an account federation policy. An account federation policy specifies: * which IdP, or issuer, your Databricks account should accept tokens from * how to determine which Databricks user, or subject, a token is issued for - + To configure a federation policy, you provide the following: * The required token __issuer__, as specified in the “iss” claim of your tokens. The issuer is an https URL that identifies your IdP. * The allowed token __audiences__, as specified in the “aud” claim of your tokens. This identifier is intended to @@ -29,68 +29,68 @@ public keys used to validate the signature of your tokens, in JWKS format. If unspecified (recommended), Databricks automatically fetches the public keys from your issuer’s well known endpoint. Databricks strongly recommends relying on your issuer’s well known endpoint for discovering public keys. - + An example federation policy is: ``` issuer: "https://idp.mycompany.com/oidc" audiences: ["databricks"] subject_claim: "sub" ``` - + An example JWT token body that matches this policy and could be used to authenticate to Databricks as user `username@mycompany.com` is: ``` { "iss": "https://idp.mycompany.com/oidc", "aud": "databricks", "sub": "username@mycompany.com" } ``` - + You may also need to configure your IdP to generate tokens for your users to exchange with Databricks, if your users do not already have the ability to generate tokens that are compatible with your federation policy. - + You do not need to configure an OAuth application in Databricks to use token federation. - + [SCIM]: https://docs.databricks.com/admin/users-groups/scim/index.html .. py:method:: create(policy: FederationPolicy [, policy_id: Optional[str]]) -> FederationPolicy Create account federation policy. - + :param policy: :class:`FederationPolicy` :param policy_id: str (optional) The identifier for the federation policy. The identifier must contain only lowercase alphanumeric characters, numbers, hyphens, and slashes. If unspecified, the id will be assigned by Databricks. - + :returns: :class:`FederationPolicy` .. py:method:: delete(policy_id: str) Delete account federation policy. - + :param policy_id: str The identifier for the federation policy. - - + + .. py:method:: get(policy_id: str) -> FederationPolicy Get account federation policy. - + :param policy_id: str The identifier for the federation policy. - + :returns: :class:`FederationPolicy` .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[FederationPolicy] List account federation policies. - + :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`FederationPolicy` .. py:method:: update(policy_id: str, policy: FederationPolicy [, update_mask: Optional[str]]) -> FederationPolicy Update account federation policy. - + :param policy_id: str The identifier for the federation policy. :param policy: :class:`FederationPolicy` @@ -100,6 +100,6 @@ should be updated (full replacement). If unspecified, all fields that are set in the policy provided in the update request will overwrite the corresponding fields in the existing policy. Example value: 'description,oidc_policy.audiences'. 
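The example policy above, expressed with the SDK (a sketch; the `OidcFederationPolicy` field names are assumptions and the issuer/audience values are illustrative):

.. code-block:: python

    from databricks.sdk import AccountClient
    from databricks.sdk.service import oauth2

    a = AccountClient()

    created = a.federation_policy.create(
        policy=oauth2.FederationPolicy(
            oidc_policy=oauth2.OidcFederationPolicy(
                issuer="https://idp.mycompany.com/oidc",
                audiences=["databricks"],
                subject_claim="sub",
            )
        )
    )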
- + :returns: :class:`FederationPolicy` \ No newline at end of file diff --git a/docs/account/oauth2/o_auth_published_apps.rst b/docs/account/oauth2/o_auth_published_apps.rst index 98479d720..fb3ddc1f1 100644 --- a/docs/account/oauth2/o_auth_published_apps.rst +++ b/docs/account/oauth2/o_auth_published_apps.rst @@ -11,11 +11,11 @@ .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[PublishedAppOutput] Get all the available published OAuth apps in Databricks. - + :param page_size: int (optional) The max number of OAuth published apps to return in one page. :param page_token: str (optional) A token that can be used to get the next page of results. - + :returns: Iterator over :class:`PublishedAppOutput` \ No newline at end of file diff --git a/docs/account/oauth2/published_app_integration.rst b/docs/account/oauth2/published_app_integration.rst index df635113b..2c97726d6 100644 --- a/docs/account/oauth2/published_app_integration.rst +++ b/docs/account/oauth2/published_app_integration.rst @@ -10,14 +10,14 @@ .. py:method:: create( [, app_id: Optional[str], token_access_policy: Optional[TokenAccessPolicy]]) -> CreatePublishedAppIntegrationOutput Create Published OAuth App Integration. - + You can retrieve the published OAuth app integration via :method:PublishedAppIntegration/get. - + :param app_id: str (optional) App id of the OAuth published app integration. For example power-bi, tableau-desktop :param token_access_policy: :class:`TokenAccessPolicy` (optional) Token access policy - + :returns: :class:`CreatePublishedAppIntegrationOutput` @@ -25,28 +25,28 @@ Delete an existing Published OAuth App Integration. You can retrieve the published OAuth app integration via :method:PublishedAppIntegration/get. - + :param integration_id: str - - + + .. py:method:: get(integration_id: str) -> GetPublishedAppIntegrationOutput Gets the Published OAuth App Integration for the given integration id. - + :param integration_id: str - + :returns: :class:`GetPublishedAppIntegrationOutput` .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[GetPublishedAppIntegrationOutput] Get the list of published OAuth app integrations for the specified Databricks account - + :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`GetPublishedAppIntegrationOutput` @@ -54,10 +54,10 @@ Updates an existing published OAuth App Integration. You can retrieve the published OAuth app integration via :method:PublishedAppIntegration/get. - + :param integration_id: str :param token_access_policy: :class:`TokenAccessPolicy` (optional) Token access policy to be updated in the published OAuth app integration - - + + \ No newline at end of file diff --git a/docs/account/oauth2/service_principal_federation_policy.rst b/docs/account/oauth2/service_principal_federation_policy.rst index 3f7f275ee..2063b6b83 100644 --- a/docs/account/oauth2/service_principal_federation_policy.rst +++ b/docs/account/oauth2/service_principal_federation_policy.rst @@ -5,22 +5,22 @@ .. py:class:: ServicePrincipalFederationPolicyAPI These APIs manage service principal federation policies. - + Service principal federation, also known as Workload Identity Federation, allows your automated workloads running outside of Databricks to securely access Databricks APIs without the need for Databricks secrets.
With Workload Identity Federation, your application (or workload) authenticates to Databricks as a Databricks service principal, using tokens provided by the workload runtime. - + Databricks strongly recommends using Workload Identity Federation to authenticate to Databricks from automated workloads, over alternatives such as OAuth client secrets or Personal Access Tokens, whenever possible. Workload Identity Federation is supported by many popular services, including Github Actions, Azure DevOps, GitLab, Terraform Cloud, and Kubernetes clusters, among others. - + Workload identity federation is configured in your Databricks account using a service principal federation policy. A service principal federation policy specifies: * which IdP, or issuer, the service principal is allowed to authenticate from * which workload identity, or subject, is allowed to authenticate as the Databricks service principal - + To configure a federation policy, you provide the following: * The required token __issuer__, as specified in the “iss” claim of workload identity tokens. The issuer is an https URL that identifies the workload identity provider. * The required token __subject__, as specified in the “sub” claim of @@ -32,73 +32,73 @@ of the workload identity tokens, in JWKS format. If unspecified (recommended), Databricks automatically fetches the public keys from the issuer’s well known endpoint. Databricks strongly recommends relying on the issuer’s well known endpoint for discovering public keys. - + An example service principal federation policy, for a Github Actions workload, is: ``` issuer: "https://token.actions.githubusercontent.com" audiences: ["https://github.com/my-github-org"] subject: "repo:my-github-org/my-repo:environment:prod" ``` - + An example JWT token body that matches this policy and could be used to authenticate to Databricks is: ``` { "iss": "https://token.actions.githubusercontent.com", "aud": "https://github.com/my-github-org", "sub": "repo:my-github-org/my-repo:environment:prod" } ``` - + You may also need to configure the workload runtime to generate tokens for your workloads. - + You do not need to configure an OAuth application in Databricks to use token federation. .. py:method:: create(service_principal_id: int, policy: FederationPolicy [, policy_id: Optional[str]]) -> FederationPolicy Create account federation policy. - + :param service_principal_id: int The service principal id for the federation policy. :param policy: :class:`FederationPolicy` :param policy_id: str (optional) The identifier for the federation policy. The identifier must contain only lowercase alphanumeric characters, numbers, hyphens, and slashes. If unspecified, the id will be assigned by Databricks. - + :returns: :class:`FederationPolicy` .. py:method:: delete(service_principal_id: int, policy_id: str) Delete account federation policy. - + :param service_principal_id: int The service principal id for the federation policy. :param policy_id: str The identifier for the federation policy. - - + + .. py:method:: get(service_principal_id: int, policy_id: str) -> FederationPolicy Get account federation policy. - + :param service_principal_id: int The service principal id for the federation policy. :param policy_id: str The identifier for the federation policy. - + :returns: :class:`FederationPolicy` .. py:method:: list(service_principal_id: int [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[FederationPolicy] List account federation policies. 
- + :param service_principal_id: int The service principal id for the federation policy. :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`FederationPolicy` .. py:method:: update(service_principal_id: int, policy_id: str, policy: FederationPolicy [, update_mask: Optional[str]]) -> FederationPolicy Update account federation policy. - + :param service_principal_id: int The service principal id for the federation policy. :param policy_id: str @@ -110,6 +110,6 @@ should be updated (full replacement). If unspecified, all fields that are set in the policy provided in the update request will overwrite the corresponding fields in the existing policy. Example value: 'description,oidc_policy.audiences'. - + :returns: :class:`FederationPolicy` \ No newline at end of file diff --git a/docs/account/oauth2/service_principal_secrets.rst b/docs/account/oauth2/service_principal_secrets.rst index e95b779f0..ba2f44bab 100644 --- a/docs/account/oauth2/service_principal_secrets.rst +++ b/docs/account/oauth2/service_principal_secrets.rst @@ -5,48 +5,47 @@ .. py:class:: ServicePrincipalSecretsAPI These APIs enable administrators to manage service principal secrets. - + You can use the generated secrets to obtain OAuth access tokens for a service principal, which can then be used to access Databricks Accounts and Workspace APIs. For more information, see [Authentication using OAuth tokens for service principals]. - + In addition, the generated secrets can be used to configure the Databricks Terraform Provider to authenticate with the service principal. For more information, see [Databricks Terraform Provider]. - + [Authentication using OAuth tokens for service principals]: https://docs.databricks.com/dev-tools/authentication-oauth.html [Databricks Terraform Provider]: https://github.com/databricks/terraform-provider-databricks/blob/master/docs/index.md#authenticating-with-service-principal - .. py:method:: create(service_principal_id: str [, lifetime: Optional[str]]) -> CreateServicePrincipalSecretResponse Create a secret for the given service principal. - + :param service_principal_id: str The service principal ID. :param lifetime: str (optional) The lifetime of the secret in seconds. If this parameter is not provided, the secret will have a default lifetime of 730 days (63072000s). - + :returns: :class:`CreateServicePrincipalSecretResponse` .. py:method:: delete(service_principal_id: str, secret_id: str) Delete a secret from the given service principal. - + :param service_principal_id: str The service principal ID. :param secret_id: str The secret ID. - - + + .. py:method:: list(service_principal_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[SecretInfo] List all secrets associated with the given service principal. This operation only returns information about the secrets themselves and does not include the secret values. - + :param service_principal_id: str The service principal ID. :param page_size: int (optional) @@ -57,6 +56,6 @@ previous request. To list all of the secrets for a service principal, it is necessary to continue requesting pages of entries until the response contains no `next_page_token`. Note that the number of entries returned must not be used to determine when the listing is complete. 
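A minimal sketch of minting and listing secrets for a service principal (the ID and lifetime value are placeholders; field names on :class:`SecretInfo` should be verified):

.. code-block:: python

    from databricks.sdk import AccountClient

    a = AccountClient()

    sp_id = "<service-principal-id>"
    secret = a.service_principal_secrets.create(service_principal_id=sp_id, lifetime="3600s")
    for info in a.service_principal_secrets.list(service_principal_id=sp_id):
        print(info.id)  # metadata only; list never returns secret values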
- + :returns: Iterator over :class:`SecretInfo` \ No newline at end of file diff --git a/docs/account/provisioning/credentials.rst b/docs/account/provisioning/credentials.rst index acb958c8c..042d57f00 100644 --- a/docs/account/provisioning/credentials.rst +++ b/docs/account/provisioning/credentials.rst @@ -38,19 +38,19 @@ specified account. Databricks uses this to set up network infrastructure properly to host Databricks clusters. For your AWS IAM role, you need to trust the External ID (the Databricks Account API account ID) in the returned credential object, and configure the required access policy. - + Save the response's `credentials_id` field, which is the ID for your new credential configuration object. - + For information about how to create a new workspace with this API, see [Create a new workspace using the Account API] - + [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html - + :param credentials_name: str The human-readable name of the credential configuration object. :param aws_credentials: :class:`CreateCredentialAwsCredentials` - + :returns: :class:`Credential` @@ -58,11 +58,11 @@ Deletes a Databricks credential configuration object for an account, both specified by ID. You cannot delete a credential that is associated with any workspace. - + :param credentials_id: str Databricks Account API credential configuration ID - - + + .. py:method:: get(credentials_id: str) -> Credential @@ -93,10 +93,10 @@ a.credentials.delete(credentials_id=role.credentials_id) Gets a Databricks credential configuration object for an account, both specified by ID. - + :param credentials_id: str Databricks Account API credential configuration ID - + :returns: :class:`Credential` @@ -114,7 +114,7 @@ configs = a.credentials.list() Gets all Databricks credential configurations associated with an account specified by ID. - - + + :returns: Iterator over :class:`Credential` \ No newline at end of file diff --git a/docs/account/provisioning/encryption_keys.rst b/docs/account/provisioning/encryption_keys.rst index 3e190c250..a8247db0f 100644 --- a/docs/account/provisioning/encryption_keys.rst +++ b/docs/account/provisioning/encryption_keys.rst @@ -7,11 +7,11 @@ These APIs manage encryption key configurations for this workspace (optional). A key configuration encapsulates the AWS KMS key information and some information about how the key configuration can be used. There are two possible uses for key configurations: - + * Managed services: A key configuration can be used to encrypt a workspace's notebook and secret data in the control plane, as well as Databricks SQL queries and query history. * Storage: A key configuration can be used to encrypt a workspace's DBFS and EBS data in the data plane. - + In both of these cases, the key configuration's ID is used when creating a new workspace. This Preview feature is available if your account is on the E2 version of the platform. Updating a running workspace with workspace storage encryption requires that the workspace is on the E2 version of the platform. If you @@ -50,18 +50,18 @@ specified as a workspace's customer-managed key for workspace storage, the key encrypts the workspace's root S3 bucket (which contains the workspace's root DBFS and system data) and, optionally, cluster EBS volume data. - + **Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions that currently support creation of Databricks workspaces. 
- + This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - + :param use_cases: List[:class:`KeyUseCase`] The cases that the key can be used for. :param aws_key_info: :class:`CreateAwsKeyInfo` (optional) :param gcp_key_info: :class:`CreateGcpKeyInfo` (optional) - + :returns: :class:`CustomerManagedKey` @@ -69,11 +69,11 @@ Deletes a customer-managed key configuration object for an account. You cannot delete a configuration that is associated with a running workspace. - + :param customer_managed_key_id: str Databricks encryption key configuration ID. - - + + .. py:method:: get(customer_managed_key_id: str) -> CustomerManagedKey @@ -110,15 +110,15 @@ specified as a workspace's customer-managed key for storage, the key encrypts the workspace's root S3 bucket (which contains the workspace's root DBFS and system data) and, optionally, cluster EBS volume data. - + **Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions. - + This operation is available only if your account is on the E2 version of the platform. - + :param customer_managed_key_id: str Databricks encryption key configuration ID. - + :returns: :class:`CustomerManagedKey` @@ -140,12 +140,12 @@ notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If the key is specified as a workspace's storage customer-managed key, the key is used to encrypt the workspace's root S3 bucket and optionally can encrypt cluster EBS volume data in the data plane. - + **Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions. - + This operation is available only if your account is on the E2 version of the platform. - - + + :returns: Iterator over :class:`CustomerManagedKey` \ No newline at end of file diff --git a/docs/account/provisioning/networks.rst b/docs/account/provisioning/networks.rst index d558cdcf5..b5601b1bc 100644 --- a/docs/account/provisioning/networks.rst +++ b/docs/account/provisioning/networks.rst @@ -29,7 +29,7 @@ Creates a Databricks network configuration that represents a VPC and its resources. The VPC will be used for new Databricks clusters. This requires a pre-existing VPC and subnets. - + :param network_name: str The human-readable name of the network configuration. :param gcp_network_info: :class:`GcpNetworkInfo` (optional) @@ -43,7 +43,7 @@ :param vpc_id: str (optional) The ID of the VPC associated with this network. VPC IDs can be used in multiple network configurations. - + :returns: :class:`Network` @@ -51,13 +51,13 @@ Deletes a Databricks network configuration, which represents a cloud VPC and its resources. You cannot delete a network that is associated with a workspace. - + This operation is available only if your account is on the E2 version of the platform. - + :param network_id: str Databricks Account API network configuration ID. - - + + .. py:method:: get(network_id: str) -> Network @@ -83,10 +83,10 @@ by_id = a.networks.get(network_id=netw.network_id) Gets a Databricks network configuration, which represents a cloud VPC and its resources. - + :param network_id: str Databricks Account API network configuration ID. - + :returns: :class:`Network` @@ -104,9 +104,9 @@ configs = a.networks.list() Gets a list of all Databricks network configurations for an account, specified by ID.
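Looking back at the encryption-key API, a minimal sketch of registering a customer-managed key for both use cases (the ARN and alias are placeholders):

.. code-block:: python

    from databricks.sdk import AccountClient
    from databricks.sdk.service import provisioning

    a = AccountClient()

    key = a.encryption_keys.create(
        use_cases=[provisioning.KeyUseCase.MANAGED_SERVICES, provisioning.KeyUseCase.STORAGE],
        aws_key_info=provisioning.CreateAwsKeyInfo(
            key_arn="arn:aws:kms:us-west-2:111122223333:key/<key-id>",
            key_alias="alias/<alias-name>",
        ),
    )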
- + This operation is available only if your account is on the E2 version of the platform. - - + + :returns: Iterator over :class:`Network` \ No newline at end of file diff --git a/docs/account/provisioning/private_access.rst b/docs/account/provisioning/private_access.rst index 8b28bcecb..b9e3aec84 100644 --- a/docs/account/provisioning/private_access.rst +++ b/docs/account/provisioning/private_access.rst @@ -31,16 +31,16 @@ Creates a private access settings object, which specifies how your workspace is accessed over [AWS PrivateLink]. To use AWS PrivateLink, a workspace must have a private access settings object referenced by ID in the workspace's `private_access_settings_id` property. - + You can share one private access settings object with multiple workspaces in a single account. However, private access settings are specific to AWS regions, so only workspaces in the same AWS region can use a given private access settings object. - + Before configuring PrivateLink, read the [Databricks article about PrivateLink]. - + [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html - + :param private_access_settings_name: str The human-readable name of the private access settings object. :param region: str @@ -49,21 +49,21 @@ An array of Databricks VPC endpoint IDs. This is the Databricks ID that is returned when registering the VPC endpoint configuration in your Databricks account. This is not the ID of the VPC endpoint in AWS. - + Only used when `private_access_level` is set to `ENDPOINT`. This is an allow list of VPC endpoints in your account that can connect to your workspace over AWS PrivateLink. - + If hybrid access to your workspace is enabled by setting `public_access_enabled` to `true`, this control only works for PrivateLink connections. To control how your workspace is accessed via public internet, see [IP access lists]. - + [IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html :param private_access_level: :class:`PrivateAccessLevel` (optional) :param public_access_enabled: bool (optional) Determines if the workspace can be accessed over public internet. For fully private workspaces, you can optionally specify `false`, but only if you implement both the front-end and the back-end PrivateLink connections. Otherwise, specify `true`, which means that public access is enabled. - + :returns: :class:`PrivateAccessSettings` @@ -71,16 +71,16 @@ Deletes a private access settings object, which determines how your workspace is accessed over [AWS PrivateLink]. - + Before configuring PrivateLink, read the [Databricks article about PrivateLink]. - + [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html - + :param private_access_settings_id: str Databricks Account API private access settings ID. - - + + .. py:method:: get(private_access_settings_id: str) -> PrivateAccessSettings @@ -109,15 +109,15 @@ Gets a private access settings object, which specifies how your workspace is accessed over [AWS PrivateLink].
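A minimal sketch of creating the private access settings object described above; the name and region are placeholders:

.. code-block:: python

    from databricks.sdk import AccountClient

    a = AccountClient()

    # Create a private access settings object for one AWS region; workspaces
    # in that region can then reference it by ID.
    pas = a.private_access.create(
        private_access_settings_name="sdk-example-pas",
        region="us-west-2",
    )
    print(pas.private_access_settings_id)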
- + Before configuring PrivateLink, read the [Databricks article about PrivateLink]. - + [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html - + :param private_access_settings_id: str Databricks Account API private access settings ID. - + :returns: :class:`PrivateAccessSettings` @@ -135,8 +135,8 @@ all = a.private_access.list() Gets a list of all private access settings objects for an account, specified by ID. - - + + :returns: Iterator over :class:`PrivateAccessSettings` @@ -171,21 +171,21 @@ Updates an existing private access settings object, which specifies how your workspace is accessed over [AWS PrivateLink]. To use AWS PrivateLink, a workspace must have a private access settings object referenced by ID in the workspace's `private_access_settings_id` property. - + This operation completely overwrites your existing private access settings object attached to your workspaces. All workspaces attached to the private access settings are affected by any change. If `public_access_enabled`, `private_access_level`, or `allowed_vpc_endpoint_ids` are updated, effects of these changes might take several minutes to propagate to the workspace API. - + You can share one private access settings object with multiple workspaces in a single account. However, private access settings are specific to AWS regions, so only workspaces in the same AWS region can use a given private access settings object. - + Before configuring PrivateLink, read the [Databricks article about PrivateLink]. - + [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html - + :param private_access_settings_id: str Databricks Account API private access settings ID. :param private_access_settings_name: str @@ -196,20 +196,20 @@ An array of Databricks VPC endpoint IDs. This is the Databricks ID that is returned when registering the VPC endpoint configuration in your Databricks account. This is not the ID of the VPC endpoint in AWS. - + Only used when `private_access_level` is set to `ENDPOINT`. This is an allow list of VPC endpoints in your account that can connect to your workspace over AWS PrivateLink. - + If hybrid access to your workspace is enabled by setting `public_access_enabled` to `true`, this control only works for PrivateLink connections. To control how your workspace is accessed via public internet, see [IP access lists]. - + [IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html :param private_access_level: :class:`PrivateAccessLevel` (optional) :param public_access_enabled: bool (optional) Determines if the workspace can be accessed over public internet. For fully private workspaces, you can optionally specify `false`, but only if you implement both the front-end and the back-end PrivateLink connections. Otherwise, specify `true`, which means that public access is enabled. - - + + \ No newline at end of file diff --git a/docs/account/provisioning/storage.rst b/docs/account/provisioning/storage.rst index a72721a6d..37f832cf0 100644 --- a/docs/account/provisioning/storage.rst +++ b/docs/account/provisioning/storage.rst @@ -36,16 +36,16 @@ object that represents the root AWS S3 bucket in your account. Databricks stores related workspace assets including DBFS, cluster logs, and job results.
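The overwrite-style update described above is exposed in the Python SDK as `replace`. A sketch reusing the `pas` object from the previous example; values are placeholders, and because the call overwrites the whole object, every field you want to keep must be passed again:

.. code-block:: python

    # Fully replace the private access settings object.
    a.private_access.replace(
        private_access_settings_id=pas.private_access_settings_id,
        private_access_settings_name="sdk-example-pas",
        region="us-west-2",
        public_access_enabled=False,
    )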
For the AWS S3 bucket, you need to configure the required bucket policy. - + For information about how to create a new workspace with this API, see [Create a new workspace using the Account API] - + [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html - + :param storage_configuration_name: str The human-readable name of the storage configuration. :param root_bucket_info: :class:`RootBucketInfo` - + :returns: :class:`StorageConfiguration` @@ -53,11 +53,11 @@ Deletes a Databricks storage configuration. You cannot delete a storage configuration that is associated with any workspace. - + :param storage_configuration_id: str Databricks Account API storage configuration ID. - - + + .. py:method:: get(storage_configuration_id: str) -> StorageConfiguration @@ -82,10 +82,10 @@ by_id = a.storage.get(storage_configuration_id=storage.storage_configuration_id) Gets a Databricks storage configuration for an account, both specified by ID. - + :param storage_configuration_id: str Databricks Account API storage configuration ID. - + :returns: :class:`StorageConfiguration` @@ -103,7 +103,7 @@ configs = a.storage.list() Gets a list of all Databricks storage configurations for your account, specified by ID. - - + + :returns: Iterator over :class:`StorageConfiguration` \ No newline at end of file diff --git a/docs/account/provisioning/vpc_endpoints.rst b/docs/account/provisioning/vpc_endpoints.rst index b639f3e1b..76eb3870f 100644 --- a/docs/account/provisioning/vpc_endpoints.rst +++ b/docs/account/provisioning/vpc_endpoints.rst @@ -31,17 +31,17 @@ Creates a VPC endpoint configuration, which represents a [VPC endpoint] object in AWS used to communicate privately with Databricks over [AWS PrivateLink]. - + After you create the VPC endpoint configuration, the Databricks [endpoint service] automatically accepts the VPC endpoint. - + Before configuring PrivateLink, read the [Databricks article about PrivateLink]. - + [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html - + :param vpc_endpoint_name: str The human-readable name of the VPC endpoint configuration. :param aws_vpc_endpoint_id: str (optional) @@ -49,7 +49,7 @@ :param gcp_vpc_endpoint_info: :class:`GcpVpcEndpointInfo` (optional) :param region: str (optional) The AWS region in which this VPC endpoint object exists. - + :returns: :class:`VpcEndpoint` @@ -57,17 +57,17 @@ Deletes a VPC endpoint configuration, which represents an [AWS VPC endpoint] that can communicate privately with Databricks over [AWS PrivateLink]. - + Before configuring PrivateLink, read the [Databricks article about PrivateLink]. - + [AWS PrivateLink]: https://aws.amazon.com/privatelink [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html - + :param vpc_endpoint_id: str Databricks VPC endpoint ID. - - + + .. py:method:: get(vpc_endpoint_id: str) -> VpcEndpoint @@ -97,13 +97,13 @@ Gets a VPC endpoint configuration, which represents a [VPC endpoint] object in AWS used to communicate privately with Databricks over [AWS PrivateLink].
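A minimal sketch of registering the root S3 bucket as a storage configuration, per the API documented above; the bucket name is a placeholder:

.. code-block:: python

    from databricks.sdk import AccountClient
    from databricks.sdk.service import provisioning

    a = AccountClient()

    storage = a.storage.create(
        storage_configuration_name="sdk-example-storage",
        root_bucket_info=provisioning.RootBucketInfo(bucket_name="example-root-bucket"),
    )
    print(storage.storage_configuration_id)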
- + [AWS PrivateLink]: https://aws.amazon.com/privatelink [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html - + :param vpc_endpoint_id: str Databricks VPC endpoint ID. - + :returns: :class:`VpcEndpoint` @@ -121,11 +121,11 @@ all = a.vpc_endpoints.list() Gets a list of all VPC endpoints for an account, specified by ID. - + Before configuring PrivateLink, read the [Databricks article about PrivateLink]. - + [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html - - + + :returns: Iterator over :class:`VpcEndpoint` \ No newline at end of file diff --git a/docs/account/provisioning/workspaces.rst b/docs/account/provisioning/workspaces.rst index f217f57eb..3bd84a166 100644 --- a/docs/account/provisioning/workspaces.rst +++ b/docs/account/provisioning/workspaces.rst @@ -7,7 +7,7 @@ These APIs manage workspaces for this account. A Databricks workspace is an environment for accessing all of your Databricks assets. The workspace organizes objects (notebooks, libraries, and experiments) into folders, and provides access to data and computational resources such as clusters and jobs. - + These endpoints are available if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. @@ -51,14 +51,14 @@ a.workspaces.delete(workspace_id=waiter.workspace_id) Creates a new workspace. - + **Important**: This operation is asynchronous. A response with HTTP status code 200 means the request has been accepted and is in progress, but does not mean that the workspace deployed successfully and is running. The initial workspace status is typically `PROVISIONING`. Use the workspace ID (`workspace_id`) field in the response to identify the new workspace and make repeated `GET` requests with the workspace ID and check its status. The workspace becomes available when the status changes to `RUNNING`. - + :param workspace_name: str The workspace's human-readable name. :param aws_region: str (optional) @@ -79,22 +79,22 @@ deployment name is `abcsales`, your workspace URL will be `https://abcsales.cloud.databricks.com`. Hyphens are allowed. This property supports only the set of characters that are allowed in a subdomain. - + To set this value, you must have a deployment name prefix. Contact your Databricks account team to add an account deployment name prefix to your account. - + Workspace deployment names follow the account prefix and a hyphen. For example, if your account's deployment prefix is `acme` and the workspace deployment name is `workspace-1`, the JSON response for the `deployment_name` field becomes `acme-workspace-1`. The workspace URL would be `acme-workspace-1.cloud.databricks.com`. - + You can also set the `deployment_name` to the reserved keyword `EMPTY` if you want the deployment name to only include the deployment prefix. For example, if your account's deployment prefix is `acme` and the workspace deployment name is `EMPTY`, the `deployment_name` becomes `acme` only and the workspace URL is `acme.cloud.databricks.com`. - + This value must be unique across all non-deleted deployments across all AWS regions. - + If a new workspace omits this property, the server generates a unique deployment name for you with the pattern `dbc-xxxxxxxx-xxxx`. :param gcp_managed_network_config: :class:`GcpManagedNetworkConfig` (optional) @@ -113,9 +113,9 @@ ID of the workspace's private access settings object. Only used for PrivateLink. 
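A hedged sketch of registering an AWS VPC endpoint with Databricks, as documented above; the endpoint ID and region are placeholders:

.. code-block:: python

    from databricks.sdk import AccountClient

    a = AccountClient()

    # After this call, the Databricks endpoint service accepts the endpoint
    # asynchronously.
    endpoint = a.vpc_endpoints.create(
        vpc_endpoint_name="sdk-example-vpce",
        aws_vpc_endpoint_id="vpce-0123456789abcdef0",
        region="us-west-2",
    )
    print(endpoint.vpc_endpoint_id)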
This ID must be specified for customers using [AWS PrivateLink] for either front-end (user-to-workspace connection), back-end (data plane to control plane connection), or both connection types. - + Before configuring PrivateLink, read the [Databricks article about PrivateLink]. - + [AWS PrivateLink]: https://aws.amazon.com/privatelink/ [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html :param storage_configuration_id: str (optional) @@ -124,7 +124,7 @@ The ID of the workspace's storage encryption key configuration object. This is used to encrypt the workspace's root S3 bucket (root DBFS and system data) and, optionally, cluster EBS volumes. The provided key configuration object property `use_cases` must contain `STORAGE`. - + :returns: Long-running operation waiter for :class:`Workspace`. See :method:wait_get_workspace_running for more details. @@ -138,14 +138,14 @@ Terminates and deletes a Databricks workspace. From an API perspective, deletion is immediate. However, it might take a few minutes for all workspace resources to be deleted, depending on the size and number of workspace resources. - + This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - + :param workspace_id: int Workspace ID. - - + + .. py:method:: get(workspace_id: int) -> Workspace @@ -167,18 +167,18 @@ `workspace_status` field indicates the current status. After initial workspace creation (which is asynchronous), make repeated `GET` requests with the workspace ID and check its status. The workspace becomes available when the status changes to `RUNNING`. - + For information about how to create a new workspace with this API **including error handling**, see [Create a new workspace using the Account API]. - + This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - + [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html - + :param workspace_id: int Workspace ID. - + :returns: :class:`Workspace` @@ -196,11 +196,11 @@ all = a.workspaces.list() Gets a list of all workspaces associated with an account, specified by ID. - + This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - - + + :returns: Iterator over :class:`Workspace` @@ -238,7 +238,7 @@ Updates a workspace configuration for either a running workspace or a failed workspace. The elements that can be updated vary between these two use cases. - + ### Update a failed workspace You can update a Databricks workspace configuration for failed workspace deployment for some fields, but not all fields. For a failed workspace, this request supports updates to the following fields only: - Credential configuration ID - Storage configuration ID - Network @@ -260,14 +260,14 @@ update the network connectivity configuration ID to ensure the workspace uses the same set of stable IP CIDR blocks to access your resources. You cannot remove a network connectivity configuration from the workspace once attached, you can only switch to another one. - + After calling the `PATCH` operation to update the workspace configuration, make repeated `GET` requests with the workspace ID and check the workspace status.
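Because workspace creation is asynchronous, the SDK returns a waiter that performs the repeated `GET` polling described above. A sketch reusing the `role`, `storage`, and `netw` objects from the earlier examples:

.. code-block:: python

    waiter = a.workspaces.create(
        workspace_name="sdk-example-workspace",
        aws_region="us-west-2",
        credentials_id=role.credentials_id,
        storage_configuration_id=storage.storage_configuration_id,
        network_id=netw.network_id,
    )

    # Blocks until the workspace status reaches RUNNING.
    workspace = waiter.result()
    print(workspace.workspace_status)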
The update is successful if the workspace status changes to `RUNNING`. - + For information about how to create a new workspace with this API **including error handling**, see [Create a new workspace using the Account API]. - + ### Update a running workspace You can update a Databricks workspace configuration for running workspaces for some fields, but not all fields. For a running workspace, this request supports updating the following fields only: - Credential configuration ID - Network configuration ID. Used @@ -293,12 +293,12 @@ network connectivity configuration ID to ensure the workspace uses the same set of stable IP CIDR blocks to access your resources. You cannot remove a network connectivity configuration from the workspace once attached, you can only switch to another one. - + **Important**: To update a running workspace, your workspace must have no running compute resources that run in your workspace's VPC in the Classic data plane. For example, stop all all-purpose clusters, job clusters, pools with running clusters, and Classic SQL warehouses. If you do not terminate all cluster instances in the workspace before calling this API, the request will fail. - + ### Wait until changes take effect. After calling the `PATCH` operation to update the workspace configuration, make repeated `GET` requests with the workspace ID and check the workspace status and the status of the fields. * For workspaces with a Databricks-managed VPC, the workspace status becomes @@ -314,22 +314,22 @@ silently to its original configuration. After the workspace has been updated, you cannot use or create clusters for another 20 minutes. If you create or use clusters before this time interval elapses, clusters might not launch successfully, might fail, or could cause other unexpected behavior. - + If you update the _storage_ customer-managed key configurations, it takes 20 minutes for the changes to fully take effect. During the 20 minute wait, it is important that you stop all REST API calls to the DBFS API. If you are modifying _only the managed services key configuration_, you can omit the 20 minute wait. - + **Important**: Customer-managed keys and customer-managed VPCs are supported by only some deployment types and subscription types. If you have questions about availability, contact your Databricks representative. - + This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - + [Account Console]: https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html - + :param workspace_id: int Workspace ID. :param aws_region: str (optional) @@ -359,7 +359,7 @@ :param storage_customer_managed_key_id: str (optional) The ID of the key configuration object for workspace storage. This parameter is available for updating both failed and running workspaces. - + :returns: Long-running operation waiter for :class:`Workspace`. See :method:wait_get_workspace_running for more details. diff --git a/docs/account/settings/csp_enablement_account.rst b/docs/account/settings/csp_enablement_account.rst index 815203fff..f3c0b2987 100644 --- a/docs/account/settings/csp_enablement_account.rst +++ b/docs/account/settings/csp_enablement_account.rst @@ -7,28 +7,28 @@ The compliance security profile settings at the account level control whether to enable it for new workspaces.
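The update flow described above is also a long-running operation in the SDK. A hedged sketch that swaps the credential configuration on a workspace, reusing the `workspace` object from the previous example; the new credentials ID is a placeholder:

.. code-block:: python

    # PATCH the workspace, then poll until the change takes effect.
    updated = a.workspaces.update(
        workspace_id=workspace.workspace_id,
        credentials_id="<new-credentials-id>",
    ).result()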
By default, this account-level setting is disabled for new workspaces. After workspace creation, account admins can enable the compliance security profile individually for each workspace. - + This setting can be disabled so that new workspaces do not have the compliance security profile enabled by default. .. py:method:: get( [, etag: Optional[str]]) -> CspEnablementAccountSetting Gets the compliance security profile setting for new workspaces. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`CspEnablementAccountSetting` .. py:method:: update(allow_missing: bool, setting: CspEnablementAccountSetting, field_mask: str) -> CspEnablementAccountSetting Updates the value of the compliance security profile setting for new workspaces. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`CspEnablementAccountSetting` @@ -38,10 +38,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`CspEnablementAccountSetting` \ No newline at end of file diff --git a/docs/account/settings/disable_legacy_features.rst b/docs/account/settings/disable_legacy_features.rst index 5c3c2656d..b0d739e38 100644 --- a/docs/account/settings/disable_legacy_features.rst +++ b/docs/account/settings/disable_legacy_features.rst @@ -5,7 +5,7 @@ .. py:class:: DisableLegacyFeaturesAPI Disable legacy features for new Databricks workspaces. - + For newly created workspaces: 1. Disables the use of DBFS root and mounts. 2. Hive Metastore will not be provisioned. 3. Disables the use of ‘No-isolation clusters’. 4. Disables Databricks Runtime versions prior to 13.3LTS. @@ -13,35 +13,35 @@ .. py:method:: delete( [, etag: Optional[str]]) -> DeleteDisableLegacyFeaturesResponse Deletes the disable legacy features setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DeleteDisableLegacyFeaturesResponse` .. py:method:: get( [, etag: Optional[str]]) -> DisableLegacyFeatures Gets the value of the disable legacy features setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided.
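A short sketch of the etag-based read -> delete pattern that the parameter description above recommends; it assumes the returned setting carries an `etag` field, as the Settings APIs do:

.. code-block:: python

    from databricks.sdk import AccountClient

    a = AccountClient()

    # Read the current version, then pass its etag to delete so a concurrent
    # writer cannot be silently overwritten.
    current = a.settings.disable_legacy_features.get()
    a.settings.disable_legacy_features.delete(etag=current.etag)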
This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DisableLegacyFeatures` .. py:method:: update(allow_missing: bool, setting: DisableLegacyFeatures, field_mask: str) -> DisableLegacyFeatures Updates the value of the disable legacy features setting. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`DisableLegacyFeatures` @@ -51,10 +51,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`DisableLegacyFeatures` \ No newline at end of file diff --git a/docs/account/settings/enable_ip_access_lists.rst b/docs/account/settings/enable_ip_access_lists.rst index f23187dc7..83c76aaf8 100644 --- a/docs/account/settings/enable_ip_access_lists.rst +++ b/docs/account/settings/enable_ip_access_lists.rst @@ -10,35 +10,35 @@ .. py:method:: delete( [, etag: Optional[str]]) -> DeleteAccountIpAccessEnableResponse Reverts the value of the account IP access toggle setting to default (ON) - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DeleteAccountIpAccessEnableResponse` .. py:method:: get( [, etag: Optional[str]]) -> AccountIpAccessEnable Gets the value of the account IP access toggle setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`AccountIpAccessEnable` .. py:method:: update(allow_missing: bool, setting: AccountIpAccessEnable, field_mask: str) -> AccountIpAccessEnable Updates the value of the account IP access toggle setting. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`AccountIpAccessEnable` @@ -48,10 +48,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. 
Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`AccountIpAccessEnable` \ No newline at end of file diff --git a/docs/account/settings/esm_enablement_account.rst b/docs/account/settings/esm_enablement_account.rst index 1764b7a08..c7f8006b6 100644 --- a/docs/account/settings/esm_enablement_account.rst +++ b/docs/account/settings/esm_enablement_account.rst @@ -11,21 +11,21 @@ .. py:method:: get( [, etag: Optional[str]]) -> EsmEnablementAccountSetting Gets the enhanced security monitoring setting for new workspaces. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`EsmEnablementAccountSetting` .. py:method:: update(allow_missing: bool, setting: EsmEnablementAccountSetting, field_mask: str) -> EsmEnablementAccountSetting Updates the value of the enhanced security monitoring setting for new workspaces. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`EsmEnablementAccountSetting` @@ -35,10 +35,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`EsmEnablementAccountSetting` \ No newline at end of file diff --git a/docs/account/settings/ip_access_lists.rst b/docs/account/settings/ip_access_lists.rst index e59783116..9ff2dfb29 100644 --- a/docs/account/settings/ip_access_lists.rst +++ b/docs/account/settings/ip_access_lists.rst @@ -6,79 +6,79 @@ The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console. - + Account IP Access Lists affect web application access and REST API access to the account console and account APIs. If the feature is disabled for the account, all access is allowed for this account. There is support for allow lists (inclusion) and block lists (exclusion). - + When a connection is attempted: 1. **First, all block lists are checked.** If the connection IP address matches any block list, the connection is rejected. 2. **If the connection was not rejected by block lists**, the IP address is compared with the allow lists. - + If there is at least one allow list for the account, the connection is allowed only if the IP address matches an allow list. If there are no allow lists for the account, all IP addresses are allowed. - + For all allow lists and block lists combined, the account supports a maximum of 1000 IP/CIDR values, where one CIDR counts as a single value. 
- + After changes to the account-level IP access lists, it can take a few minutes for changes to take effect. .. py:method:: create(label: str, list_type: ListType [, ip_addresses: Optional[List[str]]]) -> CreateIpAccessListResponse Creates an IP access list for the account. - + A list can be an allow list or a block list. See the top of this file for a description of how the server treats allow lists and block lists at runtime. - + When creating or updating an IP access list: - + * For all allow lists and block lists combined, the API supports a maximum of 1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to exceed that number return error 400 with `error_code` value `QUOTA_EXCEEDED`. * If the new list would block the calling user's current IP, error 400 is returned with `error_code` value `INVALID_STATE`. - + It can take a few minutes for the changes to take effect. - + :param label: str Label for the IP access list. This **cannot** be empty. :param list_type: :class:`ListType` :param ip_addresses: List[str] (optional) - + :returns: :class:`CreateIpAccessListResponse` .. py:method:: delete(ip_access_list_id: str) Deletes an IP access list, specified by its list ID. - + :param ip_access_list_id: str The ID for the corresponding IP access list - - + + .. py:method:: get(ip_access_list_id: str) -> GetIpAccessListResponse Gets an IP access list, specified by its list ID. - + :param ip_access_list_id: str The ID for the corresponding IP access list - + :returns: :class:`GetIpAccessListResponse` .. py:method:: list() -> Iterator[IpAccessListInfo] Gets all IP access lists for the specified account. - - + + :returns: Iterator over :class:`IpAccessListInfo` .. py:method:: replace(ip_access_list_id: str, label: str, list_type: ListType, enabled: bool [, ip_addresses: Optional[List[str]]]) Replaces an IP access list, specified by its ID. - + A list can include allow lists and block lists. See the top of this file for a description of how the server treats allow lists and block lists at run time. When replacing an IP access list: * For all allow lists and block lists combined, the API supports a maximum of 1000 IP/CIDR values, where one @@ -86,7 +86,7 @@ `QUOTA_EXCEEDED`. * If the resulting list would block the calling user's current IP, error 400 is returned with `error_code` value `INVALID_STATE`. It can take a few minutes for the changes to take effect. - + :param ip_access_list_id: str The ID for the corresponding IP access list :param label: str @@ -95,26 +95,26 @@ :param enabled: bool Specifies whether this IP access list is enabled. :param ip_addresses: List[str] (optional) - - + + .. py:method:: update(ip_access_list_id: str [, enabled: Optional[bool], ip_addresses: Optional[List[str]], label: Optional[str], list_type: Optional[ListType]]) Updates an existing IP access list, specified by its ID. - + A list can include allow lists and block lists. See the top of this file for a description of how the server treats allow lists and block lists at run time. - + When updating an IP access list: - + * For all allow lists and block lists combined, the API supports a maximum of 1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to exceed that number return error 400 with `error_code` value `QUOTA_EXCEEDED`. * If the updated list would block the calling user's current IP, error 400 is returned with `error_code` value `INVALID_STATE`. - + It can take a few minutes for the changes to take effect. 
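A minimal sketch of the create-then-update flow for account IP access lists; the label and CIDR are placeholders:

.. code-block:: python

    from databricks.sdk import AccountClient
    from databricks.sdk.service import settings

    a = AccountClient()

    created = a.ip_access_lists.create(
        label="allow-corp-vpn",
        list_type=settings.ListType.ALLOW,
        ip_addresses=["203.0.113.0/24"],
    )

    # Disable the list without deleting it.
    a.ip_access_lists.update(
        ip_access_list_id=created.ip_access_list.list_id,
        enabled=False,
    )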
- + :param ip_access_list_id: str The ID for the corresponding IP access list :param enabled: bool (optional) @@ -123,6 +123,6 @@ :param label: str (optional) Label for the IP access list. This **cannot** be empty. :param list_type: :class:`ListType` (optional) - - + + \ No newline at end of file diff --git a/docs/account/settings/llm_proxy_partner_powered_account.rst b/docs/account/settings/llm_proxy_partner_powered_account.rst index 58db9d11b..5c8a7b2a2 100644 --- a/docs/account/settings/llm_proxy_partner_powered_account.rst +++ b/docs/account/settings/llm_proxy_partner_powered_account.rst @@ -9,21 +9,21 @@ .. py:method:: get( [, etag: Optional[str]]) -> LlmProxyPartnerPoweredAccount Gets the enable partner powered AI features account setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`LlmProxyPartnerPoweredAccount` .. py:method:: update(allow_missing: bool, setting: LlmProxyPartnerPoweredAccount, field_mask: str) -> LlmProxyPartnerPoweredAccount Updates the enable partner powered AI features account setting. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`LlmProxyPartnerPoweredAccount` @@ -33,10 +33,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`LlmProxyPartnerPoweredAccount` \ No newline at end of file diff --git a/docs/account/settings/llm_proxy_partner_powered_enforce.rst b/docs/account/settings/llm_proxy_partner_powered_enforce.rst index 65dc1d42c..61a186007 100644 --- a/docs/account/settings/llm_proxy_partner_powered_enforce.rst +++ b/docs/account/settings/llm_proxy_partner_powered_enforce.rst @@ -10,21 +10,21 @@ .. py:method:: get( [, etag: Optional[str]]) -> LlmProxyPartnerPoweredEnforce Gets the enforcement status of partner powered AI features account setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`LlmProxyPartnerPoweredEnforce` .. py:method:: update(allow_missing: bool, setting: LlmProxyPartnerPoweredEnforce, field_mask: str) -> LlmProxyPartnerPoweredEnforce Updates the enable enforcement status of partner powered AI features account setting. 
- + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`LlmProxyPartnerPoweredEnforce` @@ -34,10 +34,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`LlmProxyPartnerPoweredEnforce` \ No newline at end of file diff --git a/docs/account/settings/network_connectivity.rst b/docs/account/settings/network_connectivity.rst index 90c885c17..c56f4bc23 100644 --- a/docs/account/settings/network_connectivity.rst +++ b/docs/account/settings/network_connectivity.rst @@ -9,9 +9,8 @@ your Azure Storage accounts to allow access from Databricks. You can also use the API to provision private endpoints for Databricks to privately connect serverless compute resources to your Azure resources using Azure Private Link. See [configure serverless secure connectivity]. - - [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security + [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security .. py:method:: create_network_connectivity_configuration(network_connectivity_config: CreateNetworkConnectivityConfiguration) -> NetworkConnectivityConfiguration @@ -19,16 +18,16 @@ accessing your Azure Storage accounts. You can also use a network connectivity configuration to create Databricks managed private endpoints so that Databricks serverless compute resources privately access your resources. - + **IMPORTANT**: After you create the network connectivity configuration, you must assign one or more workspaces to the new network connectivity configuration. You can share one network connectivity configuration with multiple workspaces from the same Azure region within the same Databricks account. See [configure serverless secure connectivity]. - + [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security - + :param network_connectivity_config: :class:`CreateNetworkConnectivityConfiguration` - + :returns: :class:`NetworkConnectivityConfiguration` @@ -37,28 +36,28 @@ Create a private endpoint rule for the specified network connectivity config object. Once the object is created, Databricks asynchronously provisions a new Azure private endpoint to your specified Azure resource. - + **IMPORTANT**: You must use Azure portal or other Azure tools to approve the private endpoint to complete the connection. To get the information of the private endpoint created, make a `GET` request on the new private endpoint rule. See [serverless private link]. - + [serverless private link]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security/serverless-private-link - + :param network_connectivity_config_id: str Your Network Connectivity Configuration ID. :param private_endpoint_rule: :class:`CreatePrivateEndpointRule` - + :returns: :class:`NccPrivateEndpointRule` .. py:method:: delete_network_connectivity_configuration(network_connectivity_config_id: str) Deletes a network connectivity configuration. 
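A hedged sketch of creating a network connectivity configuration, assuming `CreateNetworkConnectivityConfiguration` takes the `name` and `region` fields of the request message; both values are placeholders:

.. code-block:: python

    from databricks.sdk import AccountClient
    from databricks.sdk.service import settings

    a = AccountClient()

    # Create an NCC for one Azure region; assign workspaces to it afterwards.
    ncc = a.network_connectivity.create_network_connectivity_configuration(
        network_connectivity_config=settings.CreateNetworkConnectivityConfiguration(
            name="sdk-example-ncc",
            region="eastus2",
        )
    )
    print(ncc.network_connectivity_config_id)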
- + :param network_connectivity_config_id: str Your Network Connectivity Configuration ID. - - + + .. py:method:: delete_private_endpoint_rule(network_connectivity_config_id: str, private_endpoint_rule_id: str) -> NccPrivateEndpointRule @@ -67,56 +66,56 @@ endpoint is immediately deleted. Otherwise, the private endpoint is deactivated and will be deleted after seven days of deactivation. When a private endpoint is deactivated, the `deactivated` field is set to `true` and the private endpoint is not available to your serverless compute resources. - + :param network_connectivity_config_id: str Your Network Connectivity Configuration ID. :param private_endpoint_rule_id: str Your private endpoint rule ID. - + :returns: :class:`NccPrivateEndpointRule` .. py:method:: get_network_connectivity_configuration(network_connectivity_config_id: str) -> NetworkConnectivityConfiguration Gets a network connectivity configuration. - + :param network_connectivity_config_id: str Your Network Connectivity Configuration ID. - + :returns: :class:`NetworkConnectivityConfiguration` .. py:method:: get_private_endpoint_rule(network_connectivity_config_id: str, private_endpoint_rule_id: str) -> NccPrivateEndpointRule Gets the private endpoint rule. - + :param network_connectivity_config_id: str Your Network Connectivity Configuration ID. :param private_endpoint_rule_id: str Your private endpoint rule ID. - + :returns: :class:`NccPrivateEndpointRule` .. py:method:: list_network_connectivity_configurations( [, page_token: Optional[str]]) -> Iterator[NetworkConnectivityConfiguration] Gets an array of network connectivity configurations. - + :param page_token: str (optional) Pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`NetworkConnectivityConfiguration` .. py:method:: list_private_endpoint_rules(network_connectivity_config_id: str [, page_token: Optional[str]]) -> Iterator[NccPrivateEndpointRule] Gets an array of private endpoint rules. - + :param network_connectivity_config_id: str Your Network Connectivity Configuration ID. :param page_token: str (optional) Pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`NccPrivateEndpointRule` @@ -124,7 +123,7 @@ Updates a private endpoint rule. Currently, only private endpoint rules to customer-managed resources can be updated. - + :param network_connectivity_config_id: str The ID of a network connectivity configuration, which is the parent resource of this private endpoint rule object. @@ -137,6 +136,6 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + :returns: :class:`NccPrivateEndpointRule` \ No newline at end of file diff --git a/docs/account/settings/network_policies.rst b/docs/account/settings/network_policies.rst index baecdc205..188375f75 100644 --- a/docs/account/settings/network_policies.rst +++ b/docs/account/settings/network_policies.rst @@ -15,51 +15,51 @@ Creates a new network policy to manage which network destinations can be accessed from the Databricks environment. - + :param network_policy: :class:`AccountNetworkPolicy` Network policy configuration details. - + :returns: :class:`AccountNetworkPolicy` .. py:method:: delete_network_policy_rpc(network_policy_id: str) Deletes a network policy. Cannot be called on 'default-policy'.
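Listing private endpoint rules returns a paginated iterator; the SDK follows `page_token` continuation automatically. A sketch reusing the `ncc` object from the previous example:

.. code-block:: python

    for rule in a.network_connectivity.list_private_endpoint_rules(
        network_connectivity_config_id=ncc.network_connectivity_config_id,
    ):
        print(rule.rule_id)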
- + :param network_policy_id: str The unique identifier of the network policy to delete. - - + + .. py:method:: get_network_policy_rpc(network_policy_id: str) -> AccountNetworkPolicy Gets a network policy. - + :param network_policy_id: str The unique identifier of the network policy to retrieve. - + :returns: :class:`AccountNetworkPolicy` .. py:method:: list_network_policies_rpc( [, page_token: Optional[str]]) -> Iterator[AccountNetworkPolicy] Gets an array of network policies. - + :param page_token: str (optional) Pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`AccountNetworkPolicy` .. py:method:: update_network_policy_rpc(network_policy_id: str, network_policy: AccountNetworkPolicy) -> AccountNetworkPolicy Updates a network policy. This allows you to modify the configuration of a network policy. - + :param network_policy_id: str The unique identifier for the network policy. :param network_policy: :class:`AccountNetworkPolicy` Updated network policy configuration details. - + :returns: :class:`AccountNetworkPolicy` \ No newline at end of file diff --git a/docs/account/settings/personal_compute.rst b/docs/account/settings/personal_compute.rst index c4a950330..aa4c61edd 100644 --- a/docs/account/settings/personal_compute.rst +++ b/docs/account/settings/personal_compute.rst @@ -7,7 +7,7 @@ The Personal Compute enablement setting lets you control which users can use the Personal Compute default policy to create compute resources. By default all users in all workspaces have access (ON), but you can change the setting to instead let individual workspaces configure access control (DELEGATE). - + There is only one instance of this setting per account. Since this setting has a default value, this setting is present on all accounts even though it's never set on a given account. Deletion reverts the value of the setting back to the default value. @@ -15,35 +15,35 @@ .. py:method:: delete( [, etag: Optional[str]]) -> DeletePersonalComputeSettingResponse Reverts back the Personal Compute setting value to default (ON) - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DeletePersonalComputeSettingResponse` .. py:method:: get( [, etag: Optional[str]]) -> PersonalComputeSetting Gets the value of the Personal Compute setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`PersonalComputeSetting` .. py:method:: update(allow_missing: bool, setting: PersonalComputeSetting, field_mask: str) -> PersonalComputeSetting Updates the value of the Personal Compute setting. 
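A hedged sketch of the field-mask update pattern for the Personal Compute setting; the field path mirrors the `PersonalComputeSetting` structure and is illustrative:

.. code-block:: python

    from databricks.sdk import AccountClient
    from databricks.sdk.service import settings

    a = AccountClient()

    # Delegate the Personal Compute policy decision to individual workspaces.
    a.settings.personal_compute.update(
        allow_missing=True,
        setting=settings.PersonalComputeSetting(
            personal_compute=settings.PersonalComputeMessage(
                value=settings.PersonalComputeMessageEnum.DELEGATE
            )
        ),
        field_mask="personal_compute.value",
    )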
- + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`PersonalComputeSetting` @@ -53,10 +53,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`PersonalComputeSetting` \ No newline at end of file diff --git a/docs/account/settings/settings.rst b/docs/account/settings/settings.rst index e96e06a7c..2efa1fd09 100644 --- a/docs/account/settings/settings.rst +++ b/docs/account/settings/settings.rst @@ -12,7 +12,7 @@ The compliance security profile settings at the account level control whether to enable it for new workspaces. By default, this account-level setting is disabled for new workspaces. After workspace creation, account admins can enable the compliance security profile individually for each workspace. - + This setting can be disabled so that new workspaces do not have the compliance security profile enabled by default. @@ -20,7 +20,7 @@ :type: DisableLegacyFeaturesAPI Disable legacy features for new Databricks workspaces. - + For newly created workspaces: 1. Disables the use of DBFS root and mounts. 2. Hive Metastore will not be provisioned. 3. Disables the use of ‘No-isolation clusters’. 4. Disables Databricks Runtime versions prior to 13.3LTS. @@ -55,7 +55,7 @@ The Personal Compute enablement setting lets you control which users can use the Personal Compute default policy to create compute resources. By default all users in all workspaces have access (ON), but you can change the setting to instead let individual workspaces configure access control (DELEGATE). - + There is only one instance of this setting per account. Since this setting has a default value, this setting is present on all accounts even though it's never set on a given account. Deletion reverts the value of the setting back to the default value. \ No newline at end of file diff --git a/docs/account/settings/workspace_network_configuration.rst b/docs/account/settings/workspace_network_configuration.rst index 307fe42cb..03480b654 100644 --- a/docs/account/settings/workspace_network_configuration.rst +++ b/docs/account/settings/workspace_network_configuration.rst @@ -14,10 +14,10 @@ Gets the network option for a workspace. Every workspace has exactly one network policy binding, with 'default-policy' used if no explicit assignment exists. - + :param workspace_id: int The workspace ID. - + :returns: :class:`WorkspaceNetworkOption` @@ -25,11 +25,11 @@ Updates the network option for a workspace. This operation associates the workspace with the specified network policy. To revert to the default policy, specify 'default-policy' as the network_policy_id. - + :param workspace_id: int The workspace ID. :param workspace_network_option: :class:`WorkspaceNetworkOption` The network option details for the workspace. - + :returns: :class:`WorkspaceNetworkOption` \ No newline at end of file diff --git a/docs/account/settingsv2/settings_v2.rst b/docs/account/settingsv2/settings_v2.rst index 87cdbd619..fd5e25bdb 100644 --- a/docs/account/settingsv2/settings_v2.rst +++ b/docs/account/settingsv2/settings_v2.rst @@ -9,9 +9,9 @@ .. 
py:method:: get_public_account_setting(name: str) -> Setting Get a setting value at the account level. - + :param name: str - + :returns: :class:`Setting` @@ -20,7 +20,7 @@ List valid setting keys and metadata. These settings are available to be referenced via [GET /api/2.1/settings/{name}](#~1api~1account~1settingsv2~1getpublicaccountsetting) and [PATCH /api/2.1/settings/{name}](#~1api~1account~1settingsv2~patchpublicaccountsetting) APIs - + :param page_size: int (optional) The maximum number of settings to return. The service may return fewer than this value. If unspecified, at most 200 settings will be returned. The maximum value is 1000; values above 1000 @@ -28,19 +28,19 @@ :param page_token: str (optional) A page token, received from a previous `ListAccountSettingsMetadataRequest` call. Provide this to retrieve the subsequent page. - + When paginating, all other parameters provided to `ListAccountSettingsMetadataRequest` must match the call that provided the page token. - + :returns: Iterator over :class:`SettingsMetadata` .. py:method:: patch_public_account_setting(name: str, setting: Setting) -> Setting Patch a setting value at the account level. - + :param name: str :param setting: :class:`Setting` - + :returns: :class:`Setting` \ No newline at end of file diff --git a/docs/dbdataclasses/agentbricks.rst b/docs/dbdataclasses/agentbricks.rst index 0b6d30000..0fbd1c881 100644 --- a/docs/dbdataclasses/agentbricks.rst +++ b/docs/dbdataclasses/agentbricks.rst @@ -37,3 +37,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. autoclass:: Table :members: :undoc-members: + +.. autoclass:: Token + :members: + :undoc-members: diff --git a/docs/dbdataclasses/apps.rst b/docs/dbdataclasses/apps.rst index 3a562163a..80d7d8231 100644 --- a/docs/dbdataclasses/apps.rst +++ b/docs/dbdataclasses/apps.rst @@ -324,3 +324,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. autoclass:: ListCustomTemplatesResponse :members: :undoc-members: + +.. autoclass:: Token + :members: + :undoc-members: diff --git a/docs/dbdataclasses/billing.rst b/docs/dbdataclasses/billing.rst index 60f015a7b..a6f37ec70 100644 --- a/docs/dbdataclasses/billing.rst +++ b/docs/dbdataclasses/billing.rst @@ -192,6 +192,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: POLICY_NAME :value: "POLICY_NAME" +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: UpdateBudgetConfigurationBudget :members: :undoc-members: diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index ec7a54980..33a87e3f0 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -1897,6 +1897,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: TriggeredUpdateStatus :members: :undoc-members: diff --git a/docs/dbdataclasses/cleanrooms.rst b/docs/dbdataclasses/cleanrooms.rst index d1d800bc5..3d38c8c4b 100644 --- a/docs/dbdataclasses/cleanrooms.rst +++ b/docs/dbdataclasses/cleanrooms.rst @@ -196,3 +196,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. autoclass:: NotebookVersionReview :members: :undoc-members: + +.. 
autoclass:: Token + :members: + :undoc-members: diff --git a/docs/dbdataclasses/compute.rst b/docs/dbdataclasses/compute.rst index 22b9cf41a..ab79b30e2 100644 --- a/docs/dbdataclasses/compute.rst +++ b/docs/dbdataclasses/compute.rst @@ -1405,6 +1405,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: NFS_MOUNT_FAILURE :value: "NFS_MOUNT_FAILURE" + .. py:attribute:: NO_ACTIVATED_K8S + :value: "NO_ACTIVATED_K8S" + .. py:attribute:: NO_MATCHED_K8S :value: "NO_MATCHED_K8S" @@ -1519,6 +1522,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: UPDATE_INSTANCE_PROFILE_FAILURE :value: "UPDATE_INSTANCE_PROFILE_FAILURE" + .. py:attribute:: USAGE_POLICY_ENTITLEMENT_DENIED + :value: "USAGE_POLICY_ENTITLEMENT_DENIED" + .. py:attribute:: USER_INITIATED_VM_TERMINATION :value: "USER_INITIATED_VM_TERMINATION" @@ -1553,6 +1559,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SUCCESS :value: "SUCCESS" +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: UninstallLibrariesResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/dashboards.rst b/docs/dbdataclasses/dashboards.rst index 559da8424..097fc6eac 100644 --- a/docs/dbdataclasses/dashboards.rst +++ b/docs/dbdataclasses/dashboards.rst @@ -37,6 +37,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GenieFeedback + :members: + :undoc-members: + .. py:class:: GenieFeedbackRating Feedback rating for Genie messages @@ -352,6 +356,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: TrashDashboardResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/database.rst b/docs/dbdataclasses/database.rst index bdf7a2161..66a19b14d 100644 --- a/docs/dbdataclasses/database.rst +++ b/docs/dbdataclasses/database.rst @@ -226,3 +226,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. autoclass:: SyncedTableTriggeredUpdateStatus :members: :undoc-members: + +.. autoclass:: Token + :members: + :undoc-members: diff --git a/docs/dbdataclasses/files.rst b/docs/dbdataclasses/files.rst index 42be15a7b..d349c6bf9 100644 --- a/docs/dbdataclasses/files.rst +++ b/docs/dbdataclasses/files.rst @@ -72,6 +72,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: UploadResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/iam.rst b/docs/dbdataclasses/iam.rst index 96abd3743..0d9b6e950 100644 --- a/docs/dbdataclasses/iam.rst +++ b/docs/dbdataclasses/iam.rst @@ -283,6 +283,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: URN_IETF_PARAMS_SCIM_SCHEMAS_CORE_2_0_SERVICE_PRINCIPAL :value: "URN_IETF_PARAMS_SCIM_SCHEMAS_CORE_2_0_SERVICE_PRINCIPAL" +.. autoclass:: Token + :members: + :undoc-members: + .. 
autoclass:: UpdateResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/index.rst b/docs/dbdataclasses/index.rst index 4b1463f0b..9f45078e5 100644 --- a/docs/dbdataclasses/index.rst +++ b/docs/dbdataclasses/index.rst @@ -15,6 +15,7 @@ Dataclasses database files iam + iamv2 jobs marketplace ml diff --git a/docs/dbdataclasses/jobs.rst b/docs/dbdataclasses/jobs.rst index c56480399..fdca6fa04 100644 --- a/docs/dbdataclasses/jobs.rst +++ b/docs/dbdataclasses/jobs.rst @@ -1087,6 +1087,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SUCCESS :value: "SUCCESS" +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: TriggerInfo :members: :undoc-members: diff --git a/docs/dbdataclasses/marketplace.rst b/docs/dbdataclasses/marketplace.rst index f243184c1..ebdcfefcd 100644 --- a/docs/dbdataclasses/marketplace.rst +++ b/docs/dbdataclasses/marketplace.rst @@ -495,6 +495,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: TokenDetail :members: :undoc-members: diff --git a/docs/dbdataclasses/ml.rst b/docs/dbdataclasses/ml.rst index 6042988a1..0d8707f19 100644 --- a/docs/dbdataclasses/ml.rst +++ b/docs/dbdataclasses/ml.rst @@ -799,6 +799,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: TransitionRequest :members: :undoc-members: diff --git a/docs/dbdataclasses/oauth2.rst b/docs/dbdataclasses/oauth2.rst index 00c961155..73521f9d6 100644 --- a/docs/dbdataclasses/oauth2.rst +++ b/docs/dbdataclasses/oauth2.rst @@ -72,6 +72,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: TokenAccessPolicy :members: :undoc-members: diff --git a/docs/dbdataclasses/pipelines.rst b/docs/dbdataclasses/pipelines.rst index 9c0e96bf8..dddd7795f 100644 --- a/docs/dbdataclasses/pipelines.rst +++ b/docs/dbdataclasses/pipelines.rst @@ -137,6 +137,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DYNAMICS365 :value: "DYNAMICS365" + .. py:attribute:: FOREIGN_CATALOG + :value: "FOREIGN_CATALOG" + .. py:attribute:: GA4_RAW_DATA :value: "GA4_RAW_DATA" @@ -448,6 +451,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SCD_TYPE_2 :value: "SCD_TYPE_2" +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: UpdateInfo :members: :undoc-members: diff --git a/docs/dbdataclasses/provisioning.rst b/docs/dbdataclasses/provisioning.rst index 69e237d5f..6925b19d7 100644 --- a/docs/dbdataclasses/provisioning.rst +++ b/docs/dbdataclasses/provisioning.rst @@ -197,6 +197,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: UpdateResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/qualitymonitorv2.rst b/docs/dbdataclasses/qualitymonitorv2.rst index 9f4df6ee6..546847503 100644 --- a/docs/dbdataclasses/qualitymonitorv2.rst +++ b/docs/dbdataclasses/qualitymonitorv2.rst @@ -43,3 +43,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. autoclass:: QualityMonitor :members: :undoc-members: + +.. 
autoclass:: Token + :members: + :undoc-members: diff --git a/docs/dbdataclasses/serving.rst b/docs/dbdataclasses/serving.rst index 39b159395..17aa560d2 100644 --- a/docs/dbdataclasses/serving.rst +++ b/docs/dbdataclasses/serving.rst @@ -513,6 +513,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: MULTIGPU_MEDIUM :value: "MULTIGPU_MEDIUM" +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: TrafficConfig :members: :undoc-members: diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index e09f827f6..53d7448a1 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -739,6 +739,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: TokenAccessControlRequest :members: :undoc-members: diff --git a/docs/dbdataclasses/settingsv2.rst b/docs/dbdataclasses/settingsv2.rst index 583fe9aec..4d409c1f7 100644 --- a/docs/dbdataclasses/settingsv2.rst +++ b/docs/dbdataclasses/settingsv2.rst @@ -157,3 +157,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. autoclass:: StringMessage :members: :undoc-members: + +.. autoclass:: Token + :members: + :undoc-members: diff --git a/docs/dbdataclasses/sharing.rst b/docs/dbdataclasses/sharing.rst index 948e33bd6..245836200 100644 --- a/docs/dbdataclasses/sharing.rst +++ b/docs/dbdataclasses/sharing.rst @@ -494,6 +494,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: FILE_BASED_TABLE :value: "FILE_BASED_TABLE" + .. py:attribute:: FOREIGN_ICEBERG_TABLE + :value: "FOREIGN_ICEBERG_TABLE" + .. py:attribute:: FOREIGN_TABLE :value: "FOREIGN_TABLE" @@ -506,6 +509,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: VIEW :value: "VIEW" +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: UpdateSharePermissionsResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/sql.rst b/docs/dbdataclasses/sql.rst index 3dd84f229..bc0272600 100644 --- a/docs/dbdataclasses/sql.rst +++ b/docs/dbdataclasses/sql.rst @@ -1373,6 +1373,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: TransferOwnershipObjectId :members: :undoc-members: diff --git a/docs/dbdataclasses/tags.rst b/docs/dbdataclasses/tags.rst index 23eb1d728..1e62a5c28 100644 --- a/docs/dbdataclasses/tags.rst +++ b/docs/dbdataclasses/tags.rst @@ -12,6 +12,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: Value :members: :undoc-members: diff --git a/docs/dbdataclasses/vectorsearch.rst b/docs/dbdataclasses/vectorsearch.rst index b6250cc64..0d1943db4 100644 --- a/docs/dbdataclasses/vectorsearch.rst +++ b/docs/dbdataclasses/vectorsearch.rst @@ -153,6 +153,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Token + :members: + :undoc-members: + .. 
autoclass:: UpdateEndpointCustomTagsResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/workspace.rst b/docs/dbdataclasses/workspace.rst index db35f519d..90614f30e 100644 --- a/docs/dbdataclasses/workspace.rst +++ b/docs/dbdataclasses/workspace.rst @@ -287,6 +287,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Token + :members: + :undoc-members: + .. autoclass:: UpdateCredentialsResponse :members: :undoc-members: diff --git a/docs/workspace/agentbricks/agent_bricks.rst b/docs/workspace/agentbricks/agent_bricks.rst index bb4e4891f..d048efe79 100644 --- a/docs/workspace/agentbricks/agent_bricks.rst +++ b/docs/workspace/agentbricks/agent_bricks.rst @@ -9,16 +9,16 @@ .. py:method:: cancel_optimize(id: str) Cancel a Custom LLM Optimization Run. - + :param id: str - - + + .. py:method:: create_custom_llm(name: str, instructions: str [, agent_artifact_path: Optional[str], datasets: Optional[List[Dataset]], guidelines: Optional[List[str]]]) -> CustomLlm Create a Custom LLM. - + :param name: str Name of the custom LLM. Only alphanumeric characters and dashes allowed. :param instructions: str @@ -32,44 +32,44 @@ accepted. :param guidelines: List[str] (optional) Guidelines for the custom LLM to adhere to - + :returns: :class:`CustomLlm` .. py:method:: delete_custom_llm(id: str) Delete a Custom LLM. - + :param id: str The id of the custom llm - - + + .. py:method:: get_custom_llm(id: str) -> CustomLlm Get a Custom LLM. - + :param id: str The id of the custom llm - + :returns: :class:`CustomLlm` .. py:method:: start_optimize(id: str) -> CustomLlm Start a Custom LLM Optimization Run. - + :param id: str The Id of the tile. - + :returns: :class:`CustomLlm` .. py:method:: update_custom_llm(id: str, custom_llm: CustomLlm, update_mask: str) -> CustomLlm Update a Custom LLM. - + :param id: str The id of the custom llm :param custom_llm: :class:`CustomLlm` @@ -77,16 +77,16 @@ :param update_mask: str The list of the CustomLlm fields to update. These should correspond to the values (or lack thereof) present in `custom_llm`. - + The field mask must be a single string, with multiple fields separated by commas (no spaces). The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`CustomLlm` \ No newline at end of file diff --git a/docs/workspace/apps/apps.rst b/docs/workspace/apps/apps.rst index d3761b9db..9ccfe4e8c 100644 --- a/docs/workspace/apps/apps.rst +++ b/docs/workspace/apps/apps.rst @@ -10,11 +10,11 @@ .. py:method:: create(app: App [, no_compute: Optional[bool]]) -> Wait[App] Creates a new app. - + :param app: :class:`App` :param no_compute: bool (optional) If true, the app will not be started after creation. - + :returns: Long-running operation waiter for :class:`App`. See :method:wait_get_app_active for more details. @@ -26,22 +26,22 @@ .. py:method:: delete(name: str) -> App Deletes an app. - + :param name: str The name of the app. - + :returns: :class:`App` .. 
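Returning to the `update_mask` convention documented for `update_custom_llm` above: the mask names exactly the fields being written, so a typical call fetches the object, mutates one field, and sends back a single-field mask. A minimal sketch, assuming a `CustomLlm` with an `instructions` field (per `create_custom_llm`) and a hypothetical id::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    llm_id = "my-custom-llm-id"  # hypothetical id
    llm = w.agent_bricks.get_custom_llm(id=llm_id)
    llm.instructions = "Answer concisely and cite sources."

    # Name only the fields being changed; avoid the `*` full-replacement mask.
    updated = w.agent_bricks.update_custom_llm(
        id=llm_id,
        custom_llm=llm,
        update_mask="instructions",
    )

..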
py:method:: deploy(app_name: str, app_deployment: AppDeployment) -> Wait[AppDeployment] Creates an app deployment for the app with the supplied name. - + :param app_name: str The name of the app. :param app_deployment: :class:`AppDeployment` The app deployment configuration. - + :returns: Long-running operation waiter for :class:`AppDeployment`. See :method:wait_get_deployment_app_succeeded for more details. @@ -53,68 +53,68 @@ .. py:method:: get(name: str) -> App Retrieves information for the app with the supplied name. - + :param name: str The name of the app. - + :returns: :class:`App` .. py:method:: get_deployment(app_name: str, deployment_id: str) -> AppDeployment Retrieves information for the app deployment with the supplied name and deployment id. - + :param app_name: str The name of the app. :param deployment_id: str The unique id of the deployment. - + :returns: :class:`AppDeployment` .. py:method:: get_permission_levels(app_name: str) -> GetAppPermissionLevelsResponse Gets the permission levels that a user can have on an object. - + :param app_name: str The app for which to get or manage permissions. - + :returns: :class:`GetAppPermissionLevelsResponse` .. py:method:: get_permissions(app_name: str) -> AppPermissions Gets the permissions of an app. Apps can inherit permissions from their root object. - + :param app_name: str The app for which to get or manage permissions. - + :returns: :class:`AppPermissions` .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[App] Lists all apps in the workspace. - + :param page_size: int (optional) Upper bound for items returned. :param page_token: str (optional) Pagination token to go to the next page of apps. Requests first page if absent. - + :returns: Iterator over :class:`App` .. py:method:: list_deployments(app_name: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[AppDeployment] Lists all app deployments for the app with the supplied name. - + :param app_name: str The name of the app. :param page_size: int (optional) Upper bound for items returned. :param page_token: str (optional) Pagination token to go to the next page of apps. Requests first page if absent. - + :returns: Iterator over :class:`AppDeployment` @@ -122,21 +122,21 @@ Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. - + :param app_name: str The app for which to get or manage permissions. :param access_control_list: List[:class:`AppAccessControlRequest`] (optional) - + :returns: :class:`AppPermissions` .. py:method:: start(name: str) -> Wait[App] Start the last active deployment of the app in the workspace. - + :param name: str The name of the app. - + :returns: Long-running operation waiter for :class:`App`. See :method:wait_get_app_active for more details. @@ -148,10 +148,10 @@ .. py:method:: stop(name: str) -> Wait[App] Stops the active deployment of the app in the workspace. - + :param name: str The name of the app. - + :returns: Long-running operation waiter for :class:`App`. See :method:wait_get_app_stopped for more details. @@ -163,23 +163,23 @@ .. py:method:: update(name: str, app: App) -> App Updates the app with the supplied name. - + :param name: str The name of the app. The name must contain only lowercase alphanumeric characters and hyphens. It must be unique within the workspace. :param app: :class:`App` - + :returns: :class:`App` .. 
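The `Wait[App]` return type above is the SDK's long-running-operation handle: the call returns immediately, and `result()` blocks until the app reaches the target state or a timeout elapses. A minimal sketch for `create`, with a hypothetical app name::

    import datetime

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.apps import App

    w = WorkspaceClient()

    # create() returns a waiter right away; result() polls until the app is
    # ACTIVE (see wait_get_app_active) or the timeout expires.
    waiter = w.apps.create(app=App(name="my-sample-app"))
    app = waiter.result(timeout=datetime.timedelta(minutes=20))
    print(app.name)

..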
py:method:: update_permissions(app_name: str [, access_control_list: Optional[List[AppAccessControlRequest]]]) -> AppPermissions Updates the permissions on an app. Apps can inherit permissions from their root object. - + :param app_name: str The app for which to get or manage permissions. :param access_control_list: List[:class:`AppAccessControlRequest`] (optional) - + :returns: :class:`AppPermissions` diff --git a/docs/workspace/apps/apps_settings.rst b/docs/workspace/apps/apps_settings.rst index acea870cb..305b87030 100644 --- a/docs/workspace/apps/apps_settings.rst +++ b/docs/workspace/apps/apps_settings.rst @@ -9,52 +9,52 @@ .. py:method:: create_custom_template(template: CustomTemplate) -> CustomTemplate Creates a custom template. - + :param template: :class:`CustomTemplate` - + :returns: :class:`CustomTemplate` .. py:method:: delete_custom_template(name: str) -> CustomTemplate Deletes the custom template with the specified name. - + :param name: str The name of the custom template. - + :returns: :class:`CustomTemplate` .. py:method:: get_custom_template(name: str) -> CustomTemplate Gets the custom template with the specified name. - + :param name: str The name of the custom template. - + :returns: :class:`CustomTemplate` .. py:method:: list_custom_templates( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[CustomTemplate] Lists all custom templates in the workspace. - + :param page_size: int (optional) Upper bound for items returned. :param page_token: str (optional) Pagination token to go to the next page of custom templates. Requests first page if absent. - + :returns: Iterator over :class:`CustomTemplate` .. py:method:: update_custom_template(name: str, template: CustomTemplate) -> CustomTemplate Updates the custom template with the specified name. Note that the template name cannot be updated. - + :param name: str The name of the template. It must contain only alphanumeric characters, hyphens, underscores, and whitespaces. It must be unique within the workspace. :param template: :class:`CustomTemplate` - + :returns: :class:`CustomTemplate` \ No newline at end of file diff --git a/docs/workspace/catalog/artifact_allowlists.rst b/docs/workspace/catalog/artifact_allowlists.rst index af5a0b28b..d63aadb47 100644 --- a/docs/workspace/catalog/artifact_allowlists.rst +++ b/docs/workspace/catalog/artifact_allowlists.rst @@ -11,10 +11,10 @@ Get the artifact allowlist of a certain artifact type. The caller must be a metastore admin or have the **MANAGE ALLOWLIST** privilege on the metastore. - + :param artifact_type: :class:`ArtifactType` The artifact type of the allowlist. - + :returns: :class:`ArtifactAllowlistInfo` @@ -23,7 +23,7 @@ Set the artifact allowlist of a certain artifact type. The whole artifact allowlist is replaced with the new allowlist. The caller must be a metastore admin or have the **MANAGE ALLOWLIST** privilege on the metastore. - + :param artifact_type: :class:`ArtifactType` The artifact type of the allowlist. :param artifact_matchers: List[:class:`ArtifactMatcher`] @@ -34,6 +34,6 @@ Username of the user who set the artifact allowlist. :param metastore_id: str (optional) Unique identifier of parent metastore. 
- + :returns: :class:`ArtifactAllowlistInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/catalogs.rst b/docs/workspace/catalog/catalogs.rst index 9a18ede8a..8c51b4647 100644 --- a/docs/workspace/catalog/catalogs.rst +++ b/docs/workspace/catalog/catalogs.rst @@ -6,7 +6,7 @@ A catalog is the first layer of Unity Catalog’s three-level namespace. It’s used to organize your data assets. Users can see all catalogs on which they have been assigned the USE_CATALOG data permission. - + In Unity Catalog, admins and data stewards manage users and their access to data centrally across all of the workspaces in a Databricks account. Users in different workspaces can share access to the same data, depending on privileges granted centrally in Unity Catalog. @@ -31,7 +31,7 @@ Creates a new catalog instance in the parent metastore if the caller is a metastore admin or has the **CREATE_CATALOG** privilege. - + :param name: str Name of catalog. :param comment: str (optional) @@ -44,13 +44,13 @@ A map of key-value properties attached to the securable. :param provider_name: str (optional) The name of delta sharing provider. - + A Delta Sharing catalog is a catalog that is based on a Delta share on a remote sharing server. :param share_name: str (optional) The name of the share under the share provider. :param storage_root: str (optional) Storage root URL for managed tables within catalog. - + :returns: :class:`CatalogInfo` @@ -58,13 +58,13 @@ Deletes the catalog that matches the supplied name. The caller must be a metastore admin or the owner of the catalog. - + :param name: str The name of the catalog. :param force: bool (optional) Force deletion even if the catalog is not empty. - - + + .. py:method:: get(name: str [, include_browse: Optional[bool]]) -> CatalogInfo @@ -89,13 +89,13 @@ Gets the specified catalog in a metastore. The caller must be a metastore admin, the owner of the catalog, or a user that has the **USE_CATALOG** privilege set for their account. - + :param name: str The name of the catalog. :param include_browse: bool (optional) Whether to include catalogs in the response for which the principal can only access selective metadata for - + :returns: :class:`CatalogInfo` @@ -117,7 +117,7 @@ retrieved. Otherwise, only catalogs owned by the caller (or for which the caller has the **USE_CATALOG** privilege) will be retrieved. There is no guarantee of a specific ordering of the elements in the array. - + :param include_browse: bool (optional) Whether to include catalogs in the response for which the principal can only access selective metadata for @@ -131,7 +131,7 @@ response. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`CatalogInfo` @@ -157,7 +157,7 @@ Updates the catalog that matches the supplied name. The caller must be either the owner of the catalog, or a metastore admin (when changing the owner field of the catalog). - + :param name: str The name of the catalog. :param comment: str (optional) @@ -174,6 +174,6 @@ Username of current owner of catalog. :param properties: Dict[str,str] (optional) A map of key-value properties attached to the securable. - + :returns: :class:`CatalogInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/connections.rst b/docs/workspace/catalog/connections.rst index ec7f39be2..6f3d0ac6e 100644 --- a/docs/workspace/catalog/connections.rst +++ b/docs/workspace/catalog/connections.rst @@ -5,7 +5,7 @@ .. 
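Tying together the catalog CRUD calls documented above, a minimal create/list/delete round trip; the `time_ns()` suffix just keeps the hypothetical catalog name unique::

    import time

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    created = w.catalogs.create(name=f"sdk-{time.time_ns()}")

    # list() yields CatalogInfo objects and pages transparently via page_token.
    names = [c.name for c in w.catalogs.list(include_browse=True)]

    w.catalogs.delete(name=created.name, force=True)

..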
py:class:: ConnectionsAPI Connections allow for creating a connection to an external data source. - + A connection is an abstraction of an external data source that can be connected from Databricks Compute. Creating a connection object is the first step to managing external data sources within Unity Catalog, with the second step being creating a data object (catalog, schema, or table) using the connection. Data @@ -42,10 +42,10 @@ w.connections.delete(name=conn_create.name) Creates a new connection - + Creates a new connection to an external data source. It allows users to specify connection details and configurations for interaction with the external server. - + :param name: str Name of the connection. :param connection_type: :class:`ConnectionType` @@ -58,18 +58,18 @@ A map of key-value properties attached to the securable. :param read_only: bool (optional) If the connection is read only. - + :returns: :class:`ConnectionInfo` .. py:method:: delete(name: str) Deletes the connection that matches the supplied name. - + :param name: str The name of the connection to be deleted. - - + + .. py:method:: get(name: str) -> ConnectionInfo @@ -112,10 +112,10 @@ w.connections.delete(name=conn_create.name) Gets a connection from its name. - + :param name: str Name of the connection. - + :returns: :class:`ConnectionInfo` @@ -134,7 +134,7 @@ conn_list = w.connections.list(catalog.ListConnectionsRequest()) List all connections. - + :param max_results: int (optional) Maximum number of connections to return. - If not set, all connections are returned (not recommended). - when set to a value greater than 0, the page length is the minimum of this value and @@ -142,7 +142,7 @@ (recommended); - when set to a value less than 0, an invalid parameter error is returned; :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`ConnectionInfo` @@ -184,7 +184,7 @@ w.connections.delete(name=conn_create.name) Updates the connection that matches the supplied name. - + :param name: str Name of the connection. :param options: Dict[str,str] @@ -193,6 +193,6 @@ New name for the connection. :param owner: str (optional) Username of current owner of the connection. - + :returns: :class:`ConnectionInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/credentials.rst b/docs/workspace/catalog/credentials.rst index 93b49d3f4..2d5984c82 100644 --- a/docs/workspace/catalog/credentials.rst +++ b/docs/workspace/catalog/credentials.rst @@ -7,7 +7,7 @@ A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. Each credential is subject to Unity Catalog access-control policies that control which users and groups can access the credential. - + To create credentials, you must be a Databricks account admin or have the `CREATE SERVICE CREDENTIAL` privilege. The user who creates the credential can delegate ownership to another user or group to manage permissions on it. @@ -16,10 +16,10 @@ Creates a new credential. The type of credential to be created is determined by the **purpose** field, which should be either **SERVICE** or **STORAGE**. - + The caller must be a metastore admin or have the metastore privilege **CREATE_STORAGE_CREDENTIAL** for storage credentials, or **CREATE_SERVICE_CREDENTIAL** for service credentials. - + :param name: str The credential name. The name must be unique among storage and service credentials within the metastore. @@ -40,7 +40,7 @@ **STORAGE**.
:param skip_validation: bool (optional) Optional. Supplying true to this argument skips validation of the created set of credentials. - + :returns: :class:`CredentialInfo` @@ -48,26 +48,26 @@ Deletes a service or storage credential from the metastore. The caller must be an owner of the credential. - + :param name_arg: str Name of the credential. :param force: bool (optional) Force an update even if there are dependent services (when purpose is **SERVICE**) or dependent external locations and external tables (when purpose is **STORAGE**). - - + + .. py:method:: generate_temporary_service_credential(credential_name: str [, azure_options: Optional[GenerateTemporaryServiceCredentialAzureOptions], gcp_options: Optional[GenerateTemporaryServiceCredentialGcpOptions]]) -> TemporaryCredentials Returns a set of temporary credentials generated using the specified service credential. The caller must be a metastore admin or have the metastore privilege **ACCESS** on the service credential. - + :param credential_name: str The name of the service credential used to generate a temporary credential :param azure_options: :class:`GenerateTemporaryServiceCredentialAzureOptions` (optional) :param gcp_options: :class:`GenerateTemporaryServiceCredentialGcpOptions` (optional) - + :returns: :class:`TemporaryCredentials` @@ -75,21 +75,21 @@ Gets a service or storage credential from the metastore. The caller must be a metastore admin, the owner of the credential, or have any permission on the credential. - + :param name_arg: str Name of the credential. - + :returns: :class:`CredentialInfo` .. py:method:: list_credentials( [, max_results: Optional[int], page_token: Optional[str], purpose: Optional[CredentialPurpose]]) -> Iterator[CredentialInfo] Gets an array of credentials (as __CredentialInfo__ objects). - + The array is limited to only the credentials that the caller has permission to access. If the caller is a metastore admin, retrieval of credentials is unrestricted. There is no guarantee of a specific ordering of the elements in the array. - + :param max_results: int (optional) Maximum number of credentials to return. - If not set, the default max page size is used. - When set to a value greater than 0, the page length is the minimum of this value and a server-configured @@ -99,17 +99,17 @@ Opaque token to retrieve the next page of results. :param purpose: :class:`CredentialPurpose` (optional) Return only credentials for the specified purpose. - + :returns: Iterator over :class:`CredentialInfo` .. py:method:: update_credential(name_arg: str [, aws_iam_role: Optional[AwsIamRole], azure_managed_identity: Optional[AzureManagedIdentity], azure_service_principal: Optional[AzureServicePrincipal], comment: Optional[str], databricks_gcp_service_account: Optional[DatabricksGcpServiceAccount], force: Optional[bool], isolation_mode: Optional[IsolationMode], new_name: Optional[str], owner: Optional[str], read_only: Optional[bool], skip_validation: Optional[bool]]) -> CredentialInfo Updates a service or storage credential on the metastore. - + The caller must be the owner of the credential or a metastore admin or have the `MANAGE` permission. If the caller is a metastore admin, only the __owner__ field can be changed. - + :param name_arg: str Name of the credential. :param aws_iam_role: :class:`AwsIamRole` (optional) @@ -136,26 +136,26 @@ **STORAGE**. :param skip_validation: bool (optional) Supply true to this argument to skip validation of the updated credential. - + :returns: :class:`CredentialInfo` .. 
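A minimal sketch of the listing and temporary-credential calls above, assuming a service credential named `my-service-cred` (hypothetical) already exists::

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.catalog import CredentialPurpose

    w = WorkspaceClient()

    # Filter to service credentials; the iterator follows page_token for you.
    for cred in w.credentials.list_credentials(purpose=CredentialPurpose.SERVICE):
        print(cred.name)

    # Exchange the service credential for short-lived, cloud-native credentials.
    tmp = w.credentials.generate_temporary_service_credential(
        credential_name="my-service-cred"
    )

..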
py:method:: validate_credential( [, aws_iam_role: Optional[AwsIamRole], azure_managed_identity: Optional[AzureManagedIdentity], credential_name: Optional[str], databricks_gcp_service_account: Optional[DatabricksGcpServiceAccount], external_location_name: Optional[str], purpose: Optional[CredentialPurpose], read_only: Optional[bool], url: Optional[str]]) -> ValidateCredentialResponse Validates a credential. - + For service credentials (purpose is **SERVICE**), either the __credential_name__ or the cloud-specific credential must be provided. - + For storage credentials (purpose is **STORAGE**), at least one of __external_location_name__ and __url__ needs to be provided. If only one of them is provided, it will be used for validation. And if both are provided, the __url__ will be used for validation, and __external_location_name__ will be ignored when checking overlapping urls. Either the __credential_name__ or the cloud-specific credential must be provided. - + The caller must be a metastore admin or the credential owner or have the required permission on the metastore and the credential (e.g., **CREATE_EXTERNAL_LOCATION** when purpose is **STORAGE**). - + :param aws_iam_role: :class:`AwsIamRole` (optional) :param azure_managed_identity: :class:`AzureManagedIdentity` (optional) :param credential_name: str (optional) @@ -171,6 +171,6 @@ (purpose is **STORAGE**.) :param url: str (optional) The external location url to validate. Only applicable when purpose is **STORAGE**. - + :returns: :class:`ValidateCredentialResponse` \ No newline at end of file diff --git a/docs/workspace/catalog/entity_tag_assignments.rst b/docs/workspace/catalog/entity_tag_assignments.rst index fcba2bbae..68eb4d410 100644 --- a/docs/workspace/catalog/entity_tag_assignments.rst +++ b/docs/workspace/catalog/entity_tag_assignments.rst @@ -12,34 +12,34 @@ .. py:method:: create(tag_assignment: EntityTagAssignment) -> EntityTagAssignment Creates a tag assignment for a Unity Catalog entity. - + To add tags to Unity Catalog entities, you must own the entity or have the following privileges: - **APPLY TAG** on the entity - **USE SCHEMA** on the entity's parent schema - **USE CATALOG** on the entity's parent catalog - + To add a governed tag to Unity Catalog entities, you must also have the **ASSIGN** or **MANAGE** permission on the tag policy. See [Manage tag policy permissions]. - + [Manage tag policy permissions]: https://docs.databricks.com/aws/en/admin/tag-policies/manage-permissions - + :param tag_assignment: :class:`EntityTagAssignment` - + :returns: :class:`EntityTagAssignment` .. py:method:: delete(entity_type: str, entity_name: str, tag_key: str) Deletes a tag assignment for a Unity Catalog entity by its key. - + To delete tags from Unity Catalog entities, you must own the entity or have the following privileges: - **APPLY TAG** on the entity - **USE_SCHEMA** on the entity's parent schema - **USE_CATALOG** on the entity's parent catalog - + To delete a governed tag from Unity Catalog entities, you must also have the **ASSIGN** or **MANAGE** permission on the tag policy. See [Manage tag policy permissions]. - + [Manage tag policy permissions]: https://docs.databricks.com/aws/en/admin/tag-policies/manage-permissions - + :param entity_type: str The type of the entity to which the tag is assigned. Allowed values are: catalogs, schemas, tables, columns, volumes. @@ -47,14 +47,14 @@ The fully qualified name of the entity to which the tag is assigned :param tag_key: str Required. The key of the tag to delete - - + +
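A minimal sketch of the create/delete pair documented above; the field names follow the `EntityTagAssignment` dataclass, and the entity and tag values are hypothetical::

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.catalog import EntityTagAssignment

    w = WorkspaceClient()

    # Attach a tag to a table.
    assignment = w.entity_tag_assignments.create(
        tag_assignment=EntityTagAssignment(
            entity_type="tables",
            entity_name="main.default.my_table",
            tag_key="cost_center",
            tag_value="marketing",
        )
    )

    # Remove it again by its key.
    w.entity_tag_assignments.delete(
        entity_type="tables",
        entity_name="main.default.my_table",
        tag_key="cost_center",
    )

..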
py:method:: get(entity_type: str, entity_name: str, tag_key: str) -> EntityTagAssignment Gets a tag assignment for a Unity Catalog entity by tag key. - + :param entity_type: str The type of the entity to which the tag is assigned. Allowed values are: catalogs, schemas, tables, columns, volumes. @@ -62,14 +62,14 @@ The fully qualified name of the entity to which the tag is assigned :param tag_key: str Required. The key of the tag - + :returns: :class:`EntityTagAssignment` .. py:method:: list(entity_type: str, entity_name: str [, max_results: Optional[int], page_token: Optional[str]]) -> Iterator[EntityTagAssignment] List tag assignments for a Unity Catalog entity - + :param entity_type: str The type of the entity to which the tag is assigned. Allowed values are: catalogs, schemas, tables, columns, volumes. @@ -79,23 +79,23 @@ Optional. Maximum number of tag assignments to return in a single page :param page_token: str (optional) Optional. Pagination token to retrieve the next page of results - + :returns: Iterator over :class:`EntityTagAssignment` .. py:method:: update(entity_type: str, entity_name: str, tag_key: str, tag_assignment: EntityTagAssignment, update_mask: str) -> EntityTagAssignment Updates an existing tag assignment for a Unity Catalog entity. - + To update tags on Unity Catalog entities, you must own the entity or have the following privileges: - **APPLY TAG** on the entity - **USE SCHEMA** on the entity's parent schema - **USE CATALOG** on the entity's parent catalog - + To update a governed tag on Unity Catalog entities, you must also have the **ASSIGN** or **MANAGE** permission on the tag policy. See [Manage tag policy permissions]. - + [Manage tag policy permissions]: https://docs.databricks.com/aws/en/admin/tag-policies/manage-permissions - + :param entity_type: str The type of the entity to which the tag is assigned. Allowed values are: catalogs, schemas, tables, columns, volumes. @@ -110,10 +110,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`EntityTagAssignment` \ No newline at end of file diff --git a/docs/workspace/catalog/external_lineage.rst b/docs/workspace/catalog/external_lineage.rst index d8a852f5f..91a500e82 100644 --- a/docs/workspace/catalog/external_lineage.rst +++ b/docs/workspace/catalog/external_lineage.rst @@ -7,7 +7,7 @@ External Lineage APIs enable defining and managing lineage relationships between Databricks objects and external systems. These APIs allow users to capture data flows connecting Databricks tables, models, and file paths with external metadata objects. - + With these APIs, users can create, update, delete, and list lineage relationships with support for column-level mappings and custom properties. @@ -15,9 +15,9 @@ Creates an external lineage relationship between a Databricks or external metadata object and another external metadata object. - + :param external_lineage_relationship: :class:`CreateRequestExternalLineage` - + :returns: :class:`ExternalLineageRelationship` @@ -25,17 +25,17 @@ Deletes an external lineage relationship between a Databricks or external metadata object and another external metadata object.
- + :param external_lineage_relationship: :class:`DeleteRequestExternalLineage` - - + + .. py:method:: list_external_lineage_relationships(object_info: ExternalLineageObject, lineage_direction: LineageDirection [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[ExternalLineageInfo] Lists external lineage relationships of a Databricks object or external metadata given a supplied direction. - + :param object_info: :class:`ExternalLineageObject` The object to query external lineage relationships for. Since this field is a query parameter, please flatten the nested fields. For example, if the object is a table, the query parameter should @@ -47,7 +47,7 @@ value must be less than or equal to 1000. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`ExternalLineageInfo` @@ -55,7 +55,7 @@ Updates an external lineage relationship between a Databricks or external metadata object and another external metadata object. - + :param external_lineage_relationship: :class:`UpdateRequestExternalLineage` :param update_mask: str The field mask must be a single string, with multiple fields separated by commas (no spaces). The @@ -63,10 +63,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`ExternalLineageRelationship` \ No newline at end of file diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index 624fe1958..9167c1650 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -9,9 +9,9 @@ access-control policies that control which users and groups can access the credential. If a user does not have access to an external location in Unity Catalog, the request fails and Unity Catalog does not attempt to authenticate to your cloud tenant on the user’s behalf. - + Databricks recommends using external locations rather than using storage credentials directly. - + To create external locations, you must be a metastore admin or a user with the **CREATE_EXTERNAL_LOCATION** privilege. @@ -50,7 +50,7 @@ Creates a new external location entry in the metastore. The caller must be a metastore admin or have the **CREATE_EXTERNAL_LOCATION** privilege on both the metastore and the associated storage credential. - + :param name: str Name of the external location. :param url: str @@ -67,12 +67,13 @@ enabled, the access to the location falls back to cluster credentials if UC credentials are not sufficient. :param file_event_queue: :class:`FileEventQueue` (optional) - File event queue settings. + File event queue settings. If `enable_file_events` is `true`, must be defined and have exactly one + of the documented properties. :param read_only: bool (optional) Indicates whether the external location is read-only. :param skip_validation: bool (optional) Skips validation of the storage credential associated with the external location. - + :returns: :class:`ExternalLocationInfo` @@ -80,13 +81,13 @@ Deletes the specified external location from the metastore. The caller must be the owner of the external location. 
- + :param name: str Name of the external location. :param force: bool (optional) Force deletion even if there are dependent external tables or mounts. - - + + .. py:method:: get(name: str [, include_browse: Optional[bool]]) -> ExternalLocationInfo @@ -123,13 +124,13 @@ Gets an external location from the metastore. The caller must be either a metastore admin, the owner of the external location, or a user that has some privilege on the external location. - + :param name: str Name of the external location. :param include_browse: bool (optional) Whether to include external locations in the response for which the principal can only access selective metadata for - + :returns: :class:`ExternalLocationInfo` @@ -150,7 +151,7 @@ Gets an array of external locations (__ExternalLocationInfo__ objects) from the metastore. The caller must be a metastore admin, the owner of the external location, or a user that has some privilege on the external location. There is no guarantee of a specific ordering of the elements in the array. - + :param include_browse: bool (optional) Whether to include external locations in the response for which the principal can only access selective metadata for @@ -161,7 +162,7 @@ value (recommended); - when set to a value less than 0, an invalid parameter error is returned; :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`ExternalLocationInfo` @@ -204,7 +205,7 @@ Updates an external location in the metastore. The caller must be the owner of the external location, or be a metastore admin. In the second case, the admin can only update the name of the external location. - + :param name: str Name of the external location. :param comment: str (optional) @@ -219,7 +220,8 @@ enabled, the access to the location falls back to cluster credentials if UC credentials are not sufficient. :param file_event_queue: :class:`FileEventQueue` (optional) - File event queue settings. + File event queue settings. If `enable_file_events` is `true`, must be defined and have exactly one + of the documented properties. :param force: bool (optional) Force update even if changing url invalidates dependent external tables or mounts. :param isolation_mode: :class:`IsolationMode` (optional) @@ -233,6 +235,6 @@ Skips validation of the storage credential associated with the external location. :param url: str (optional) Path URL of the external location. - + :returns: :class:`ExternalLocationInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/external_metadata.rst b/docs/workspace/catalog/external_metadata.rst index 6c3f3912c..4c3b21490 100644 --- a/docs/workspace/catalog/external_metadata.rst +++ b/docs/workspace/catalog/external_metadata.rst @@ -6,7 +6,7 @@ External Metadata objects enable customers to register and manage metadata about external systems within Unity Catalog. - + These APIs provide a standardized way to create, update, retrieve, list, and delete external metadata objects. Fine-grained authorization ensures that only users with appropriate permissions can view and manage external metadata objects. @@ -16,9 +16,9 @@ Creates a new external metadata object in the parent metastore if the caller is a metastore admin or has the **CREATE_EXTERNAL_METADATA** privilege. Grants **BROWSE** to all account users upon creation by default. 
- + :param external_metadata: :class:`ExternalMetadata` - + :returns: :class:`ExternalMetadata` @@ -26,19 +26,19 @@ Deletes the external metadata object that matches the supplied name. The caller must be a metastore admin, the owner of the external metadata object, or a user that has the **MANAGE** privilege. - + :param name: str - - + + .. py:method:: get_external_metadata(name: str) -> ExternalMetadata Gets the specified external metadata object in a metastore. The caller must be a metastore admin, the owner of the external metadata object, or a user that has the **BROWSE** privilege. - + :param name: str - + :returns: :class:`ExternalMetadata` @@ -48,13 +48,13 @@ external metadata objects will be retrieved. Otherwise, only external metadata objects that the caller has **BROWSE** on will be retrieved. There is no guarantee of a specific ordering of the elements in the array. - + :param page_size: int (optional) Specifies the maximum number of external metadata objects to return in a single response. The value must be less than or equal to 1000. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`ExternalMetadata` @@ -64,7 +64,7 @@ the owner or other metadata fields in one request. The caller must be a metastore admin, the owner of the external metadata object, or a user that has the **MODIFY** privilege. If the caller is updating the owner, they must also have the **MANAGE** privilege. - + :param name: str Name of the external metadata object. :param external_metadata: :class:`ExternalMetadata` @@ -74,10 +74,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`ExternalMetadata` \ No newline at end of file diff --git a/docs/workspace/catalog/functions.rst b/docs/workspace/catalog/functions.rst index 0297c0e22..cb277fe82 100644 --- a/docs/workspace/catalog/functions.rst +++ b/docs/workspace/catalog/functions.rst @@ -5,7 +5,7 @@ .. py:class:: FunctionsAPI Functions implement User-Defined Functions (UDFs) in Unity Catalog. - + The function implementation can be any SQL expression or Query, and it can be invoked wherever a table reference is allowed in a query. In Unity Catalog, a function resides at the same level as a table, so it can be referenced with the form __catalog_name__.__schema_name__.__function_name__. @@ -13,16 +13,16 @@ .. py:method:: create(function_info: CreateFunction) -> FunctionInfo **WARNING: This API is experimental and will change in future versions** - + Creates a new function - + The user must have the following permissions in order for the function to be created: - **USE_CATALOG** on the function's parent catalog - **USE_SCHEMA** and **CREATE_FUNCTION** on the function's parent schema - + :param function_info: :class:`CreateFunction` Partial __FunctionInfo__ specifying the function to be created. 
- + :returns: :class:`FunctionInfo` @@ -33,14 +33,14 @@ owner of the function's parent schema and have the **USE_CATALOG** privilege on its parent catalog - Is the owner of the function itself and have both the **USE_CATALOG** privilege on its parent catalog and the **USE_SCHEMA** privilege on its parent schema - + :param name: str The fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function__name__). :param force: bool (optional) Force deletion even if the function is not empty. - - + + .. py:method:: get(name: str [, include_browse: Optional[bool]]) -> FunctionInfo @@ -51,14 +51,14 @@ of the function - Have the **USE_CATALOG** privilege on the function's parent catalog, the **USE_SCHEMA** privilege on the function's parent schema, and the **EXECUTE** privilege on the function itself - + :param name: str The fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function__name__). :param include_browse: bool (optional) Whether to include functions in the response for which the principal can only access selective metadata for - + :returns: :class:`FunctionInfo` @@ -69,7 +69,7 @@ on the catalog and the **USE_SCHEMA** privilege on the schema, and the output list contains only functions for which either the user has the **EXECUTE** privilege or the user is the owner. There is no guarantee of a specific ordering of the elements in the array. - + :param catalog_name: str Name of parent catalog for functions of interest. :param schema_name: str @@ -84,7 +84,7 @@ (recommended); - when set to a value less than 0, an invalid parameter error is returned; :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`FunctionInfo` @@ -96,12 +96,12 @@ function's parent schema and has the **USE_CATALOG** privilege on its parent catalog - Is the owner of the function itself and has the **USE_CATALOG** privilege on its parent catalog as well as the **USE_SCHEMA** privilege on the function's parent schema. - + :param name: str The fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function__name__). :param owner: str (optional) Username of current owner of function. - + :returns: :class:`FunctionInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/grants.rst b/docs/workspace/catalog/grants.rst index 64c1b3484..0b65f9f37 100644 --- a/docs/workspace/catalog/grants.rst +++ b/docs/workspace/catalog/grants.rst @@ -8,7 +8,7 @@ Access can be granted by either a metastore admin, the owner of an object, or the owner of the catalog or schema that contains the object. - + Securable objects in Unity Catalog are hierarchical and privileges are inherited downward. This means that granting a privilege on the catalog automatically grants the privilege to all current and future objects within the catalog. Similarly, privileges granted on a schema are inherited by all current and future @@ -61,7 +61,7 @@ w.tables.delete(full_name=table_full_name) Gets the permissions for a securable. Does not include inherited permissions. - + :param securable_type: str Type of securable. :param full_name: str @@ -70,7 +70,7 @@ Specifies the maximum number of privileges to return (page length). Every PrivilegeAssignment present in a single page response is guaranteed to contain all the privileges granted on the requested Securable for the respective principal.
- + If not set, all the permissions are returned. If set to - less than 0: invalid parameter error - 0: page length is set to a server configured value - less than 150 but greater than 0: invalid parameter error (this is to ensure that the server is able to return at least one complete @@ -80,7 +80,7 @@ Opaque pagination token to go to next page based on previous query. :param principal: str (optional) If provided, only the permissions for the specified principal (user or group) are returned. - + :returns: :class:`GetPermissionsResponse` @@ -132,7 +132,7 @@ Gets the effective permissions for a securable. Includes inherited permissions from any parent securables. - + :param securable_type: str Type of securable. :param full_name: str @@ -142,7 +142,7 @@ EffectivePrivilegeAssignment present in a single page response is guaranteed to contain all the effective privileges granted on (or inherited by) the requested Securable for the respective principal. - + If not set, all the effective permissions are returned. If set to - less than 0: invalid parameter error - 0: page length is set to a server configured value - less than 150 but greater than 0: invalid parameter error (this is to ensure that the server is able to return at least one complete @@ -153,7 +153,7 @@ :param principal: str (optional) If provided, only the effective permissions for the specified principal (user or group) are returned. - + :returns: :class:`EffectivePermissionsList` @@ -212,13 +212,13 @@ w.tables.delete(full_name=table_full_name) Updates the permissions for a securable. - + :param securable_type: str Type of securable. :param full_name: str Full name of securable. :param changes: List[:class:`PermissionsChange`] (optional) Array of permissions change objects. - + :returns: :class:`UpdatePermissionsResponse` \ No newline at end of file diff --git a/docs/workspace/catalog/metastores.rst b/docs/workspace/catalog/metastores.rst index 28636acf9..a867f417c 100644 --- a/docs/workspace/catalog/metastores.rst +++ b/docs/workspace/catalog/metastores.rst @@ -8,10 +8,10 @@ views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces to control which workloads use each metastore. For a workspace to use Unity Catalog, it must have a Unity Catalog metastore attached. - + Each metastore is configured with a root storage location in a cloud storage account. This storage location is used for metadata and managed tables data. - + NOTE: This metastore is distinct from the metastore included in Databricks workspaces created before Unity Catalog was released. If your workspace includes a legacy Hive metastore, the data in that metastore is available in a catalog named hive_metastore. @@ -45,7 +45,7 @@ Creates a new metastore assignment. If an assignment for the same __workspace_id__ exists, it will be overwritten by the new __metastore_id__ and __default_catalog_name__. The caller must be an account admin. - + :param workspace_id: int A workspace ID. :param metastore_id: str @@ -53,8 +53,8 @@ :param default_catalog_name: str The name of the default catalog in the metastore. This field is deprecated. Please use "Default Namespace API" to configure the default catalog for a Databricks workspace. - - + + .. py:method:: create(name: str [, region: Optional[str], storage_root: Optional[str]]) -> MetastoreInfo @@ -83,14 +83,14 @@ __owner__ field is not set), the owner of the new metastore is the user calling the __createMetastore__ API.
If the __owner__ field is set to the empty string (**""**), the ownership is assigned to the System User instead. - + :param name: str The user-specified name of the metastore. :param region: str (optional) Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). :param storage_root: str (optional) The storage root URL for metastore - + :returns: :class:`MetastoreInfo` @@ -108,21 +108,21 @@ current_metastore = w.metastores.current() Gets the metastore assignment for the workspace being accessed. - - + + :returns: :class:`MetastoreAssignment` .. py:method:: delete(id: str [, force: Optional[bool]]) Deletes a metastore. The caller must be a metastore admin. - + :param id: str Unique ID of the metastore. :param force: bool (optional) Force deletion even if the metastore is not empty. Default is false. - - + + .. py:method:: get(id: str) -> MetastoreInfo @@ -151,10 +151,10 @@ Gets a metastore that matches the supplied ID. The caller must be a metastore admin to retrieve this info. - + :param id: str Unique ID of the metastore. - + :returns: :class:`MetastoreInfo` @@ -173,7 +173,7 @@ Gets an array of the available metastores (as __MetastoreInfo__ objects). The caller must be an admin to retrieve this info. There is no guarantee of a specific ordering of the elements in the array. - + :param max_results: int (optional) Maximum number of metastores to return. - when set to a value greater than 0, the page length is the minimum of this value and a server configured value; - when set to 0, the page length is set to a @@ -184,7 +184,7 @@ from the response. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`MetastoreInfo` @@ -203,8 +203,8 @@ Gets information about a metastore. This summary includes the storage credential, the cloud vendor, the cloud region, and the global metastore ID. - - + + :returns: :class:`GetMetastoreSummaryResponse` @@ -235,13 +235,13 @@ w.metastores.delete(id=created.metastore_id, force=True) Deletes a metastore assignment. The caller must be an account administrator. - + :param workspace_id: int A workspace ID. :param metastore_id: str Query for the ID of the metastore to delete. - - + + .. py:method:: update(id: str [, delta_sharing_organization_name: Optional[str], delta_sharing_recipient_token_lifetime_in_seconds: Optional[int], delta_sharing_scope: Optional[DeltaSharingScopeEnum], new_name: Optional[str], owner: Optional[str], privilege_model_version: Optional[str], storage_root_credential_id: Optional[str]]) -> MetastoreInfo @@ -270,7 +270,7 @@ Updates information for a specific metastore. The caller must be a metastore admin. If the __owner__ field is set to the empty string (**""**), the ownership is updated to the System User. - + :param id: str Unique ID of the metastore. :param delta_sharing_organization_name: str (optional) @@ -288,7 +288,7 @@ Privilege model version of the metastore, of the form `major.minor` (e.g., `1.0`). :param storage_root_credential_id: str (optional) UUID of storage credential to access the metastore storage_root. - + :returns: :class:`MetastoreInfo` @@ -298,7 +298,7 @@ __default_catalog_name__ for a specified Workspace, if the Workspace is already assigned a metastore. The caller must be an account admin to update __metastore_id__; otherwise, the caller can be a Workspace admin. - + :param workspace_id: int A workspace ID. 
:param default_catalog_name: str (optional) @@ -306,6 +306,6 @@ Namespace API" to configure the default catalog for a Databricks workspace. :param metastore_id: str (optional) The unique ID of the metastore. - - + + \ No newline at end of file diff --git a/docs/workspace/catalog/model_versions.rst b/docs/workspace/catalog/model_versions.rst index ec3ed4985..7d242e006 100644 --- a/docs/workspace/catalog/model_versions.rst +++ b/docs/workspace/catalog/model_versions.rst @@ -7,7 +7,7 @@ Databricks provides a hosted version of MLflow Model Registry in Unity Catalog. Models in Unity Catalog provide centralized access control, auditing, lineage, and discovery of ML models across Databricks workspaces. - + This API reference documents the REST endpoints for managing model versions in Unity Catalog. For more details, see the [registered models API docs](/api/workspace/registeredmodels). @@ -15,27 +15,27 @@ Deletes a model version from the specified registered model. Any aliases assigned to the model version will also be deleted. - + The caller must be a metastore admin or an owner of the parent registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + :param full_name: str The three-level (fully qualified) name of the model version :param version: int The integer version number of the model version - - + + .. py:method:: get(full_name: str, version: int [, include_aliases: Optional[bool], include_browse: Optional[bool]]) -> ModelVersionInfo Get a model version. - + The caller must be a metastore admin or an owner of (or have the **EXECUTE** privilege on) the parent registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + :param full_name: str The three-level (fully qualified) name of the model version :param version: int @@ -45,25 +45,25 @@ :param include_browse: bool (optional) Whether to include model versions in the response for which the principal can only access selective metadata for - + :returns: :class:`ModelVersionInfo` .. py:method:: get_by_alias(full_name: str, alias: str [, include_aliases: Optional[bool]]) -> ModelVersionInfo Get a model version by alias. - + The caller must be a metastore admin or an owner of (or have the **EXECUTE** privilege on) the registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + :param full_name: str The three-level (fully qualified) name of the registered model :param alias: str The name of the alias :param include_aliases: bool (optional) Whether to include aliases associated with the model version in the response - + :returns: :class:`ModelVersionInfo` @@ -71,16 +71,16 @@ List model versions. You can list model versions under a particular schema, or list all model versions in the current metastore. - + The returned models are filtered based on the privileges of the calling user. For example, the metastore admin is able to list all the model versions. A regular user needs to be the owner or have the **EXECUTE** privilege on the parent registered model to receive the model versions in the response.
For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + There is no guarantee of a specific ordering of the elements in the response. The elements in the response will not contain any aliases or tags. - + :param full_name: str The full three-level name of the registered model under which to list model versions :param include_browse: bool (optional) @@ -94,26 +94,26 @@ value less than 0, an invalid parameter error is returned; :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`ModelVersionInfo` .. py:method:: update(full_name: str, version: int [, comment: Optional[str]]) -> ModelVersionInfo Updates the specified model version. - + The caller must be a metastore admin or an owner of the parent registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + Currently only the comment of the model version can be updated. - + :param full_name: str The three-level (fully qualified) name of the model version :param version: int The integer version number of the model version :param comment: str (optional) The comment attached to the model version - + :returns: :class:`ModelVersionInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/online_tables.rst b/docs/workspace/catalog/online_tables.rst index 4ed7e718f..3bcae2d76 100644 --- a/docs/workspace/catalog/online_tables.rst +++ b/docs/workspace/catalog/online_tables.rst @@ -9,10 +9,10 @@ .. py:method:: create(table: OnlineTable) -> Wait[OnlineTable] Create a new Online Table. - + :param table: :class:`OnlineTable` Specification of the online table to be created. - + :returns: Long-running operation waiter for :class:`OnlineTable`. See :method:wait_get_online_table_active for more details. @@ -26,20 +26,20 @@ Delete an online table. Warning: This will delete all the data in the online table. If the source Delta table was deleted or modified since this Online Table was created, this will lose the data forever! - + :param name: str Full three-part (catalog, schema, table) name of the table. - - + + .. py:method:: get(name: str) -> OnlineTable Get information about an existing online table and its status. - + :param name: str Full three-part (catalog, schema, table) name of the table. - + :returns: :class:`OnlineTable` diff --git a/docs/workspace/catalog/policies.rst b/docs/workspace/catalog/policies.rst index 2eb9e6a99..d2580185d 100644 --- a/docs/workspace/catalog/policies.rst +++ b/docs/workspace/catalog/policies.rst @@ -14,38 +14,38 @@ .. py:method:: create_policy(policy_info: PolicyInfo) -> PolicyInfo Creates a new policy on a securable. The new policy applies to the securable and all its descendants. - + :param policy_info: :class:`PolicyInfo` Required. The policy to create. - + :returns: :class:`PolicyInfo` .. py:method:: delete_policy(on_securable_type: str, on_securable_fullname: str, name: str) -> DeletePolicyResponse Delete an ABAC policy defined on a securable. - + :param on_securable_type: str Required. The type of the securable to delete the policy from. :param on_securable_fullname: str Required. The fully qualified name of the securable to delete the policy from. :param name: str Required. The name of the policy to delete - + :returns: :class:`DeletePolicyResponse` .. 
py:method:: get_policy(on_securable_type: str, on_securable_fullname: str, name: str) -> PolicyInfo Get the policy definition on a securable. - + :param on_securable_type: str Required. The type of the securable to retrieve the policy for. :param on_securable_fullname: str Required. The fully qualified name of the securable to retrieve the policy for. :param name: str Required. The name of the policy to retrieve. - + :returns: :class:`PolicyInfo` @@ -53,7 +53,7 @@ List all policies defined on a securable. Optionally, the list can include inherited policies defined on the securable's parent schema or catalog. - + :param on_securable_type: str Required. The type of the securable to list policies for. :param on_securable_fullname: str @@ -67,14 +67,14 @@ greater than 0, the page length is the minimum of this value and a server configured value; :param page_token: str (optional) Optional. Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`PolicyInfo` .. py:method:: update_policy(on_securable_type: str, on_securable_fullname: str, name: str, policy_info: PolicyInfo [, update_mask: Optional[str]]) -> PolicyInfo Update an ABAC policy on a securable. - + :param on_securable_type: str Required. The type of the securable to update the policy for. :param on_securable_fullname: str @@ -91,6 +91,6 @@ :param update_mask: str (optional) Optional. The update mask field for specifying user intentions on which fields to update in the request. - + :returns: :class:`PolicyInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/quality_monitors.rst b/docs/workspace/catalog/quality_monitors.rst index 79786717a..aa8c9c1e8 100644 --- a/docs/workspace/catalog/quality_monitors.rst +++ b/docs/workspace/catalog/quality_monitors.rst @@ -13,27 +13,27 @@ .. py:method:: cancel_refresh(table_name: str, refresh_id: int) Cancels an already-initiated refresh job. - + :param table_name: str UC table name in format `catalog.schema.table_name`. table_name is case insensitive and spaces are disallowed. :param refresh_id: int - - + + .. py:method:: create(table_name: str, output_schema_name: str, assets_dir: str [, baseline_table_name: Optional[str], custom_metrics: Optional[List[MonitorMetric]], data_classification_config: Optional[MonitorDataClassificationConfig], inference_log: Optional[MonitorInferenceLog], latest_monitor_failure_msg: Optional[str], notifications: Optional[MonitorNotifications], schedule: Optional[MonitorCronSchedule], skip_builtin_dashboard: Optional[bool], slicing_exprs: Optional[List[str]], snapshot: Optional[MonitorSnapshot], time_series: Optional[MonitorTimeSeries], warehouse_id: Optional[str]]) -> MonitorInfo Creates a new monitor for the specified table. - + The caller must either: 1. be an owner of the table's parent catalog, have **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the table 2. have **USE_CATALOG** on the table's parent catalog, be an owner of the table's parent schema, and have **SELECT** access on the table. 3. have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table. - + Workspace assets, such as the dashboard, will be created in the workspace where this call was made. - + :param table_name: str UC table name in format `catalog.schema.table_name`. This field corresponds to the {full_table_name_arg} arg in the endpoint path.
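A minimal usage sketch of this create call, assuming an authenticated :class:`WorkspaceClient` and an existing Unity Catalog table the caller can read; the table, schema, and directory names below are illustrative:

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.catalog import MonitorSnapshot

    w = WorkspaceClient()

    # Snapshot profile: profiles the full table state on each refresh.
    info = w.quality_monitors.create(
        table_name="main.sales.transactions",  # illustrative UC table
        output_schema_name="main.monitoring",  # where metric tables land
        assets_dir="/Shared/quality_monitors/main.sales.transactions",
        snapshot=MonitorSnapshot(),
    )
    print(info.status)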
@@ -73,107 +73,107 @@ :param warehouse_id: str (optional) Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. - + :returns: :class:`MonitorInfo` .. py:method:: delete(table_name: str) -> DeleteMonitorResponse Deletes a monitor for the specified table. - + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table. - + Additionally, the call must be made from the workspace where the monitor was created. - + Note that the metric tables and dashboard will not be deleted as part of this call; those assets must be manually cleaned up (if desired). - + :param table_name: str UC table name in format `catalog.schema.table_name`. This field corresponds to the {full_table_name_arg} arg in the endpoint path. - + :returns: :class:`DeleteMonitorResponse` .. py:method:: get(table_name: str) -> MonitorInfo Gets a monitor for the specified table. - + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema. 3. have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table. - + The returned information includes configuration values, as well as information on assets created by the monitor. Some information (e.g., dashboard) may be filtered out if the caller is in a different workspace than where the monitor was created. - + :param table_name: str UC table name in format `catalog.schema.table_name`. This field corresponds to the {full_table_name_arg} arg in the endpoint path. - + :returns: :class:`MonitorInfo` .. py:method:: get_refresh(table_name: str, refresh_id: int) -> MonitorRefreshInfo Gets info about a specific monitor refresh using the given refresh ID. - + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table. - + Additionally, the call must be made from the workspace where the monitor was created. - + :param table_name: str Full name of the table. :param refresh_id: int ID of the refresh. - + :returns: :class:`MonitorRefreshInfo` .. py:method:: list_refreshes(table_name: str) -> MonitorRefreshListResponse Gets an array containing the history of the most recent refreshes (up to 25) for this table. - + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table. - + Additionally, the call must be made from the workspace where the monitor was created. - + :param table_name: str UC table name in format `catalog.schema.table_name`. table_name is case insensitive and spaces are disallowed. - + :returns: :class:`MonitorRefreshListResponse` .. 
py:method:: regenerate_dashboard(table_name: str [, warehouse_id: Optional[str]]) -> RegenerateDashboardResponse Regenerates the monitoring dashboard for the specified table. - + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table - + The call must be made from the workspace where the monitor was created. The dashboard will be regenerated in the assets directory that was specified when the monitor was created. - + :param table_name: str UC table name in format `catalog.schema.table_name`. This field corresponds to the {full_table_name_arg} arg in the endpoint path. :param warehouse_id: str (optional) Optional argument to specify the warehouse for dashboard regeneration. If not specified, the first running warehouse will be used. - + :returns: :class:`RegenerateDashboardResponse` @@ -181,35 +181,35 @@ Queues a metric refresh on the monitor for the specified table. The refresh will execute in the background. - + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table - + Additionally, the call must be made from the workspace where the monitor was created. - + :param table_name: str UC table name in format `catalog.schema.table_name`. table_name is case insensitive and spaces are disallowed. - + :returns: :class:`MonitorRefreshInfo` .. py:method:: update(table_name: str, output_schema_name: str [, baseline_table_name: Optional[str], custom_metrics: Optional[List[MonitorMetric]], dashboard_id: Optional[str], data_classification_config: Optional[MonitorDataClassificationConfig], inference_log: Optional[MonitorInferenceLog], latest_monitor_failure_msg: Optional[str], notifications: Optional[MonitorNotifications], schedule: Optional[MonitorCronSchedule], slicing_exprs: Optional[List[str]], snapshot: Optional[MonitorSnapshot], time_series: Optional[MonitorTimeSeries]]) -> MonitorInfo Updates a monitor for the specified table. - + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table. - + Additionally, the call must be made from the workspace where the monitor was created, and the caller must be the original creator of the monitor. - + Certain configuration fields, such as output asset identifiers, cannot be updated. - + :param table_name: str UC table name in format `catalog.schema.table_name`. This field corresponds to the {full_table_name_arg} arg in the endpoint path. @@ -244,6 +244,6 @@ Configuration for monitoring snapshot tables. :param time_series: :class:`MonitorTimeSeries` (optional) Configuration for monitoring time series tables. 
- +     :returns: :class:`MonitorInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/registered_models.rst b/docs/workspace/catalog/registered_models.rst index 4e6b0aa6b..9c51cad7a 100644 --- a/docs/workspace/catalog/registered_models.rst +++ b/docs/workspace/catalog/registered_models.rst @@ -7,17 +7,17 @@ Databricks provides a hosted version of MLflow Model Registry in Unity Catalog. Models in Unity Catalog provide centralized access control, auditing, lineage, and discovery of ML models across Databricks workspaces. - + An MLflow registered model resides in the third layer of Unity Catalog’s three-level namespace. Registered models contain model versions, which correspond to actual ML models (MLflow models). Creating new model versions currently requires use of the MLflow Python client. Once model versions are created, you can load them for batch inference using MLflow Python client APIs, or deploy them for real-time serving using Databricks Model Serving. - + All operations on registered models and model versions require USE_CATALOG permissions on the enclosing catalog and USE_SCHEMA permissions on the enclosing schema. In addition, the following privileges are required for various operations: - + * To create a registered model, users must additionally have the CREATE_MODEL permission on the target schema. * To view registered model or model version metadata, model version data files, or invoke a model version, users must additionally have the EXECUTE permission on the registered model * To update @@ -25,22 +25,22 @@ registered model * To update other registered model or model version metadata (comments, aliases), create a new model version, or update permissions on the registered model, users must be owners of the registered model. - + Note: The securable type for models is "FUNCTION". When using REST APIs (e.g. tagging, grants) that specify a securable type, use "FUNCTION" as the securable type. .. py:method:: create(catalog_name: str, schema_name: str, name: str [, comment: Optional[str], storage_location: Optional[str]]) -> RegisteredModelInfo Creates a new registered model in Unity Catalog. - + File storage for model versions in the registered model will be located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. - + For registered model creation to succeed, the user must satisfy the following conditions: - The caller must be a metastore admin, or be the owner of the parent catalog and schema, or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - The caller must have the **CREATE MODEL** or **CREATE FUNCTION** privilege on the parent schema. - + :param catalog_name: str The name of the catalog where the schema and the registered model reside :param schema_name: str @@ -51,48 +51,48 @@ The comment attached to the registered model :param storage_location: str (optional) The storage location on the cloud under which model version data files are stored - + :returns: :class:`RegisteredModelInfo` .. py:method:: delete(full_name: str) Deletes a registered model and all its model versions from the specified parent catalog and schema. - + The caller must be a metastore admin or an owner of the registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + :param full_name: str The three-level (fully qualified) name of the registered model - - + +
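As a sketch, the create and delete calls above compose as follows, assuming an authenticated :class:`WorkspaceClient` and a caller holding the privileges listed above; the catalog, schema, and model names are illustrative:

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    model = w.registered_models.create(
        catalog_name="main",       # illustrative catalog
        schema_name="ml_models",   # caller needs CREATE MODEL here
        name="churn_classifier",
        comment="created from the SDK",
    )

    # Clean up: removes the registered model and all of its model versions.
    w.registered_models.delete(full_name=model.full_name)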
.. py:method:: delete_alias(full_name: str, alias: str) Deletes a registered model alias. - + The caller must be a metastore admin or an owner of the registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + :param full_name: str The three-level (fully qualified) name of the registered model :param alias: str The name of the alias - - + + .. py:method:: get(full_name: str [, include_aliases: Optional[bool], include_browse: Optional[bool]]) -> RegisteredModelInfo Get a registered model. - + The caller must be a metastore admin or an owner of (or have the **EXECUTE** privilege on) the registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + :param full_name: str The three-level (fully qualified) name of the registered model :param include_aliases: bool (optional) @@ -100,7 +100,7 @@ :param include_browse: bool (optional) Whether to include registered models in the response for which the principal can only access selective metadata for - + :returns: :class:`RegisteredModelInfo` @@ -108,15 +108,15 @@ List registered models. You can list registered models under a particular schema, or list all registered models in the current metastore. - + The returned models are filtered based on the privileges of the calling user. For example, the metastore admin is able to list all the registered models. A regular user needs to be the owner or have the **EXECUTE** privilege on the registered model to receive the registered models in the response. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + There is no guarantee of a specific ordering of the elements in the response. - + :param catalog_name: str (optional) The identifier of the catalog under which to list registered models. If specified, schema_name must be specified. @@ -125,13 +125,13 @@ selective metadata for :param max_results: int (optional) Max number of registered models to return. - + If both catalog and schema are specified: - when max_results is not specified, the page length is set to a server configured value (10000, as of 4/2/2024). - when set to a value greater than 0, the page length is the minimum of this value and a server configured value (10000, as of 4/2/2024); - when set to 0, the page length is set to a server configured value (10000, as of 4/2/2024); - when set to a value less than 0, an invalid parameter error is returned; - + If neither schema nor catalog is specified: - when max_results is not specified, the page length is set to a server configured value (100, as of 4/2/2024). - when set to a value greater than 0, the page length is the minimum of this value and a server configured value (1000, as of 4/2/2024); - @@ -142,38 +142,38 @@ :param schema_name: str (optional) The identifier of the schema under which to list registered models. If specified, catalog_name must be specified. - + :returns: Iterator over :class:`RegisteredModelInfo` .. py:method:: set_alias(full_name: str, alias: str, version_num: int) -> RegisteredModelAlias Set an alias on the specified registered model.
- + The caller must be a metastore admin or an owner of the registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + :param full_name: str Full name of the registered model :param alias: str The name of the alias :param version_num: int The version number of the model version to which the alias points - + :returns: :class:`RegisteredModelAlias` .. py:method:: update(full_name: str [, comment: Optional[str], new_name: Optional[str], owner: Optional[str]]) -> RegisteredModelInfo Updates the specified registered model. - + The caller must be a metastore admin or an owner of the registered model. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + Currently only the name, the owner or the comment of the registered model can be updated. - + :param full_name: str The three-level (fully qualified) name of the registered model :param comment: str (optional) @@ -182,6 +182,6 @@ New name for the registered model. :param owner: str (optional) The identifier of the user who owns the registered model - + :returns: :class:`RegisteredModelInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/resource_quotas.rst b/docs/workspace/catalog/resource_quotas.rst index dc7df22ac..61eb7d2a5 100644 --- a/docs/workspace/catalog/resource_quotas.rst +++ b/docs/workspace/catalog/resource_quotas.rst @@ -8,23 +8,22 @@ can be created. Quotas are expressed in terms of a resource type and a parent (for example, tables per metastore or schemas per catalog). The resource quota APIs enable you to monitor your current usage and limits. For more information on resource quotas see the [Unity Catalog documentation]. - - [Unity Catalog documentation]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#resource-quotas + [Unity Catalog documentation]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#resource-quotas .. py:method:: get_quota(parent_securable_type: str, parent_full_name: str, quota_name: str) -> GetQuotaResponse The GetQuota API returns usage information for a single resource quota, defined as a child-parent pair. This API also refreshes the quota count if it is out of date. Refreshes are triggered asynchronously. The updated count might not be returned in the first call. - + :param parent_securable_type: str Securable type of the quota parent. :param parent_full_name: str Full name of the parent resource. Provide the metastore ID if the parent is a metastore. :param quota_name: str Name of the quota. Follows the pattern of the quota type, with "-quota" added as a suffix. - + :returns: :class:`GetQuotaResponse` @@ -32,11 +31,11 @@ ListQuotas returns all quota values under the metastore. There are no SLAs on the freshness of the counts returned. This API does not trigger a refresh of quota counts. - + :param max_results: int (optional) The number of quotas to return. :param page_token: str (optional) Opaque token for the next page of results. 
- + :returns: Iterator over :class:`QuotaInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/rfa.rst b/docs/workspace/catalog/rfa.rst index 3019403bb..a67ed73a6 100644 --- a/docs/workspace/catalog/rfa.rst +++ b/docs/workspace/catalog/rfa.rst @@ -6,7 +6,7 @@ Request for Access enables customers to request access to and manage access request destinations for Unity Catalog securables. - + These APIs provide a standardized way to update, get, and request to access request destinations. Fine-grained authorization ensures that only users with appropriate permissions can manage access request destinations. @@ -16,16 +16,16 @@ Creates access requests for Unity Catalog permissions for a specified principal on a securable object. This Batch API can take in multiple principals, securable objects, and permissions as the input and returns the access request destinations for each. Principals must be unique across the API call. - + The supported securable types are: "metastore", "catalog", "schema", "table", "external_location", "connection", "credential", "function", "registered_model", and "volume". - + :param requests: List[:class:`CreateAccessRequest`] (optional) A list of individual access requests, where each request corresponds to a set of permissions being requested on a list of securables for a specified principal. - + At most 30 requests per API call. - + :returns: :class:`BatchCreateAccessRequestsResponse` @@ -34,15 +34,15 @@ Gets an array of access request destinations for the specified securable. Any caller can see URL destinations or the destinations on the metastore. Otherwise, only those with **BROWSE** permissions on the securable can see destinations. - + The supported securable types are: "metastore", "catalog", "schema", "table", "external_location", "connection", "credential", "function", "registered_model", and "volume". - + :param securable_type: str The type of the securable. :param full_name: str The full name of the securable. - + :returns: :class:`AccessRequestDestinations` @@ -55,10 +55,10 @@ securable. A maximum of 5 emails and 5 external notification destinations (Slack, Microsoft Teams, and Generic Webhook destinations) can be assigned to a securable. If a URL destination is assigned, no other destinations can be set. - + The supported securable types are: "metastore", "catalog", "schema", "table", "external_location", "connection", "credential", "function", "registered_model", and "volume". - + :param access_request_destinations: :class:`AccessRequestDestinations` The access request destinations to assign to the securable. For each destination, a **destination_id** and **destination_type** must be defined. @@ -68,10 +68,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`AccessRequestDestinations` \ No newline at end of file diff --git a/docs/workspace/catalog/schemas.rst b/docs/workspace/catalog/schemas.rst index fd1d323a1..4c685f536 100644 --- a/docs/workspace/catalog/schemas.rst +++ b/docs/workspace/catalog/schemas.rst @@ -32,7 +32,7 @@ Creates a new schema for a catalog in the Metastore.
The caller must be a metastore admin, or have the **CREATE_SCHEMA** privilege in the parent catalog. - + :param name: str Name of schema, relative to parent catalog. :param catalog_name: str @@ -43,7 +43,7 @@ A map of key-value properties attached to the securable. :param storage_root: str (optional) Storage root URL for managed tables within schema. - + :returns: :class:`SchemaInfo` @@ -51,13 +51,13 @@ Deletes the specified schema from the parent catalog. The caller must be the owner of the schema or an owner of the parent catalog. - + :param full_name: str Full name of the schema. :param force: bool (optional) Force deletion even if the schema is not empty. - - + + .. py:method:: get(full_name: str [, include_browse: Optional[bool]]) -> SchemaInfo @@ -85,13 +85,13 @@ Gets the specified schema within the metastore. The caller must be a metastore admin, the owner of the schema, or a user that has the **USE_SCHEMA** privilege on the schema. - + :param full_name: str Full name of the schema. :param include_browse: bool (optional) Whether to include schemas in the response for which the principal can only access selective metadata for - + :returns: :class:`SchemaInfo` @@ -119,7 +119,7 @@ owner of the parent catalog, all schemas for the catalog will be retrieved. Otherwise, only schemas owned by the caller (or for which the caller has the **USE_SCHEMA** privilege) will be retrieved. There is no guarantee of a specific ordering of the elements in the array. - + :param catalog_name: str Parent catalog for schemas of interest. :param include_browse: bool (optional) @@ -132,7 +132,7 @@ (recommended); - when set to a value less than 0, an invalid parameter error is returned; :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`SchemaInfo` @@ -163,7 +163,7 @@ the caller is a metastore admin, only the __owner__ field can be changed in the update. If the __name__ field must be updated, the caller must be a metastore admin or have the **CREATE_SCHEMA** privilege on the parent catalog. - + :param full_name: str Full name of the schema. :param comment: str (optional) @@ -176,6 +176,6 @@ Username of current owner of schema. :param properties: Dict[str,str] (optional) A map of key-value properties attached to the securable. - + :returns: :class:`SchemaInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index 5fe0bb70f..62f87bf99 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -9,9 +9,9 @@ control which users and groups can access the credential. If a user does not have access to a storage credential in Unity Catalog, the request fails and Unity Catalog does not attempt to authenticate to your cloud tenant on the user’s behalf. - + Databricks recommends using external locations rather than using storage credentials directly. - + To create storage credentials, you must be a Databricks account admin. The account admin who creates the storage credential can delegate ownership to another user or group to manage permissions on it. @@ -39,10 +39,10 @@ w.storage_credentials.delete(delete=created.name) Creates a new storage credential. - + The caller must be a metastore admin or have the **CREATE_STORAGE_CREDENTIAL** privilege on the metastore. - + :param name: str The credential name. The name must be unique among storage and service credentials within the metastore. 
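A sketch of this create call with the Python SDK, in the style of the surrounding examples; it assumes an authenticated :class:`WorkspaceClient`, and the AWS IAM role ARN is an illustrative placeholder:

.. code-block:: python

    import time

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.catalog import AwsIamRoleRequest

    w = WorkspaceClient()

    created = w.storage_credentials.create(
        name=f"sdk-{time.time_ns()}",  # unique within the metastore
        aws_iam_role=AwsIamRoleRequest(role_arn="arn:aws:iam::123456789012:role/uc-access"),
        comment="example credential",
    )
    print(created.id)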
@@ -63,7 +63,7 @@ **STORAGE**. :param skip_validation: bool (optional) Supplying true to this argument skips validation of the created credential. - + :returns: :class:`StorageCredentialInfo` @@ -71,14 +71,14 @@ Deletes a storage credential from the metastore. The caller must be an owner of the storage credential. - + :param name: str Name of the storage credential. :param force: bool (optional) Force deletion even if there are dependent external locations or external tables (when purpose is **STORAGE**) or dependent services (when purpose is **SERVICE**). - - + + .. py:method:: get(name: str) -> StorageCredentialInfo @@ -108,10 +108,10 @@ Gets a storage credential from the metastore. The caller must be a metastore admin, the owner of the storage credential, or have some permission on the storage credential. - + :param name: str Name of the storage credential. - + :returns: :class:`StorageCredentialInfo` @@ -132,7 +132,7 @@ only those storage credentials the caller has permission to access. If the caller is a metastore admin, retrieval of credentials is unrestricted. There is no guarantee of a specific ordering of the elements in the array. - + :param max_results: int (optional) Maximum number of storage credentials to return. If not set, all the storage credentials are returned (not recommended). - when set to a value greater than 0, the page length is the minimum of @@ -141,7 +141,7 @@ returned; :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`StorageCredentialInfo` @@ -175,10 +175,10 @@ w.storage_credentials.delete(delete=created.name) Updates a storage credential on the metastore. - + The caller must be the owner of the storage credential or a metastore admin. If the caller is a metastore admin, only the **owner** field can be changed. - + :param name: str Name of the storage credential. :param aws_iam_role: :class:`AwsIamRoleRequest` (optional) @@ -206,7 +206,7 @@ **STORAGE**. :param skip_validation: bool (optional) Supplying true to this argument skips validation of the updated credential. - + :returns: :class:`StorageCredentialInfo` @@ -216,12 +216,12 @@ provided. If only one of them is provided, it will be used for validation. And if both are provided, the __url__ will be used for validation, and __external_location_name__ will be ignored when checking overlapping URLs. - + Either the __storage_credential_name__ or the cloud-specific credential must be provided. - + The caller must be a metastore admin or the storage credential owner or have the **CREATE_EXTERNAL_LOCATION** privilege on the metastore and the storage credential. - + :param aws_iam_role: :class:`AwsIamRoleRequest` (optional) The AWS IAM role configuration. :param azure_managed_identity: :class:`AzureManagedIdentityRequest` (optional) @@ -240,6 +240,6 @@ Required. The name of an existing credential or long-lived cloud credential to validate. :param url: str (optional) The external location URL to validate. - + :returns: :class:`ValidateStorageCredentialResponse` \ No newline at end of file diff --git a/docs/workspace/catalog/system_schemas.rst b/docs/workspace/catalog/system_schemas.rst index 6c94e5349..5f02fa439 100644 --- a/docs/workspace/catalog/system_schemas.rst +++ b/docs/workspace/catalog/system_schemas.rst @@ -11,35 +11,35 @@ Disables the system schema and removes it from the system catalog. The caller must be an account admin or a metastore admin. - + :param metastore_id: str The metastore ID under which the system schema lives. :param schema_name: str Full name of the system schema. - - + +
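A short sketch of how this API composes with the list and enable calls documented below, assuming an authenticated :class:`WorkspaceClient`; the metastore lookup via the summary call and the "access" schema name are illustrative assumptions:

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Current metastore ID, assumed available from the metastore summary.
    metastore_id = w.metastores.summary().metastore_id

    # Inspect which system schemas exist and their state, then enable one.
    for schema in w.system_schemas.list(metastore_id=metastore_id):
        print(schema.schema, schema.state)

    w.system_schemas.enable(metastore_id=metastore_id, schema_name="access")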
.. py:method:: enable(metastore_id: str, schema_name: str [, catalog_name: Optional[str]]) Enables the system schema and adds it to the system catalog. The caller must be an account admin or a metastore admin. - + :param metastore_id: str The metastore ID under which the system schema lives. :param schema_name: str Full name of the system schema. :param catalog_name: str (optional) the catalog in which the system schema is to be enabled - - + + .. py:method:: list(metastore_id: str [, max_results: Optional[int], page_token: Optional[str]]) -> Iterator[SystemSchemaInfo] Gets an array of system schemas for a metastore. The caller must be an account admin or a metastore admin. - + :param metastore_id: str The ID for the metastore in which the system schema resides. :param max_results: int (optional) @@ -49,6 +49,6 @@ is returned; - If not set, all the schemas are returned (not recommended). :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`SystemSchemaInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/table_constraints.rst b/docs/workspace/catalog/table_constraints.rst index 2813e6835..2d3317c03 100644 --- a/docs/workspace/catalog/table_constraints.rst +++ b/docs/workspace/catalog/table_constraints.rst @@ -5,45 +5,45 @@ .. py:class:: TableConstraintsAPI Primary key and foreign key constraints encode relationships between fields in tables. - + Primary and foreign keys are informational only and are not enforced. Foreign keys must reference a primary key in another table. This primary key is the parent constraint of the foreign key and the table this primary key is on is the parent table of the foreign key. Similarly, the foreign key is the child constraint of its referenced primary key; the table of the foreign key is the child table of the primary key. - + You can declare primary keys and foreign keys as part of the table specification during table creation. You can also add or drop constraints on existing tables. .. py:method:: create(full_name_arg: str, constraint: TableConstraint) -> TableConstraint Creates a new table constraint. - + For the table constraint creation to succeed, the user must satisfy both of these conditions: - the user must have the **USE_CATALOG** privilege on the table's parent catalog, the **USE_SCHEMA** privilege on the table's parent schema, and be the owner of the table. - if the new constraint is a __ForeignKeyConstraint__, the user must have the **USE_CATALOG** privilege on the referenced parent table's catalog, the **USE_SCHEMA** privilege on the referenced parent table's schema, and be the owner of the referenced parent table. - + :param full_name_arg: str The full name of the table referenced by the constraint. :param constraint: :class:`TableConstraint` - + :returns: :class:`TableConstraint` .. py:method:: delete(full_name: str, constraint_name: str, cascade: bool) Deletes a table constraint. - + For the table constraint deletion to succeed, the user must satisfy both of these conditions: - the user must have the **USE_CATALOG** privilege on the table's parent catalog, the **USE_SCHEMA** privilege on the table's parent schema, and be the owner of the table.
- if the __cascade__ argument is **true**, the user must have the following permissions on all of the child tables: the **USE_CATALOG** privilege on the table's catalog, the **USE_SCHEMA** privilege on the table's schema, and be the owner of the table. - + :param full_name: str Full name of the table referenced by the constraint. :param constraint_name: str @@ -51,6 +51,6 @@ :param cascade: bool If true, try deleting all child constraints of the current constraint. If false, reject this operation if the current constraint has any child constraints. - - + + \ No newline at end of file diff --git a/docs/workspace/catalog/tables.rst b/docs/workspace/catalog/tables.rst index c4b6dad3d..31e465ec0 100644 --- a/docs/workspace/catalog/tables.rst +++ b/docs/workspace/catalog/tables.rst @@ -9,31 +9,31 @@ have the USE_CATALOG permission on its parent catalog. To query a table, users must have the SELECT permission on the table, and they must have the USE_CATALOG permission on its parent catalog and the USE_SCHEMA permission on its parent schema. - + A table can be managed or external. From an API perspective, a __VIEW__ is a particular kind of table (rather than a managed or external table). .. py:method:: create(name: str, catalog_name: str, schema_name: str, table_type: TableType, data_source_format: DataSourceFormat, storage_location: str [, columns: Optional[List[ColumnInfo]], properties: Optional[Dict[str, str]]]) -> TableInfo Creates a new table in the specified catalog and schema. - + To create an external delta table, the caller must have the **EXTERNAL_USE_SCHEMA** privilege on the parent schema and the **EXTERNAL_USE_LOCATION** privilege on the external location. These privileges must always be granted explicitly, and cannot be inherited through ownership or **ALL_PRIVILEGES**. - + Standard UC permissions needed to create tables still apply: **USE_CATALOG** on the parent catalog (or ownership of the parent catalog), **CREATE_TABLE** and **USE_SCHEMA** on the parent schema (or ownership of the parent schema), and **CREATE_EXTERNAL_TABLE** on external location. - + The **columns** field needs to be in a Spark compatible format, so we recommend you use Spark to create these tables. The API itself does not validate the correctness of the column spec. If the spec is not Spark compatible, the tables may not be readable by Databricks Runtime. - + NOTE: The Create Table API for external clients only supports creating **external delta tables**. The values shown in the respective enums are all values supported by Databricks, however for this specific Create Table API, only **table_type** **EXTERNAL** and **data_source_format** **DELTA** are supported. Additionally, column masks are not supported when creating tables through this API. - + :param name: str Name of table, relative to parent schema. :param catalog_name: str @@ -48,7 +48,7 @@ The array of __ColumnInfo__ definitions of the table's columns. :param properties: Dict[str,str] (optional) A map of key-value properties attached to the securable. - + :returns: :class:`TableInfo` @@ -58,11 +58,11 @@ parent catalog, have the **USE_CATALOG** privilege on the parent catalog and be the owner of the parent schema, or be the owner of the table and have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + :param full_name: str Full name of the table. - - + +
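A minimal sketch combining the exists and get calls documented below, assuming an authenticated :class:`WorkspaceClient`; the three-level table name is illustrative:

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    full_name = "main.sales.transactions"  # illustrative UC table

    # Check existence first, then fetch full metadata only if present.
    if w.tables.exists(full_name=full_name).table_exists:
        t = w.tables.get(full_name=full_name)
        print(t.table_type, t.data_source_format)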
.. py:method:: exists(full_name: str) -> TableExistsResponse @@ -73,10 +73,10 @@ **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema, and either be the table owner or have the **SELECT** privilege on the table. * Have **BROWSE** privilege on the parent catalog * Have **BROWSE** privilege on the parent schema - + :param full_name: str Full name of the table. - + :returns: :class:`TableExistsResponse` @@ -125,7 +125,7 @@ the parent schema and have the **USE_CATALOG** privilege on the parent catalog * Have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema, and either be the table owner or have the **SELECT** privilege on the table. - + :param full_name: str Full name of the table. :param include_browse: bool (optional) @@ -135,7 +135,7 @@ Whether delta metadata should be included in the response. :param include_manifest_capabilities: bool (optional) Whether to include a manifest containing table capabilities in the response. - + :returns: :class:`TableInfo` @@ -167,7 +167,7 @@ latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. There is no guarantee of a specific ordering of the elements in the array. - + :param catalog_name: str Name of parent catalog for tables of interest. :param schema_name: str @@ -191,7 +191,7 @@ not. :param page_token: str (optional) Opaque token to send for the next page of results (pagination). - + :returns: Iterator over :class:`TableInfo` @@ -220,15 +220,15 @@ Gets an array of summaries for tables for a schema and catalog within the metastore. The table summaries returned are either: - + * summaries for tables (within the current metastore and parent catalog and schema), when the user is a metastore admin, or: * summaries for tables and schemas (within the current metastore and parent catalog) for which the user has ownership or the **SELECT** privilege on the table and ownership or **USE_SCHEMA** privilege on the schema, provided that the user also has ownership or the **USE_CATALOG** privilege on the parent catalog. - + There is no guarantee of a specific ordering of the elements in the array. - + :param catalog_name: str Name of parent catalog for tables of interest. :param include_manifest_capabilities: bool (optional) @@ -245,7 +245,7 @@ A SQL LIKE pattern (% and _) for schema names. All schemas will be returned if not set or empty. :param table_name_pattern: str (optional) A SQL LIKE pattern (% and _) for table names. All tables will be returned if not set or empty. - + :returns: Iterator over :class:`TableSummary` @@ -255,11 +255,11 @@ **USE_CATALOG** privilege on the parent catalog and be the owner of the parent schema, or be the owner of the table and have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + :param full_name: str Full name of the table. :param owner: str (optional) Username of current owner of table.
- - + + \ No newline at end of file diff --git a/docs/workspace/catalog/temporary_path_credentials.rst b/docs/workspace/catalog/temporary_path_credentials.rst index 6694f39f8..748a7d1d1 100644 --- a/docs/workspace/catalog/temporary_path_credentials.rst +++ b/docs/workspace/catalog/temporary_path_credentials.rst @@ -10,20 +10,20 @@ provider has its own type of credentials: AWS uses temporary session tokens via AWS Security Token Service (STS), Azure utilizes Shared Access Signatures (SAS) for its data storage services, and Google Cloud supports temporary credentials through OAuth 2.0. - + Temporary path credentials ensure that data access is limited in scope and duration, reducing the risk of unauthorized access or misuse. To use the temporary path credentials API, a metastore admin needs to enable the external_access_enabled flag (off by default) at the metastore level. A user needs to be granted the EXTERNAL USE LOCATION permission by external location owner. For requests on existing external tables, user also needs to be granted the EXTERNAL USE SCHEMA permission at the schema level by catalog admin. - + Note that EXTERNAL USE SCHEMA is a schema level permission that can only be granted by catalog admin explicitly and is not included in schema ownership or ALL PRIVILEGES on the schema for security reasons. Similarly, EXTERNAL USE LOCATION is an external location level permission that can only be granted by external location owner explicitly and is not included in external location ownership or ALL PRIVILEGES on the external location for security reasons. - + This API only supports temporary path credentials for external locations and external tables, and volumes will be supported in the future. @@ -37,7 +37,7 @@ external location owners. For requests on existing external tables, the caller must also have the **EXTERNAL_USE_SCHEMA** privilege on the parent schema; this privilege can only be granted by catalog owners. - + :param url: str URL for path-based access. :param operation: :class:`PathOperation` @@ -46,6 +46,6 @@ Optional. When set to true, the service will not validate that the generated credentials can perform write operations, therefore no new paths will be created and the response will not contain valid credentials. Defaults to false. - + :returns: :class:`GenerateTemporaryPathCredentialResponse` \ No newline at end of file diff --git a/docs/workspace/catalog/temporary_table_credentials.rst b/docs/workspace/catalog/temporary_table_credentials.rst index 54ccd79b0..0ef1511ad 100644 --- a/docs/workspace/catalog/temporary_table_credentials.rst +++ b/docs/workspace/catalog/temporary_table_credentials.rst @@ -10,7 +10,7 @@ provider has its own type of credentials: AWS uses temporary session tokens via AWS Security Token Service (STS), Azure utilizes Shared Access Signatures (SAS) for its data storage services, and Google Cloud supports temporary credentials through OAuth 2.0. - + Temporary table credentials ensure that data access is limited in scope and duration, reducing the risk of unauthorized access or misuse. To use the temporary table credentials API, a metastore admin needs to enable the external_access_enabled flag (off by default) at the metastore level, and user needs to be @@ -24,12 +24,12 @@ must have **external_access_enabled** flag set to true (default false). The caller must have the **EXTERNAL_USE_SCHEMA** privilege on the parent schema and this privilege can only be granted by catalog owners. 
- + :param operation: :class:`TableOperation` (optional) The operation performed against the table data, either READ or READ_WRITE. If READ_WRITE is specified, the credentials returned will have write permissions; otherwise, they will be read-only. :param table_id: str (optional) UUID of the table to read or write. - + :returns: :class:`GenerateTemporaryTableCredentialResponse` \ No newline at end of file diff --git a/docs/workspace/catalog/volumes.rst b/docs/workspace/catalog/volumes.rst index 78d84fd57..88dcf3cc0 100644 --- a/docs/workspace/catalog/volumes.rst +++ b/docs/workspace/catalog/volumes.rst @@ -59,21 +59,21 @@ w.volumes.delete(name=created_volume.full_name) Creates a new volume. - + The user can create either an external volume or a managed volume. An external volume will be created in the specified external location, while a managed volume will be located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. - + For the volume creation to succeed, the user must satisfy the following conditions: - The caller must be a metastore admin, or be the owner of the parent catalog and schema, or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - The caller must have **CREATE VOLUME** privilege on the parent schema. - + For an external volume, the following conditions must also be satisfied: - The caller must have **CREATE EXTERNAL VOLUME** privilege on the external location. - There must be no other tables or volumes in the specified storage location. - The specified storage location must not be under the location of other tables, volumes, catalogs, or schemas. - + :param catalog_name: str The name of the catalog where the schema and the volume are :param schema_name: str @@ -85,22 +85,22 @@ The comment attached to the volume :param storage_location: str (optional) The storage location on the cloud - + :returns: :class:`VolumeInfo` .. py:method:: delete(name: str) Deletes a volume from the specified parent catalog and schema. - + The caller must be a metastore admin or an owner of the volume. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + :param name: str The three-level (fully qualified) name of the volume - - + + .. py:method:: list(catalog_name: str, schema_name: str [, include_browse: Optional[bool], max_results: Optional[int], page_token: Optional[str]]) -> Iterator[VolumeInfo] @@ -127,15 +127,15 @@ w.catalogs.delete(name=created_catalog.name, force=True) Gets an array of volumes for the current metastore under the parent catalog and schema. - + The returned volumes are filtered based on the privileges of the calling user. For example, the metastore admin is able to list all the volumes. A regular user needs to be the owner or have the **READ VOLUME** privilege on the volume to receive the volumes in the response. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + There is no guarantee of a specific ordering of the elements in the array. - + :param catalog_name: str The identifier of the catalog :param schema_name: str @@ -145,20 +145,20 @@ metadata for :param max_results: int (optional) Maximum number of volumes to return (page length).
- + If not set, the page length is set to a server configured value (10000, as of 1/29/2024). - when set to a value greater than 0, the page length is the minimum of this value and a server configured value (10000, as of 1/29/2024); - when set to 0, the page length is set to a server configured value (10000, as of 1/29/2024) (recommended); - when set to a value less than 0, an invalid parameter error is returned; - + Note: this parameter controls only the maximum number of volumes to return. The actual number of volumes returned in a page may be smaller than this value, including 0, even if there are more pages. :param page_token: str (optional) Opaque token returned by a previous request. It must be included in the request to retrieve the next page of results (pagination). - + :returns: Iterator over :class:`VolumeInfo` @@ -212,17 +212,17 @@ w.volumes.delete(name=created_volume.full_name) Gets a volume from the metastore for a specific catalog and schema. - + The caller must be a metastore admin or an owner of (or have the **READ VOLUME** privilege on) the volume. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + :param name: str The three-level (fully qualified) name of the volume :param include_browse: bool (optional) Whether to include volumes in the response for which the principal can only access selective metadata for - + :returns: :class:`VolumeInfo` @@ -278,13 +278,13 @@ w.volumes.delete(name=created_volume.full_name) Updates the specified volume under the specified parent catalog and schema. - + The caller must be a metastore admin or an owner of the volume. For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - + Currently only the name, the owner or the comment of the volume could be updated. - + :param name: str The three-level (fully qualified) name of the volume :param comment: str (optional) @@ -293,6 +293,6 @@ New name for the volume. :param owner: str (optional) The identifier of the user who owns the volume - + :returns: :class:`VolumeInfo` \ No newline at end of file diff --git a/docs/workspace/catalog/workspace_bindings.rst b/docs/workspace/catalog/workspace_bindings.rst index 6287456d9..e562479cc 100644 --- a/docs/workspace/catalog/workspace_bindings.rst +++ b/docs/workspace/catalog/workspace_bindings.rst @@ -7,16 +7,16 @@ A securable in Databricks can be configured as __OPEN__ or __ISOLATED__. An __OPEN__ securable can be accessed from any workspace, while an __ISOLATED__ securable can only be accessed from a configured list of workspaces. This API allows you to configure (bind) securables to workspaces. - + NOTE: The __isolation_mode__ is configured for the securable itself (using its Update method) and the workspace bindings are only consulted when the securable's __isolation_mode__ is set to __ISOLATED__. - + A securable's workspace bindings can be configured by a metastore admin or the owner of the securable. - + The original path (/api/2.1/unity-catalog/workspace-bindings/catalogs/{name}) is deprecated. Please use the new path (/api/2.1/unity-catalog/bindings/{securable_type}/{securable_name}) which introduces the ability to bind a securable in READ_ONLY mode (catalogs only). - + Securable types that support binding: - catalog - storage_credential - credential - external_location .. 
py:method:: get(name: str) -> GetCatalogWorkspaceBindingsResponse @@ -41,10 +41,10 @@ Gets workspace bindings of the catalog. The caller must be a metastore admin or an owner of the catalog. - + :param name: str The name of the catalog. - + :returns: :class:`GetCatalogWorkspaceBindingsResponse` @@ -52,7 +52,7 @@ Gets workspace bindings of the securable. The caller must be a metastore admin or an owner of the securable. - + :param securable_type: str The type of the securable to bind to a workspace (catalog, storage_credential, credential, or external_location). @@ -65,7 +65,7 @@ error is returned; - If not set, all the workspace bindings are returned (not recommended). :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`WorkspaceBinding` @@ -94,14 +94,14 @@ Updates workspace bindings of the catalog. The caller must be a metastore admin or an owner of the catalog. - + :param name: str The name of the catalog. :param assign_workspaces: List[int] (optional) A list of workspace IDs. :param unassign_workspaces: List[int] (optional) A list of workspace IDs. - + :returns: :class:`UpdateCatalogWorkspaceBindingsResponse` @@ -109,7 +109,7 @@ Updates workspace bindings of the securable. The caller must be a metastore admin or an owner of the securable. - + :param securable_type: str The type of the securable to bind to a workspace (catalog, storage_credential, credential, or external_location). @@ -119,6 +119,6 @@ List of workspace bindings. :param remove: List[:class:`WorkspaceBinding`] (optional) List of workspace bindings. - + :returns: :class:`UpdateWorkspaceBindingsResponse` \ No newline at end of file diff --git a/docs/workspace/cleanrooms/clean_room_asset_revisions.rst b/docs/workspace/cleanrooms/clean_room_asset_revisions.rst index dccbeab03..3888732c1 100644 --- a/docs/workspace/cleanrooms/clean_room_asset_revisions.rst +++ b/docs/workspace/cleanrooms/clean_room_asset_revisions.rst @@ -9,7 +9,7 @@ .. py:method:: get(clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str, etag: str) -> CleanRoomAsset Get a specific revision of an asset - + :param clean_room_name: str Name of the clean room. :param asset_type: :class:`CleanRoomAssetAssetType` @@ -18,14 +18,14 @@ Name of the asset. :param etag: str Revision etag to fetch. If not provided, the latest revision will be returned. - + :returns: :class:`CleanRoomAsset` .. py:method:: list(clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[CleanRoomAsset] List revisions for an asset - + :param clean_room_name: str Name of the clean room. :param asset_type: :class:`CleanRoomAssetAssetType` @@ -36,6 +36,6 @@ Maximum number of asset revisions to return. Defaults to 10. :param page_token: str (optional) Opaque pagination token to go to next page based on the previous query. - + :returns: Iterator over :class:`CleanRoomAsset` \ No newline at end of file diff --git a/docs/workspace/cleanrooms/clean_room_assets.rst b/docs/workspace/cleanrooms/clean_room_assets.rst index 90e136cd7..69a4c09d8 100644 --- a/docs/workspace/cleanrooms/clean_room_assets.rst +++ b/docs/workspace/cleanrooms/clean_room_assets.rst @@ -13,19 +13,19 @@ asset that is added through this method, the clean room owner must also have enough privilege on the asset to consume it. The privilege must be maintained indefinitely for the clean room to be able to access the asset. 
Typically, you should use a group as the clean room owner. - + :param clean_room_name: str The name of the clean room this asset belongs to. This field is required for create operations and populated by the server for responses. :param asset: :class:`CleanRoomAsset` - + :returns: :class:`CleanRoomAsset` .. py:method:: create_clean_room_asset_review(clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str [, notebook_review: Optional[NotebookVersionReview]]) -> CreateCleanRoomAssetReviewResponse Submit an asset review - + :param clean_room_name: str Name of the clean room :param asset_type: :class:`CleanRoomAssetAssetType` @@ -33,47 +33,47 @@ :param name: str Name of the asset :param notebook_review: :class:`NotebookVersionReview` (optional) - + :returns: :class:`CreateCleanRoomAssetReviewResponse` .. py:method:: delete(clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str) Delete a clean room asset - unshare/remove the asset from the clean room - + :param clean_room_name: str Name of the clean room. :param asset_type: :class:`CleanRoomAssetAssetType` The type of the asset. :param name: str The fully qualified name of the asset; it is the same as the name field in CleanRoomAsset. - - + + .. py:method:: get(clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str) -> CleanRoomAsset Get the details of a clean room asset by its type and full name. - + :param clean_room_name: str Name of the clean room. :param asset_type: :class:`CleanRoomAssetAssetType` The type of the asset. :param name: str The fully qualified name of the asset; it is the same as the name field in CleanRoomAsset. - + :returns: :class:`CleanRoomAsset` .. py:method:: list(clean_room_name: str [, page_token: Optional[str]]) -> Iterator[CleanRoomAsset] List assets. - + :param clean_room_name: str Name of the clean room. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`CleanRoomAsset` @@ -81,7 +81,7 @@ Update a clean room asset. For example, updating the content of a notebook; changing the shared partitions of a table; etc. - + :param clean_room_name: str Name of the clean room. :param asset_type: :class:`CleanRoomAssetAssetType` @@ -89,15 +89,15 @@ :param name: str A fully qualified name that uniquely identifies the asset within the clean room. This is also the name displayed in the clean room UI. - + For UC securable assets (tables, volumes, etc.), the format is *shared_catalog*.*shared_schema*.*asset_name* - + For notebooks, the name is the notebook file name. For jar analyses, the name is the jar analysis name. :param asset: :class:`CleanRoomAsset` The asset to update. The asset's `name` and `asset_type` fields are used to identify the asset to update. - + :returns: :class:`CleanRoomAsset` \ No newline at end of file diff --git a/docs/workspace/cleanrooms/clean_room_auto_approval_rules.rst b/docs/workspace/cleanrooms/clean_room_auto_approval_rules.rst index 317fabcd8..5bf6d0075 100644 --- a/docs/workspace/cleanrooms/clean_room_auto_approval_rules.rst +++ b/docs/workspace/cleanrooms/clean_room_auto_approval_rules.rst @@ -10,57 +10,57 @@ .. py:method:: create(clean_room_name: str, auto_approval_rule: CleanRoomAutoApprovalRule) -> CleanRoomAutoApprovalRule Create an auto-approval rule - + :param clean_room_name: str The name of the clean room this auto-approval rule belongs to. :param auto_approval_rule: :class:`CleanRoomAutoApprovalRule` - + :returns: :class:`CleanRoomAutoApprovalRule` ..
py:method:: delete(clean_room_name: str, rule_id: str) Delete an auto-approval rule by rule ID - + :param clean_room_name: str :param rule_id: str - - + + .. py:method:: get(clean_room_name: str, rule_id: str) -> CleanRoomAutoApprovalRule Get an auto-approval rule by rule ID - + :param clean_room_name: str :param rule_id: str - + :returns: :class:`CleanRoomAutoApprovalRule` .. py:method:: list(clean_room_name: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[CleanRoomAutoApprovalRule] List all auto-approval rules for the caller - + :param clean_room_name: str :param page_size: int (optional) Maximum number of auto-approval rules to return. Defaults to 100. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`CleanRoomAutoApprovalRule` .. py:method:: update(clean_room_name: str, rule_id: str, auto_approval_rule: CleanRoomAutoApprovalRule) -> CleanRoomAutoApprovalRule Update an auto-approval rule by rule ID - + :param clean_room_name: str The name of the clean room this auto-approval rule belongs to. :param rule_id: str A generated UUID identifying the rule. :param auto_approval_rule: :class:`CleanRoomAutoApprovalRule` The auto-approval rule to update. The rule_id field is used to identify the rule to update. - + :returns: :class:`CleanRoomAutoApprovalRule` \ No newline at end of file diff --git a/docs/workspace/cleanrooms/clean_room_task_runs.rst b/docs/workspace/cleanrooms/clean_room_task_runs.rst index 716008d83..8d3631c29 100644 --- a/docs/workspace/cleanrooms/clean_room_task_runs.rst +++ b/docs/workspace/cleanrooms/clean_room_task_runs.rst @@ -9,7 +9,7 @@ .. py:method:: list(clean_room_name: str [, notebook_name: Optional[str], page_size: Optional[int], page_token: Optional[str]]) -> Iterator[CleanRoomNotebookTaskRun] List all the historical notebook task runs in a clean room. - + :param clean_room_name: str Name of the clean room. :param notebook_name: str (optional) @@ -18,6 +18,6 @@ The maximum number of task runs to return. Currently ignored - all runs will be returned. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`CleanRoomNotebookTaskRun` \ No newline at end of file diff --git a/docs/workspace/cleanrooms/clean_rooms.rst b/docs/workspace/cleanrooms/clean_rooms.rst index 1059cf784..c6e94a5f1 100644 --- a/docs/workspace/cleanrooms/clean_rooms.rst +++ b/docs/workspace/cleanrooms/clean_rooms.rst @@ -15,11 +15,11 @@ :method:cleanrooms/get method. When this method returns, the clean room will be in a PROVISIONING state, with only name, owner, comment, created_at and status populated. The clean room will be usable once it enters an ACTIVE state. - + The caller must be a metastore admin or have the **CREATE_CLEAN_ROOM** privilege on the metastore. - + :param clean_room: :class:`CleanRoom` - + :returns: Long-running operation waiter for :class:`CleanRoom`. See :method:wait_get_clean_room_active for more details. @@ -31,11 +31,11 @@ .. py:method:: create_output_catalog(clean_room_name: str, output_catalog: CleanRoomOutputCatalog) -> CreateCleanRoomOutputCatalogResponse Create the output catalog of the clean room. - + :param clean_room_name: str Name of the clean room. :param output_catalog: :class:`CleanRoomOutputCatalog` - + :returns: :class:`CreateCleanRoomOutputCatalogResponse` @@ -44,19 +44,19 @@ Delete a clean room. After deletion, the clean room will be removed from the metastore.
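A minimal sketch of the call, assuming a clean room named `demo-clean-room`:

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Hypothetical example: delete a clean room by name.
    w.clean_rooms.delete(name="demo-clean-room")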
If the other collaborators have not deleted the clean room, they will still have the clean room in their metastore, but it will be in a DELETED state and no operations other than deletion can be performed on it. - + :param name: str Name of the clean room. - - + + .. py:method:: get(name: str) -> CleanRoom Get the details of a clean room given its name. - + :param name: str - + :returns: :class:`CleanRoom` @@ -64,12 +64,12 @@ Get a list of all clean rooms of the metastore. Only clean rooms the caller has access to are returned. - + :param page_size: int (optional) Maximum number of clean rooms to return (i.e., the page length). Defaults to 100. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`CleanRoom` @@ -77,13 +77,13 @@ Update a clean room. The caller must be the owner of the clean room, have **MODIFY_CLEAN_ROOM** privilege, or be metastore admin. - + When the caller is a metastore admin, only the __owner__ field can be updated. - + :param name: str Name of the clean room. :param clean_room: :class:`CleanRoom` (optional) - + :returns: :class:`CleanRoom` diff --git a/docs/workspace/compute/cluster_policies.rst b/docs/workspace/compute/cluster_policies.rst index a224b2d2c..a4715ea15 100644 --- a/docs/workspace/compute/cluster_policies.rst +++ b/docs/workspace/compute/cluster_policies.rst @@ -7,18 +7,18 @@ You can use cluster policies to control users' ability to configure clusters based on a set of rules. These rules specify which attributes or attribute values can be used during cluster creation. Cluster policies have ACLs that limit their use to specific users and groups. - + With cluster policies, you can: - Auto-install cluster libraries on the next restart by listing them in the policy's "libraries" field (Public Preview). - Limit users to creating clusters with the prescribed settings. - Simplify the user interface, enabling more users to create clusters, by fixing and hiding some fields. - Manage costs by setting limits on attributes that impact the hourly rate. - + Cluster policy permissions limit which policies a user can select in the Policy drop-down when the user creates a cluster: - A user who has unrestricted cluster create permission can select the Unrestricted policy and create fully-configurable clusters. - A user who has both unrestricted cluster create permission and access to cluster policies can select the Unrestricted policy and policies they have access to. - A user that has access to only cluster policies, can select the policies they have access to. - + If no policies exist in the workspace, the Policy drop-down doesn't appear. Only admin users can create, edit, and delete policies. Admin users also have access to all policies. @@ -50,10 +50,10 @@ w.cluster_policies.delete(policy_id=created.policy_id) Creates a new policy with prescribed settings. - + :param definition: str (optional) Policy definition document expressed in [Databricks Cluster Policy Definition Language]. - + [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html :param description: str (optional) Additional human-readable description of the cluster policy. @@ -69,29 +69,29 @@ :param policy_family_definition_overrides: str (optional) Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON document must be passed as a string and cannot be embedded in the requests. 
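In practice this usually means serializing a dict with `json.dumps`. A sketch, where the policy rule, the policy name, and the `personal-vm` family ID are assumptions for illustration:

.. code-block:: python

    import json

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Pass the definition document as a JSON string, not as an embedded object.
    overrides = json.dumps({"autotermination_minutes": {"type": "fixed", "value": 30}})

    created = w.cluster_policies.create(
        name="Cost-capped compute",       # assumed policy name
        policy_family_id="personal-vm",   # assumed policy family ID
        policy_family_definition_overrides=overrides,
    )
    print(created.policy_id)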
- + You can use this to customize the policy definition inherited from the policy family. Policy rules specified here are merged into the inherited policy definition. - + [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html :param policy_family_id: str (optional) ID of the policy family. The cluster policy's policy definition inherits the policy family's policy definition. - + Cannot be used with `definition`. Use `policy_family_definition_overrides` instead to customize the policy definition. - + :returns: :class:`CreatePolicyResponse` .. py:method:: delete(policy_id: str) Delete a policy for a cluster. Clusters governed by this policy can still run, but cannot be edited. - + :param policy_id: str The ID of the policy to delete. - - + + .. py:method:: edit(policy_id: str [, definition: Optional[str], description: Optional[str], libraries: Optional[List[Library]], max_clusters_per_user: Optional[int], name: Optional[str], policy_family_definition_overrides: Optional[str], policy_family_id: Optional[str]]) @@ -137,12 +137,12 @@ Update an existing policy for cluster. This operation may make some clusters governed by the previous policy invalid. - + :param policy_id: str The ID of the policy to update. :param definition: str (optional) Policy definition document expressed in [Databricks Cluster Policy Definition Language]. - + [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html :param description: str (optional) Additional human-readable description of the cluster policy. @@ -158,19 +158,19 @@ :param policy_family_definition_overrides: str (optional) Policy definition JSON document expressed in [Databricks Policy Definition Language]. The JSON document must be passed as a string and cannot be embedded in the requests. - + You can use this to customize the policy definition inherited from the policy family. Policy rules specified here are merged into the inherited policy definition. - + [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html :param policy_family_id: str (optional) ID of the policy family. The cluster policy's policy definition inherits the policy family's policy definition. - + Cannot be used with `definition`. Use `policy_family_definition_overrides` instead to customize the policy definition. - - + + .. py:method:: get(policy_id: str) -> Policy @@ -203,20 +203,20 @@ w.cluster_policies.delete(policy_id=created.policy_id) Get a cluster policy entity. Creation and editing is available to admins only. - + :param policy_id: str Canonical unique identifier for the Cluster Policy. - + :returns: :class:`Policy` .. py:method:: get_permission_levels(cluster_policy_id: str) -> GetClusterPolicyPermissionLevelsResponse Gets the permission levels that a user can have on an object. - + :param cluster_policy_id: str The cluster policy for which to get or manage permissions. - + :returns: :class:`GetClusterPolicyPermissionLevelsResponse` @@ -224,10 +224,10 @@ Gets the permissions of a cluster policy. Cluster policies can inherit permissions from their root object. - + :param cluster_policy_id: str The cluster policy for which to get or manage permissions. - + :returns: :class:`ClusterPolicyPermissions` @@ -246,14 +246,14 @@ all = w.cluster_policies.list(compute.ListClusterPoliciesRequest()) Returns a list of policies accessible by the requesting user. 
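For example, to iterate the accessible policies sorted by name (a sketch using the enum members described below):

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import compute

    w = WorkspaceClient()

    # Iterate policies sorted by name, in ascending order.
    for policy in w.cluster_policies.list(
        sort_column=compute.ListSortColumn.POLICY_NAME,
        sort_order=compute.ListSortOrder.ASC,
    ):
        print(policy.policy_id, policy.name)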
- + :param sort_column: :class:`ListSortColumn` (optional) The cluster policy attribute to sort by. * `POLICY_CREATION_TIME` - Sort result list by policy creation time. * `POLICY_NAME` - Sort result list by policy name. :param sort_order: :class:`ListSortOrder` (optional) The order in which the policies get listed. * `DESC` - Sort result list in descending order. * `ASC` - Sort result list in ascending order. - + :returns: Iterator over :class:`Policy` @@ -261,11 +261,11 @@ Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. - + :param cluster_policy_id: str The cluster policy for which to get or manage permissions. :param access_control_list: List[:class:`ClusterPolicyAccessControlRequest`] (optional) - + :returns: :class:`ClusterPolicyPermissions` @@ -273,10 +273,10 @@ Updates the permissions on a cluster policy. Cluster policies can inherit permissions from their root object. - + :param cluster_policy_id: str The cluster policy for which to get or manage permissions. :param access_control_list: List[:class:`ClusterPolicyAccessControlRequest`] (optional) - + :returns: :class:`ClusterPolicyPermissions` \ No newline at end of file diff --git a/docs/workspace/compute/clusters.rst b/docs/workspace/compute/clusters.rst index d46b8ecd0..35e566c2a 100644 --- a/docs/workspace/compute/clusters.rst +++ b/docs/workspace/compute/clusters.rst @@ -5,22 +5,22 @@ .. py:class:: ClustersExt The Clusters API allows you to create, start, edit, list, terminate, and delete clusters. - + Databricks maps cluster node instance types to compute units known as DBUs. See the instance type pricing page for a list of the supported instance types and their corresponding DBUs. - + A Databricks cluster is a set of computation resources and configurations on which you run data engineering, data science, and data analytics workloads, such as production ETL pipelines, streaming analytics, ad-hoc analytics, and machine learning. - + You run these workloads as a set of commands in a notebook or as an automated job. Databricks makes a distinction between all-purpose clusters and job clusters. You use all-purpose clusters to analyze data collaboratively using interactive notebooks. You use job clusters to run fast and robust automated jobs. - + You can create an all-purpose cluster using the UI, CLI, or REST API. You can manually terminate and restart an all-purpose cluster. Multiple users can share such clusters to do collaborative interactive analysis. - + IMPORTANT: Databricks retains cluster configuration information for terminated clusters for 30 days. To keep an all-purpose cluster configuration even after it has been terminated for more than 30 days, an administrator can pin a cluster to the cluster list. @@ -62,12 +62,12 @@ Change the owner of the cluster. You must be an admin and the cluster must be terminated to perform this operation. The service principal application ID can be supplied as an argument to `owner_username`. - + :param cluster_id: str :param owner_username: str New owner of the cluster_id after this RPC. - - + + .. 
py:method:: create(spark_version: str [, apply_policy_default_values: Optional[bool], autoscale: Optional[AutoScale], autotermination_minutes: Optional[int], aws_attributes: Optional[AwsAttributes], azure_attributes: Optional[AzureAttributes], clone_from: Optional[CloneCluster], cluster_log_conf: Optional[ClusterLogConf], cluster_name: Optional[str], custom_tags: Optional[Dict[str, str]], data_security_mode: Optional[DataSecurityMode], docker_image: Optional[DockerImage], driver_instance_pool_id: Optional[str], driver_node_type_id: Optional[str], enable_elastic_disk: Optional[bool], enable_local_disk_encryption: Optional[bool], gcp_attributes: Optional[GcpAttributes], init_scripts: Optional[List[InitScriptInfo]], instance_pool_id: Optional[str], is_single_node: Optional[bool], kind: Optional[Kind], node_type_id: Optional[str], num_workers: Optional[int], policy_id: Optional[str], remote_disk_throughput: Optional[int], runtime_engine: Optional[RuntimeEngine], single_user_name: Optional[str], spark_conf: Optional[Dict[str, str]], spark_env_vars: Optional[Dict[str, str]], ssh_public_keys: Optional[List[str]], total_initial_remote_disk_size: Optional[int], use_ml_runtime: Optional[bool], workload_type: Optional[WorkloadType]]) -> Wait[ClusterDetails] @@ -105,15 +105,15 @@ usable once it enters a ``RUNNING`` state. Note: Databricks may not be able to acquire some of the requested nodes, due to cloud provider limitations (account limits, spot price, etc.) or transient network issues. - + If Databricks acquires at least 85% of the requested on-demand nodes, cluster creation will succeed. Otherwise the cluster will terminate with an informative error message. - + Rather than authoring the cluster's JSON definition from scratch, Databricks recommends filling out the [create compute UI] and then copying the generated JSON definition from the UI. - + [create compute UI]: https://docs.databricks.com/compute/configure.html - + :param spark_version: str The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can be retrieved by using the :method:clusters/sparkVersions API call. @@ -148,9 +148,9 @@ :param custom_tags: Dict[str,str] (optional) Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS instances and EBS volumes) with these tags in addition to `default_tags`. Notes: - + - Currently, Databricks allows at most 45 custom tags - + - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags :param data_security_mode: :class:`DataSecurityMode` (optional) :param docker_image: :class:`DockerImage` (optional) @@ -161,7 +161,7 @@ :param driver_node_type_id: str (optional) The node type of the Spark driver. Note that this field is optional; if unset, the driver node type will be set as the same value as `node_type_id` defined above. - + This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and node_type_id take precedence. @@ -182,7 +182,7 @@ The optional ID of the instance pool to which the cluster belongs. :param is_single_node: bool (optional) This field can only be used when `kind = CLASSIC_PREVIEW`. 
- + When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers` :param kind: :class:`Kind` (optional) @@ -194,7 +194,7 @@ :param num_workers: int (optional) Number of worker nodes that this cluster should have. A cluster has one Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 Spark nodes. - + Note: When reading the properties of a cluster, this field reflects the desired number of workers rather than the actual current number of workers. For instance, if a cluster is resized from 5 to 10 workers, this field will immediately be updated to reflect the target size of 10 workers, whereas @@ -207,10 +207,10 @@ for GCP HYPERDISK_BALANCED disks. :param runtime_engine: :class:`RuntimeEngine` (optional) Determines the cluster's runtime engine, either standard or Photon. - + This field is not compatible with legacy `spark_version` values that contain `-photon-`. Remove `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`. - + If left unspecified, the runtime engine defaults to standard unless the spark_version contains -photon-, in which case Photon will be used. :param single_user_name: str (optional) @@ -223,11 +223,11 @@ An object containing a set of optional, user-specified environment variable key-value pairs. Please note that key-value pair of the form (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the driver and workers. - + In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all default databricks managed environmental variables are included as well. - + Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` @@ -240,11 +240,11 @@ supported for GCP HYPERDISK_BALANCED disks. :param use_ml_runtime: bool (optional) This field can only be used when `kind = CLASSIC_PREVIEW`. - + `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not. :param workload_type: :class:`WorkloadType` (optional) - + :returns: Long-running operation waiter for :class:`ClusterDetails`. See :method:wait_get_cluster_running for more details. @@ -287,10 +287,10 @@ Terminates the Spark cluster with the specified ID. The cluster is removed asynchronously. Once the termination has completed, the cluster will be in a `TERMINATED` state. If the cluster is already in a `TERMINATING` or `TERMINATED` state, nothing will happen. - + :param cluster_id: str The cluster to be terminated. - + :returns: Long-running operation waiter for :class:`ClusterDetails`. See :method:wait_get_cluster_terminated for more details. @@ -339,16 +339,16 @@ Updates the configuration of a cluster to match the provided attributes and size. A cluster can be updated if it is in a `RUNNING` or `TERMINATED` state. - + If a cluster is updated while in a `RUNNING` state, it will be restarted so that the new attributes can take effect. - + If a cluster is updated while in a `TERMINATED` state, it will remain `TERMINATED`. The next time it is started using the `clusters/start` API, the new attributes will take effect. Any attempt to update a cluster in any other state will be rejected with an `INVALID_STATE` error code. 
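A sketch of a typical edit call; the cluster ID, node type, and Spark version are assumed example values, and `edit_and_wait` is the blocking variant implied by the waiter described below:

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Edit a cluster and block until it is RUNNING again; a TERMINATED cluster
    # would simply keep the new attributes for its next start.
    w.clusters.edit_and_wait(
        cluster_id="1234-567890-abcde123",   # assumed cluster ID
        spark_version="15.4.x-scala2.12",    # assumed DBR version
        node_type_id="i3.xlarge",            # assumed node type
        num_workers=4,
    )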
- + Clusters created by the Databricks Jobs service cannot be edited. - + :param cluster_id: str ID of the cluster :param spark_version: str @@ -383,9 +383,9 @@ :param custom_tags: Dict[str,str] (optional) Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS instances and EBS volumes) with these tags in addition to `default_tags`. Notes: - + - Currently, Databricks allows at most 45 custom tags - + - Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags :param data_security_mode: :class:`DataSecurityMode` (optional) :param docker_image: :class:`DockerImage` (optional) @@ -396,7 +396,7 @@ :param driver_node_type_id: str (optional) The node type of the Spark driver. Note that this field is optional; if unset, the driver node type will be set as the same value as `node_type_id` defined above. - + This field, along with node_type_id, should not be set if virtual_cluster_size is set. If both driver_node_type_id, node_type_id, and virtual_cluster_size are specified, driver_node_type_id and node_type_id take precedence. @@ -417,7 +417,7 @@ The optional ID of the instance pool to which the cluster belongs. :param is_single_node: bool (optional) This field can only be used when `kind = CLASSIC_PREVIEW`. - + When set to true, Databricks will automatically set single node related `custom_tags`, `spark_conf`, and `num_workers` :param kind: :class:`Kind` (optional) @@ -429,7 +429,7 @@ :param num_workers: int (optional) Number of worker nodes that this cluster should have. A cluster has one Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 Spark nodes. - + Note: When reading the properties of a cluster, this field reflects the desired number of workers rather than the actual current number of workers. For instance, if a cluster is resized from 5 to 10 workers, this field will immediately be updated to reflect the target size of 10 workers, whereas @@ -442,10 +442,10 @@ for GCP HYPERDISK_BALANCED disks. :param runtime_engine: :class:`RuntimeEngine` (optional) Determines the cluster's runtime engine, either standard or Photon. - + This field is not compatible with legacy `spark_version` values that contain `-photon-`. Remove `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`. - + If left unspecified, the runtime engine defaults to standard unless the spark_version contains -photon-, in which case Photon will be used. :param single_user_name: str (optional) @@ -458,11 +458,11 @@ An object containing a set of optional, user-specified environment variable key-value pairs. Please note that key-value pair of the form (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the driver and workers. - + In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all default databricks managed environmental variables are included as well. - + Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` @@ -475,11 +475,11 @@ supported for GCP HYPERDISK_BALANCED disks. :param use_ml_runtime: bool (optional) This field can only be used when `kind = CLASSIC_PREVIEW`. - + `effective_spark_version` is determined by `spark_version` (DBR release), this field `use_ml_runtime`, and whether `node_type_id` is gpu node or not. 
:param workload_type: :class:`WorkloadType` (optional) - + :returns: Long-running operation waiter for :class:`ClusterDetails`. See :method:wait_get_cluster_running for more details. @@ -546,7 +546,7 @@ Retrieves a list of events about the activity of a cluster. This API is paginated. If there are more events to read, the response includes all the parameters necessary to request the next page of events. - + :param cluster_id: str The ID of the cluster to retrieve events about. :param end_time: int (optional) @@ -555,12 +555,12 @@ An optional set of event types to filter on. If empty, all event types are returned. :param limit: int (optional) Deprecated: use page_token in combination with page_size instead. - + The maximum number of events to include in a page of events. Defaults to 50, and maximum allowed value is 500. :param offset: int (optional) Deprecated: use page_token in combination with page_size instead. - + The offset in the result set. Defaults to 0 (no offset). When an offset is specified and the results are requested in descending order, the end_time field is required. :param order: :class:`GetEventsOrder` (optional) @@ -575,7 +575,7 @@ previous page of events respectively. If page_token is empty, the first page is returned. :param start_time: int (optional) The start time in epoch milliseconds. If empty, returns events starting from the beginning of time. - + :returns: Iterator over :class:`ClusterEvent` @@ -612,30 +612,30 @@ Retrieves the information for a cluster given its identifier. Clusters can be described while they are running, or up to 60 days after they are terminated. - + :param cluster_id: str The cluster about which to retrieve information. - + :returns: :class:`ClusterDetails` .. py:method:: get_permission_levels(cluster_id: str) -> GetClusterPermissionLevelsResponse Gets the permission levels that a user can have on an object. - + :param cluster_id: str The cluster for which to get or manage permissions. - + :returns: :class:`GetClusterPermissionLevelsResponse` .. py:method:: get_permissions(cluster_id: str) -> ClusterPermissions Gets the permissions of a cluster. Clusters can inherit permissions from their root object. - + :param cluster_id: str The cluster for which to get or manage permissions. - + :returns: :class:`ClusterPermissions` @@ -655,7 +655,7 @@ Return information about all pinned and active clusters, and all clusters terminated within the last 30 days. Clusters terminated prior to this period are not included. - + :param filter_by: :class:`ListClustersFilterBy` (optional) Filters to apply to the list of clusters. :param page_size: int (optional) @@ -666,7 +666,7 @@ previous page of clusters respectively. :param sort_by: :class:`ListClustersSortBy` (optional) Sort the list of clusters by specific criteria. - + :returns: Iterator over :class:`ClusterDetails` @@ -684,8 +684,8 @@ nodes = w.clusters.list_node_types() Returns a list of supported Spark node types. These node types can be used to launch a cluster. - - + + :returns: :class:`ListNodeTypesResponse` @@ -693,8 +693,8 @@ Returns a list of availability zones where clusters can be created (for example, us-west-2a). These zones can be used to launch a cluster. - - + + :returns: :class:`ListAvailableZonesResponse` @@ -702,14 +702,14 @@ Permanently deletes a Spark cluster. This cluster is terminated and resources are asynchronously removed.
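For example (the cluster ID is an assumed placeholder):

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Permanently delete a terminated cluster.
    w.clusters.permanent_delete(cluster_id="1234-567890-abcde123")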
- + In addition, users will no longer see permanently deleted clusters in the cluster list, and API users can no longer perform any action on permanently deleted clusters. - + :param cluster_id: str The cluster to be deleted. - - + + .. py:method:: pin(cluster_id: str) @@ -745,10 +745,10 @@ Pinning a cluster ensures that the cluster will always be returned by the ListClusters API. Pinning a cluster that is already pinned will have no effect. This API can only be called by workspace admins. - + :param cluster_id: str - - + + .. py:method:: resize(cluster_id: str [, autoscale: Optional[AutoScale], num_workers: Optional[int]]) -> Wait[ClusterDetails] @@ -784,7 +784,7 @@ Resizes a cluster to have a desired number of workers. This will fail unless the cluster is in a `RUNNING` state. - + :param cluster_id: str The cluster to be resized. :param autoscale: :class:`AutoScale` (optional) @@ -793,13 +793,13 @@ :param num_workers: int (optional) Number of worker nodes that this cluster should have. A cluster has one Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 Spark nodes. - + Note: When reading the properties of a cluster, this field reflects the desired number of workers rather than the actual current number of workers. For instance, if a cluster is resized from 5 to 10 workers, this field will immediately be updated to reflect the target size of 10 workers, whereas the workers listed in `spark_info` will gradually increase from 5 to 10 as the new nodes are provisioned. - + :returns: Long-running operation waiter for :class:`ClusterDetails`. See :method:wait_get_cluster_running for more details. @@ -841,11 +841,11 @@ Restarts a Spark cluster with the supplied ID. If the cluster is not currently in a `RUNNING` state, nothing will happen. - + :param cluster_id: str The cluster to be started. :param restart_user: str (optional) - + :returns: Long-running operation waiter for :class:`ClusterDetails`. See :method:wait_get_cluster_running for more details. @@ -919,19 +919,19 @@ Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. - + :param cluster_id: str The cluster for which to get or manage permissions. :param access_control_list: List[:class:`ClusterAccessControlRequest`] (optional) - + :returns: :class:`ClusterPermissions` .. py:method:: spark_versions() -> GetSparkVersionsResponse Returns the list of available Spark versions. These versions can be used to launch a cluster. - - + + :returns: :class:`GetSparkVersionsResponse` @@ -971,10 +971,10 @@ cluster size. - If the previous cluster was an autoscaling cluster, the current cluster starts with the minimum number of nodes. - If the cluster is not currently in a ``TERMINATED`` state, nothing will happen. - Clusters launched to run a job cannot be started. - + :param cluster_id: str The cluster to be started. - + :returns: Long-running operation waiter for :class:`ClusterDetails`. See :method:wait_get_cluster_running for more details. @@ -1017,10 +1017,10 @@ Unpinning a cluster will allow the cluster to eventually be removed from the ListClusters API. Unpinning a cluster that is not pinned will have no effect. This API can only be called by workspace admins. - + :param cluster_id: str - - + + .. py:method:: update(cluster_id: str, update_mask: str [, cluster: Optional[UpdateClusterResource]]) -> Wait[ClusterDetails] @@ -1033,25 +1033,25 @@ is started using the `clusters/start` API. 
Attempts to update a cluster in any other state will be rejected with an `INVALID_STATE` error code. Clusters created by the Databricks Jobs service cannot be updated. - + :param cluster_id: str ID of the cluster. :param update_mask: str Used to specify which cluster attributes and size fields to update. See https://google.aip.dev/161 for more details. - + The field mask must be a single string, with multiple fields separated by commas (no spaces). The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. :param cluster: :class:`UpdateClusterResource` (optional) The cluster to be updated. - + :returns: Long-running operation waiter for :class:`ClusterDetails`. See :method:wait_get_cluster_running for more details. @@ -1063,11 +1063,11 @@ .. py:method:: update_permissions(cluster_id: str [, access_control_list: Optional[List[ClusterAccessControlRequest]]]) -> ClusterPermissions Updates the permissions on a cluster. Clusters can inherit permissions from their root object. - + :param cluster_id: str The cluster for which to get or manage permissions. :param access_control_list: List[:class:`ClusterAccessControlRequest`] (optional) - + :returns: :class:`ClusterPermissions` diff --git a/docs/workspace/compute/command_execution.rst b/docs/workspace/compute/command_execution.rst index a5d8b2a79..52417b1a1 100644 --- a/docs/workspace/compute/command_execution.rst +++ b/docs/workspace/compute/command_execution.rst @@ -10,13 +10,13 @@ .. py:method:: cancel( [, cluster_id: Optional[str], command_id: Optional[str], context_id: Optional[str]]) -> Wait[CommandStatusResponse] Cancels a currently running command within an execution context. - + The command ID is obtained from a prior successful call to __execute__. - + :param cluster_id: str (optional) :param command_id: str (optional) :param context_id: str (optional) - + :returns: Long-running operation waiter for :class:`CommandStatusResponse`. See :method:wait_command_status_command_execution_cancelled for more details. @@ -28,23 +28,23 @@ .. py:method:: command_status(cluster_id: str, context_id: str, command_id: str) -> CommandStatusResponse Gets the status of and, if available, the results from a currently executing command. - + The command ID is obtained from a prior successful call to __execute__. - + :param cluster_id: str :param context_id: str :param command_id: str - + :returns: :class:`CommandStatusResponse` .. py:method:: context_status(cluster_id: str, context_id: str) -> ContextStatusResponse Gets the status for an execution context. - + :param cluster_id: str :param context_id: str - + :returns: :class:`ContextStatusResponse` @@ -70,13 +70,13 @@ w.command_execution.destroy(cluster_id=cluster_id, context_id=context.id) Creates an execution context for running cluster commands. - + If successful, this method returns the ID of the new execution context. - + :param cluster_id: str (optional) Running cluster id :param language: :class:`Language` (optional) - + :returns: Long-running operation waiter for :class:`ContextStatusResponse`. 
See :method:wait_context_status_command_execution_running for more details. @@ -88,11 +88,11 @@ .. py:method:: destroy(cluster_id: str, context_id: str) Deletes an execution context. - + :param cluster_id: str :param context_id: str - - + + .. py:method:: execute( [, cluster_id: Optional[str], command: Optional[str], context_id: Optional[str], language: Optional[Language]]) -> Wait[CommandStatusResponse] @@ -124,9 +124,9 @@ w.command_execution.destroy(cluster_id=cluster_id, context_id=context.id) Runs a cluster command in the given execution context, using the provided language. - + If successful, it returns an ID for tracking the status of the command's execution. - + :param cluster_id: str (optional) Running cluster id :param command: str (optional) @@ -134,7 +134,7 @@ :param context_id: str (optional) Running context id :param language: :class:`Language` (optional) - + :returns: Long-running operation waiter for :class:`CommandStatusResponse`. See :method:wait_command_status_command_execution_finished_or_error for more details. diff --git a/docs/workspace/compute/global_init_scripts.rst b/docs/workspace/compute/global_init_scripts.rst index eb01c9f63..b34768d93 100644 --- a/docs/workspace/compute/global_init_scripts.rst +++ b/docs/workspace/compute/global_init_scripts.rst @@ -6,7 +6,7 @@ The Global Init Scripts API enables Workspace administrators to configure global initialization scripts for their workspace. These scripts run on every node in every cluster in the workspace. - + **Important:** Existing clusters must be restarted to pick up any changes made to global init scripts. Global init scripts are run in order. If the init script returns with a bad exit code, the Apache Spark container fails to launch and init scripts with later position are skipped. If enough containers fail, the @@ -37,7 +37,7 @@ w.global_init_scripts.delete(script_id=created.script_id) Creates a new global init script in this workspace. - + :param name: str The name of the script :param script: str @@ -47,25 +47,25 @@ :param position: int (optional) The position of a global init script, where 0 represents the first script to run, 1 is the second script to run, in ascending order. - + If you omit the numeric position for a new global init script, it defaults to last position. It will run after all current scripts. Setting any value greater than the position of the last script is equivalent to the last position. Example: Take three existing scripts with positions 0, 1, and 2. Any position of (3) or greater puts the script in the last position. If an explicit position value conflicts with an existing script value, your request succeeds, but the original script at that position and all later scripts have their positions incremented by 1. - + :returns: :class:`CreateResponse` .. py:method:: delete(script_id: str) Deletes a global init script. - + :param script_id: str The ID of the global init script. - - + + .. py:method:: get(script_id: str) -> GlobalInitScriptDetailsWithContent @@ -95,10 +95,10 @@ w.global_init_scripts.delete(script_id=created.script_id) Gets all the details of a script, including its Base64-encoded contents. - + :param script_id: str The ID of the global init script. - + :returns: :class:`GlobalInitScriptDetailsWithContent` @@ -118,8 +118,8 @@ Get a list of all global init scripts for this workspace. This returns all properties for each script but **not** the script contents. To retrieve the contents of a script, use the [get a global init script](:method:globalinitscripts/get) operation. 
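Combining the two calls looks roughly like this sketch:

.. code-block:: python

    import base64

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # List script metadata, then fetch and decode the Base64-encoded contents
    # of each script.
    for details in w.global_init_scripts.list():
        full = w.global_init_scripts.get(script_id=details.script_id)
        print(full.name, base64.b64decode(full.script).decode("utf-8"))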
- - + + :returns: Iterator over :class:`GlobalInitScriptDetails` @@ -155,7 +155,7 @@ Updates a global init script, specifying only the fields to change. All fields are optional. Unspecified fields retain their current value. - + :param script_id: str The ID of the global init script. :param name: str @@ -167,13 +167,13 @@ :param position: int (optional) The position of a script, where 0 represents the first script to run, 1 is the second script to run, in ascending order. To move the script to run first, set its position to 0. - + To move the script to the end, set its position to any value greater than or equal to the position of the last script. For example, take three existing scripts with positions 0, 1, and 2. Any position value of 2 or greater puts the script in the last position (2). - + If an explicit position value conflicts with an existing script, your request succeeds, but the original script at that position and all later scripts have their positions incremented by 1. - - + + \ No newline at end of file diff --git a/docs/workspace/compute/instance_pools.rst b/docs/workspace/compute/instance_pools.rst index 8be26a2d6..36a450cd2 100644 --- a/docs/workspace/compute/instance_pools.rst +++ b/docs/workspace/compute/instance_pools.rst @@ -6,16 +6,16 @@ The Instance Pools API is used to create, edit, delete, and list instance pools using ready-to-use cloud instances, which reduces cluster start and auto-scaling times. - + Databricks pools reduce cluster start and auto-scaling times by maintaining a set of idle, ready-to-use instances. When a cluster is attached to a pool, cluster nodes are created using the pool’s idle instances. If the pool has no idle instances, the pool expands by allocating a new instance from the instance provider in order to accommodate the cluster’s request. When a cluster releases an instance, it returns to the pool and is free for another cluster to use. Only clusters attached to a pool can use that pool’s idle instances. - + You can specify a different pool for the driver node and worker nodes, or use the same pool for both. - + Databricks does not charge DBUs while instances are idle in the pool. Instance provider billing does apply. See pricing. @@ -40,7 +40,7 @@ w.instance_pools.delete(instance_pool_id=created.instance_pool_id) Creates a new instance pool using idle and ready-to-use cloud instances. - + :param instance_pool_name: str Pool name requested by the user. Pool name must be unique. Length must be between 1 and 100 characters. @@ -58,7 +58,7 @@ :param custom_tags: Dict[str,str] (optional) Additional tags for pool resources. Databricks will tag all pool resources (e.g., AWS instances and EBS volumes) with these tags in addition to `default_tags`. Notes: - + - Currently, Databricks allows at most 45 custom tags :param disk_spec: :class:`DiskSpec` (optional) Defines the specification of the disks that will be attached to all spark containers. @@ -93,18 +93,18 @@ :param total_initial_remote_disk_size: int (optional) If set, what the total initial volume size (in GB) of the remote disks should be. Currently only supported for GCP HYPERDISK_BALANCED types. - + :returns: :class:`CreateInstancePoolResponse` .. py:method:: delete(instance_pool_id: str) Deletes the instance pool permanently. The idle instances in the pool are terminated asynchronously. - + :param instance_pool_id: str The instance pool to be terminated. - - + + ..
py:method:: edit(instance_pool_id: str, instance_pool_name: str, node_type_id: str [, custom_tags: Optional[Dict[str, str]], idle_instance_autotermination_minutes: Optional[int], max_capacity: Optional[int], min_idle_instances: Optional[int], remote_disk_throughput: Optional[int], total_initial_remote_disk_size: Optional[int]]) @@ -134,7 +134,7 @@ w.instance_pools.delete(instance_pool_id=created.instance_pool_id) Modifies the configuration of an existing instance pool. - + :param instance_pool_id: str Instance pool ID :param instance_pool_name: str @@ -148,7 +148,7 @@ :param custom_tags: Dict[str,str] (optional) Additional tags for pool resources. Databricks will tag all pool resources (e.g., AWS instances and EBS volumes) with these tags in addition to `default_tags`. Notes: - + - Currently, Databricks allows at most 45 custom tags :param idle_instance_autotermination_minutes: int (optional) Automatically terminates the extra instances in the pool cache after they are inactive for this time @@ -168,8 +168,8 @@ :param total_initial_remote_disk_size: int (optional) If set, what the total initial volume size (in GB) of the remote disks should be. Currently only supported for GCP HYPERDISK_BALANCED types. - - + + .. py:method:: get(instance_pool_id: str) -> GetInstancePool @@ -195,20 +195,20 @@ w.instance_pools.delete(instance_pool_id=created.instance_pool_id) Retrieve the information for an instance pool based on its identifier. - + :param instance_pool_id: str The canonical unique identifier for the instance pool. - + :returns: :class:`GetInstancePool` .. py:method:: get_permission_levels(instance_pool_id: str) -> GetInstancePoolPermissionLevelsResponse Gets the permission levels that a user can have on an object. - + :param instance_pool_id: str The instance pool for which to get or manage permissions. - + :returns: :class:`GetInstancePoolPermissionLevelsResponse` @@ -216,10 +216,10 @@ Gets the permissions of an instance pool. Instance pools can inherit permissions from their root object. - + :param instance_pool_id: str The instance pool for which to get or manage permissions. - + :returns: :class:`InstancePoolPermissions` @@ -237,8 +237,8 @@ all = w.instance_pools.list() Gets a list of instance pools with their statistics. - - + + :returns: Iterator over :class:`InstancePoolAndStats` @@ -246,11 +246,11 @@ Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. - + :param instance_pool_id: str The instance pool for which to get or manage permissions. :param access_control_list: List[:class:`InstancePoolAccessControlRequest`] (optional) - + :returns: :class:`InstancePoolPermissions` @@ -258,10 +258,10 @@ Updates the permissions on an instance pool. Instance pools can inherit permissions from their root object. - + :param instance_pool_id: str The instance pool for which to get or manage permissions. :param access_control_list: List[:class:`InstancePoolAccessControlRequest`] (optional) - + :returns: :class:`InstancePoolPermissions` \ No newline at end of file diff --git a/docs/workspace/compute/instance_profiles.rst b/docs/workspace/compute/instance_profiles.rst index 7a5192bd5..e98d78b52 100644 --- a/docs/workspace/compute/instance_profiles.rst +++ b/docs/workspace/compute/instance_profiles.rst @@ -7,9 +7,8 @@ The Instance Profiles API allows admins to add, list, and remove instance profiles that users can launch clusters with. 
Regular users can list the instance profiles available to them. See [Secure access to S3 buckets] using instance profiles for more information. - - [Secure access to S3 buckets]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/instance-profiles.html + [Secure access to S3 buckets]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/instance-profiles.html .. py:method:: add(instance_profile_arn: str [, iam_role_arn: Optional[str], is_meta_instance_profile: Optional[bool], skip_validation: Optional[bool]]) @@ -32,18 +31,18 @@ Registers an instance profile in Databricks. In the UI, you can then give users the permission to use this instance profile when launching clusters. - + This API is only available to admin users. - + :param instance_profile_arn: str The AWS ARN of the instance profile to register with Databricks. This field is required. :param iam_role_arn: str (optional) The AWS IAM role ARN of the role associated with the instance profile. This field is required if your role name and instance profile name do not match and you want to use the instance profile with [Databricks SQL Serverless]. - + Otherwise, this field is optional. - + [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html :param is_meta_instance_profile: bool (optional) Boolean flag indicating whether the instance profile should only be used in credential passthrough @@ -56,8 +55,8 @@ fails with an error message that does not indicate an IAM related permission issue, (e.g. “Your requested instance type is not supported in your requested availability zone”), you can pass this flag to skip the validation and forcibly add the instance profile. - - + + .. py:method:: edit(instance_profile_arn: str [, iam_role_arn: Optional[str], is_meta_instance_profile: Optional[bool]]) @@ -80,34 +79,34 @@ The only supported field to change is the optional IAM role ARN associated with the instance profile. It is required to specify the IAM role ARN if both of the following are true: - + * Your role name and instance profile name do not match. The name is the part after the last slash in each ARN. * You want to use the instance profile with [Databricks SQL Serverless]. - + To understand where these fields are in the AWS console, see [Enable serverless SQL warehouses]. - + This API is only available to admin users. - + [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html [Enable serverless SQL warehouses]: https://docs.databricks.com/sql/admin/serverless.html - + :param instance_profile_arn: str The AWS ARN of the instance profile to register with Databricks. This field is required. :param iam_role_arn: str (optional) The AWS IAM role ARN of the role associated with the instance profile. This field is required if your role name and instance profile name do not match and you want to use the instance profile with [Databricks SQL Serverless]. - + Otherwise, this field is optional. - + [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html :param is_meta_instance_profile: bool (optional) Boolean flag indicating whether the instance profile should only be used in credential passthrough scenarios. If true, it means the instance profile contains an meta IAM role which could assume a wide range of roles. Therefore it should always be used with authorization. This field is optional, the default value is `false`. - - + + .. 
py:method:: list() -> Iterator[InstanceProfile] @@ -124,10 +123,10 @@ all = w.instance_profiles.list() List the instance profiles that the calling user can use to launch a cluster. - + This API is available to all users. - - + + :returns: Iterator over :class:`InstanceProfile` @@ -135,11 +134,11 @@ Remove the instance profile with the provided ARN. Existing clusters with this instance profile will continue to function. - + This API is only accessible to admin users. - + :param instance_profile_arn: str The ARN of the instance profile to remove. This field is required. - - + + \ No newline at end of file diff --git a/docs/workspace/compute/libraries.rst b/docs/workspace/compute/libraries.rst index dfafea7aa..89ff2cc9c 100644 --- a/docs/workspace/compute/libraries.rst +++ b/docs/workspace/compute/libraries.rst @@ -6,15 +6,15 @@ The Libraries API allows you to install and uninstall libraries and get the status of libraries on a cluster. - + To make third-party or custom code available to notebooks and jobs running on your clusters, you can install a library. Libraries can be written in Python, Java, Scala, and R. You can upload Python, Java, Scala and R libraries and point to external packages in PyPI, Maven, and CRAN repositories. - + Cluster libraries can be used by all notebooks running on a cluster. You can install a cluster library directly from a public repository such as PyPI or Maven, using a previously installed workspace library, or using an init script. - + When you uninstall a library from a cluster, the library is removed only when you restart the cluster. Until you restart the cluster, the status of the uninstalled library appears as Uninstall pending restart. @@ -22,8 +22,8 @@ Get the status of all libraries on all clusters. A status is returned for all libraries installed on this cluster via the API or the libraries UI. - - + + :returns: Iterator over :class:`ClusterLibraryStatuses` @@ -34,10 +34,10 @@ set to be installed on this cluster, in the order that the libraries were added to the cluster, are returned first. 2. Libraries that were previously requested to be installed on this cluster or, but are now marked for removal, in no particular order, are returned last. - + :param cluster_id: str Unique identifier of the cluster whose status should be retrieved. - + :returns: Iterator over :class:`LibraryFullStatus` @@ -45,24 +45,24 @@ Add libraries to install on a cluster. The installation is asynchronous; it happens in the background after the completion of this request. - + :param cluster_id: str Unique identifier for the cluster on which to install these libraries. :param libraries: List[:class:`Library`] The libraries to install. - - + + .. py:method:: uninstall(cluster_id: str, libraries: List[Library]) Set libraries to uninstall from a cluster. The libraries won't be uninstalled until the cluster is restarted. A request to uninstall a library that is not currently installed is ignored. - + :param cluster_id: str Unique identifier for the cluster on which to uninstall these libraries. :param libraries: List[:class:`Library`] The libraries to uninstall. 
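As a rough sketch, installing and later uninstalling a PyPI package might look like the following; the cluster ID and package name are assumptions:

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import compute

    w = WorkspaceClient()

    lib = compute.Library(pypi=compute.PythonPyPiLibrary(package="dbt-core"))

    # Installation is asynchronous; it completes in the background.
    w.libraries.install(cluster_id="1234-567890-abcde123", libraries=[lib])

    # The library is only removed once the cluster is restarted.
    w.libraries.uninstall(cluster_id="1234-567890-abcde123", libraries=[lib])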
- - + + \ No newline at end of file diff --git a/docs/workspace/compute/policy_compliance_for_clusters.rst b/docs/workspace/compute/policy_compliance_for_clusters.rst index 92c1fc4cb..af7132373 100644 --- a/docs/workspace/compute/policy_compliance_for_clusters.rst +++ b/docs/workspace/compute/policy_compliance_for_clusters.rst @@ -6,10 +6,10 @@ The policy compliance APIs allow you to view and manage the policy compliance status of clusters in your workspace. - + A cluster is compliant with its policy if its configuration satisfies all its policy rules. Clusters could be out of compliance if their policy was updated after the cluster was last edited. - + The get and list compliance APIs allow you to view the policy compliance status of a cluster. The enforce compliance API allows you to update a cluster to be compliant with the current version of its policy. @@ -17,22 +17,22 @@ Updates a cluster to be compliant with the current version of its policy. A cluster can be updated if it is in a `RUNNING` or `TERMINATED` state. - + If a cluster is updated while in a `RUNNING` state, it will be restarted so that the new attributes can take effect. - + If a cluster is updated while in a `TERMINATED` state, it will remain `TERMINATED`. The next time the cluster is started, the new attributes will take effect. - + Clusters created by the Databricks Jobs, DLT, or Models services cannot be enforced by this API. Instead, use the "Enforce job policy compliance" API to enforce policy compliance on jobs. - + :param cluster_id: str The ID of the cluster you want to enforce policy compliance on. :param validate_only: bool (optional) If set, previews the changes that would be made to a cluster to enforce compliance but does not update the cluster. - + :returns: :class:`EnforceClusterComplianceResponse` @@ -40,10 +40,10 @@ Returns the policy compliance status of a cluster. Clusters could be out of compliance if their policy was updated after the cluster was last edited. - + :param cluster_id: str The ID of the cluster to get the compliance status - + :returns: :class:`GetClusterComplianceResponse` @@ -51,7 +51,7 @@ Returns the policy compliance status of all clusters that use a given policy. Clusters could be out of compliance if their policy was updated after the cluster was last edited. - + :param policy_id: str Canonical unique identifier for the cluster policy. :param page_size: int (optional) @@ -60,6 +60,6 @@ :param page_token: str (optional) A page token that can be used to navigate to the next page or previous page as returned by `next_page_token` or `prev_page_token`. - + :returns: Iterator over :class:`ClusterCompliance` \ No newline at end of file diff --git a/docs/workspace/compute/policy_families.rst b/docs/workspace/compute/policy_families.rst index 1e4b6a00e..d4eeeb926 100644 --- a/docs/workspace/compute/policy_families.rst +++ b/docs/workspace/compute/policy_families.rst @@ -6,10 +6,10 @@ View available policy families. A policy family contains a policy definition providing best practices for configuring clusters for a particular use case. - + Databricks manages and provides policy families for several common cluster use cases. You cannot create, edit, or delete policy families. - + Policy families cannot be used directly to create clusters. Instead, you create cluster policies using a policy family. Cluster policies created using a policy family inherit the policy family's policy definition. 
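A sketch of that flow, assuming the first listed family is the one you want (the policy name is an illustrative placeholder):

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Pick a policy family, then create a cluster policy that inherits its
    # policy definition.
    family = next(iter(w.policy_families.list()))
    policy = w.cluster_policies.create(
        name="Inherited from a policy family",  # assumed policy name
        policy_family_id=family.policy_family_id,
    )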
@@ -31,12 +31,12 @@ first_family = w.policy_families.get(policy_family_id=all[0].policy_family_id) Retrieve the information for a policy family based on its identifier and version - + :param policy_family_id: str The family ID about which to retrieve information. :param version: int (optional) The version number for the family to fetch. Defaults to the latest version. - + :returns: :class:`PolicyFamily` @@ -56,11 +56,11 @@ Returns the list of policy definition types available to use at their latest version. This API is paginated. - + :param max_results: int (optional) Maximum number of policy families to return. :param page_token: str (optional) A token that can be used to get the next page of results. - + :returns: Iterator over :class:`PolicyFamily` \ No newline at end of file diff --git a/docs/workspace/dashboards/genie.rst b/docs/workspace/dashboards/genie.rst index c0d16b9f8..afa4ab6a7 100644 --- a/docs/workspace/dashboards/genie.rst +++ b/docs/workspace/dashboards/genie.rst @@ -13,14 +13,14 @@ Create a new message in a [conversation](:method:genie/startconversation). The AI response uses all previously created messages in the conversation to respond. - + :param space_id: str The ID associated with the Genie space where the conversation is started. :param conversation_id: str The ID associated with the conversation. :param content: str User message content. - + :returns: Long-running operation waiter for :class:`GenieMessage`. See :method:wait_get_message_genie_completed for more details. @@ -32,34 +32,34 @@ .. py:method:: delete_conversation(space_id: str, conversation_id: str) Delete a conversation. - + :param space_id: str The ID associated with the Genie space where the conversation is located. :param conversation_id: str The ID of the conversation to delete. - - + + .. py:method:: delete_conversation_message(space_id: str, conversation_id: str, message_id: str) Delete a conversation message. - + :param space_id: str The ID associated with the Genie space where the message is located. :param conversation_id: str The ID associated with the conversation. :param message_id: str The ID associated with the message to delete. - - + + .. py:method:: execute_message_attachment_query(space_id: str, conversation_id: str, message_id: str, attachment_id: str) -> GenieGetMessageQueryResultResponse Execute the SQL for a message query attachment. Use this API when the query attachment has expired and needs to be re-executed. - + :param space_id: str Genie space ID :param conversation_id: str @@ -68,7 +68,7 @@ Message ID :param attachment_id: str Attachment ID - + :returns: :class:`GenieGetMessageQueryResultResponse` @@ -76,28 +76,28 @@ DEPRECATED: Use [Execute Message Attachment Query](:method:genie/executemessageattachmentquery) instead. - + :param space_id: str Genie space ID :param conversation_id: str Conversation ID :param message_id: str Message ID - + :returns: :class:`GenieGetMessageQueryResultResponse` .. py:method:: get_message(space_id: str, conversation_id: str, message_id: str) -> GenieMessage Get a message from a conversation. - + :param space_id: str The ID associated with the Genie space where the target conversation is located. :param conversation_id: str The ID associated with the target conversation. :param message_id: str The ID associated with the target message from the identified conversation. - + :returns: :class:`GenieMessage` @@ -105,7 +105,7 @@ Get the result of the SQL query if the message has a query attachment.
This is only available if a message has a query attachment and the message status is `EXECUTING_QUERY` OR `COMPLETED`. - + :param space_id: str Genie space ID :param conversation_id: str @@ -114,7 +114,7 @@ Message ID :param attachment_id: str Attachment ID - + :returns: :class:`GenieGetMessageQueryResultResponse` @@ -122,14 +122,14 @@ DEPRECATED: Use [Get Message Attachment Query Result](:method:genie/getmessageattachmentqueryresult) instead. - + :param space_id: str Genie space ID :param conversation_id: str Conversation ID :param message_id: str Message ID - + :returns: :class:`GenieGetMessageQueryResultResponse` @@ -137,7 +137,7 @@ DEPRECATED: Use [Get Message Attachment Query Result](:method:genie/getmessageattachmentqueryresult) instead. - + :param space_id: str Genie space ID :param conversation_id: str @@ -146,24 +146,24 @@ Message ID :param attachment_id: str Attachment ID - + :returns: :class:`GenieGetMessageQueryResultResponse` .. py:method:: get_space(space_id: str) -> GenieSpace Get details of a Genie Space. - + :param space_id: str The ID associated with the Genie space - + :returns: :class:`GenieSpace` .. py:method:: list_conversation_messages(space_id: str, conversation_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> GenieListConversationMessagesResponse List messages in a conversation - + :param space_id: str The ID associated with the Genie space where the conversation is located :param conversation_id: str @@ -172,43 +172,43 @@ Maximum number of messages to return per page :param page_token: str (optional) Token to get the next page of results - + :returns: :class:`GenieListConversationMessagesResponse` .. py:method:: list_conversations(space_id: str [, include_all: Optional[bool], page_size: Optional[int], page_token: Optional[str]]) -> GenieListConversationsResponse Get a list of conversations in a Genie Space. - + :param space_id: str The ID of the Genie space to retrieve conversations from. :param include_all: bool (optional) - Include all conversations in the space across all users. Requires "Can Manage" permission on the - space. + Include all conversations in the space across all users. Requires at least CAN MANAGE permission on + the space. :param page_size: int (optional) Maximum number of conversations to return per page :param page_token: str (optional) Token to get the next page of results - + :returns: :class:`GenieListConversationsResponse` .. py:method:: list_spaces( [, page_size: Optional[int], page_token: Optional[str]]) -> GenieListSpacesResponse Get list of Genie Spaces. - + :param page_size: int (optional) Maximum number of spaces to return per page :param page_token: str (optional) Pagination token for getting the next page of results - + :returns: :class:`GenieListSpacesResponse` .. py:method:: send_message_feedback(space_id: str, conversation_id: str, message_id: str, rating: GenieFeedbackRating [, comment: Optional[str]]) Send feedback for a message. - + :param space_id: str The ID associated with the Genie space where the message is located. :param conversation_id: str @@ -219,19 +219,19 @@ The rating (POSITIVE, NEGATIVE, or NONE). :param comment: str (optional) Optional text feedback that will be stored as a comment. - - + + .. py:method:: start_conversation(space_id: str, content: str) -> Wait[GenieMessage] Start a new conversation. - + :param space_id: str The ID associated with the Genie space where you want to start a conversation. :param content: str The text of the message that starts the conversation. 
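To make the conversation flow concrete, a minimal sketch that uses only the documented signatures above, blocking on each waiter with `.result()` (the space ID is a placeholder):

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    space_id = "01ef1234abcd5678"  # placeholder Genie space ID

    # Start a conversation and block until the first AI reply completes.
    msg = w.genie.start_conversation(space_id=space_id,
                                     content="Which region had the most sales?").result()

    # Follow up in the same conversation; prior messages are used as context.
    reply = w.genie.create_message(space_id=space_id,
                                   conversation_id=msg.conversation_id,
                                   content="Break that down by month.").result()
    print(reply.status)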
- + :returns: Long-running operation waiter for :class:`GenieMessage`. See :method:wait_get_message_genie_completed for more details. @@ -243,11 +243,11 @@ .. py:method:: trash_space(space_id: str) Move a Genie Space to the trash. - + :param space_id: str The ID associated with the Genie space to be sent to the trash. - - + + .. py:method:: wait_get_message_genie_completed(conversation_id: str, message_id: str, space_id: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[GenieMessage], None]]) -> GenieMessage diff --git a/docs/workspace/dashboards/lakeview.rst b/docs/workspace/dashboards/lakeview.rst index e55aeedc8..48cea303f 100644 --- a/docs/workspace/dashboards/lakeview.rst +++ b/docs/workspace/dashboards/lakeview.rst @@ -10,42 +10,42 @@ .. py:method:: create(dashboard: Dashboard) -> Dashboard Create a draft dashboard. - + :param dashboard: :class:`Dashboard` - + :returns: :class:`Dashboard` .. py:method:: create_schedule(dashboard_id: str, schedule: Schedule) -> Schedule Create dashboard schedule. - + :param dashboard_id: str UUID identifying the dashboard to which the schedule belongs. :param schedule: :class:`Schedule` The schedule to create. A dashboard is limited to 10 schedules. - + :returns: :class:`Schedule` .. py:method:: create_subscription(dashboard_id: str, schedule_id: str, subscription: Subscription) -> Subscription Create schedule subscription. - + :param dashboard_id: str UUID identifying the dashboard to which the subscription belongs. :param schedule_id: str UUID identifying the schedule to which the subscription belongs. :param subscription: :class:`Subscription` The subscription to create. A schedule is limited to 100 subscriptions. - + :returns: :class:`Subscription` .. py:method:: delete_schedule(dashboard_id: str, schedule_id: str [, etag: Optional[str]]) Delete dashboard schedule. - + :param dashboard_id: str UUID identifying the dashboard to which the schedule belongs. :param schedule_id: str @@ -53,14 +53,14 @@ :param etag: str (optional) The etag for the schedule. Optionally, it can be provided to verify that the schedule has not been modified from its last retrieval. - - + + .. py:method:: delete_subscription(dashboard_id: str, schedule_id: str, subscription_id: str [, etag: Optional[str]]) Delete schedule subscription. - + :param dashboard_id: str UUID identifying the dashboard to which the subscription belongs. :param schedule_id: str @@ -70,60 +70,60 @@ :param etag: str (optional) The etag for the subscription. Can be optionally provided to ensure that the subscription has not been modified since the last read. - - + + .. py:method:: get(dashboard_id: str) -> Dashboard Get a draft dashboard. - + :param dashboard_id: str UUID identifying the dashboard. - + :returns: :class:`Dashboard` .. py:method:: get_published(dashboard_id: str) -> PublishedDashboard Get the current published dashboard. - + :param dashboard_id: str UUID identifying the published dashboard. - + :returns: :class:`PublishedDashboard` .. py:method:: get_schedule(dashboard_id: str, schedule_id: str) -> Schedule Get dashboard schedule. - + :param dashboard_id: str UUID identifying the dashboard to which the schedule belongs. :param schedule_id: str UUID identifying the schedule. - + :returns: :class:`Schedule` .. py:method:: get_subscription(dashboard_id: str, schedule_id: str, subscription_id: str) -> Subscription Get schedule subscription. - + :param dashboard_id: str UUID identifying the dashboard to which the subscription belongs.
:param schedule_id: str UUID identifying the schedule to which the subscription belongs. :param subscription_id: str UUID identifying the subscription. - + :returns: :class:`Subscription` .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str], show_trashed: Optional[bool], view: Optional[DashboardView]]) -> Iterator[Dashboard] List dashboards. - + :param page_size: int (optional) The number of dashboards to return per page. :param page_token: str (optional) @@ -134,14 +134,14 @@ returned. :param view: :class:`DashboardView` (optional) `DASHBOARD_VIEW_BASIC` only includes summary metadata from the dashboard. - + :returns: Iterator over :class:`Dashboard` .. py:method:: list_schedules(dashboard_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[Schedule] List dashboard schedules. - + :param dashboard_id: str UUID identifying the dashboard to which the schedules belong. :param page_size: int (optional) @@ -149,14 +149,14 @@ :param page_token: str (optional) A page token, received from a previous `ListSchedules` call. Use this to retrieve the subsequent page. - + :returns: Iterator over :class:`Schedule` .. py:method:: list_subscriptions(dashboard_id: str, schedule_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[Subscription] List schedule subscriptions. - + :param dashboard_id: str UUID identifying the dashboard to which the subscriptions belong. :param schedule_id: str @@ -166,14 +166,14 @@ :param page_token: str (optional) A page token, received from a previous `ListSubscriptions` call. Use this to retrieve the subsequent page. - + :returns: Iterator over :class:`Subscription` .. py:method:: migrate(source_dashboard_id: str [, display_name: Optional[str], parent_path: Optional[str], update_parameter_syntax: Optional[bool]]) -> Dashboard Migrates a classic SQL dashboard to Lakeview. - + :param source_dashboard_id: str UUID of the dashboard to be migrated. :param display_name: str (optional) @@ -183,14 +183,14 @@ :param update_parameter_syntax: bool (optional) Flag to indicate if mustache parameter syntax ({{ param }}) should be auto-updated to named syntax (:param) when converting datasets in the dashboard. - + :returns: :class:`Dashboard` .. py:method:: publish(dashboard_id: str [, embed_credentials: Optional[bool], warehouse_id: Optional[str]]) -> PublishedDashboard Publish the current draft dashboard. - + :param dashboard_id: str UUID identifying the dashboard to be published. :param embed_credentials: bool (optional) @@ -198,51 +198,51 @@ embedded credentials will be used to execute the published dashboard's queries. :param warehouse_id: str (optional) The ID of the warehouse that can be used to override the warehouse which was set in the draft. - + :returns: :class:`PublishedDashboard` .. py:method:: trash(dashboard_id: str) Trash a dashboard. - + :param dashboard_id: str UUID identifying the dashboard. - - + + .. py:method:: unpublish(dashboard_id: str) Unpublish the dashboard. - + :param dashboard_id: str UUID identifying the published dashboard. - - + + .. py:method:: update(dashboard_id: str, dashboard: Dashboard) -> Dashboard Update a draft dashboard. - + :param dashboard_id: str UUID identifying the dashboard. :param dashboard: :class:`Dashboard` - + :returns: :class:`Dashboard` .. py:method:: update_schedule(dashboard_id: str, schedule_id: str, schedule: Schedule) -> Schedule Update dashboard schedule. - + :param dashboard_id: str UUID identifying the dashboard to which the schedule belongs.
:param schedule_id: str UUID identifying the schedule. :param schedule: :class:`Schedule` The schedule to update. - + :returns: :class:`Schedule` \ No newline at end of file diff --git a/docs/workspace/dashboards/lakeview_embedded.rst b/docs/workspace/dashboards/lakeview_embedded.rst index 80eb5bc48..ab8d484ec 100644 --- a/docs/workspace/dashboards/lakeview_embedded.rst +++ b/docs/workspace/dashboards/lakeview_embedded.rst @@ -9,13 +9,13 @@ .. py:method:: get_published_dashboard_token_info(dashboard_id: str [, external_value: Optional[str], external_viewer_id: Optional[str]]) -> GetPublishedDashboardTokenInfoResponse Get the required authorization details and scopes of a published dashboard to mint an OAuth token. - + :param dashboard_id: str UUID identifying the published dashboard. :param external_value: str (optional) Provided external value to be included in the custom claim. :param external_viewer_id: str (optional) Provided external viewer id to be included in the custom claim. - + :returns: :class:`GetPublishedDashboardTokenInfoResponse` \ No newline at end of file diff --git a/docs/workspace/database/database.rst b/docs/workspace/database/database.rst index 36e594ec7..15b1eab7d 100644 --- a/docs/workspace/database/database.rst +++ b/docs/workspace/database/database.rst @@ -9,19 +9,19 @@ .. py:method:: create_database_catalog(catalog: DatabaseCatalog) -> DatabaseCatalog Create a Database Catalog. - + :param catalog: :class:`DatabaseCatalog` - + :returns: :class:`DatabaseCatalog` .. py:method:: create_database_instance(database_instance: DatabaseInstance) -> Wait[DatabaseInstance] Create a Database Instance. - + :param database_instance: :class:`DatabaseInstance` Instance to create. - + :returns: Long-running operation waiter for :class:`DatabaseInstance`. See :method:wait_get_database_instance_database_available for more details. @@ -33,10 +33,10 @@ .. py:method:: create_database_instance_role(instance_name: str, database_instance_role: DatabaseInstanceRole) -> DatabaseInstanceRole Create a role for a Database Instance. - + :param instance_name: str :param database_instance_role: :class:`DatabaseInstanceRole` - + :returns: :class:`DatabaseInstanceRole` @@ -44,34 +44,34 @@ Create a Database Table. Useful for registering pre-existing PG tables in UC. See CreateSyncedDatabaseTable for creating synced tables in PG from a source table in UC. - + :param table: :class:`DatabaseTable` - + :returns: :class:`DatabaseTable` .. py:method:: create_synced_database_table(synced_table: SyncedDatabaseTable) -> SyncedDatabaseTable Create a Synced Database Table. - + :param synced_table: :class:`SyncedDatabaseTable` - + :returns: :class:`SyncedDatabaseTable` .. py:method:: delete_database_catalog(name: str) Delete a Database Catalog. - + :param name: str - - + + .. py:method:: delete_database_instance(name: str [, force: Optional[bool], purge: Optional[bool]]) Delete a Database Instance. - + :param name: str Name of the instance to delete. :param force: bool (optional) @@ -84,123 +84,123 @@ time (implementation pending). If true, the database instance is hard deleted and cannot be undeleted. For the time being, setting this value to true is required to delete an instance (soft delete is not yet supported). - - + + .. py:method:: delete_database_instance_role(instance_name: str, name: str [, allow_missing: Optional[bool], reassign_owned_to: Optional[str]]) Deletes a role for a Database Instance.
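As an illustrative sketch of the instance lifecycle documented above, blocking on the create waiter with `.result()` (the instance name is a placeholder, and the `capacity` value and `state` field are assumptions to verify against the SDK):

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.database import DatabaseInstance

    w = WorkspaceClient()

    # Create an instance and block until the waiter reports it available.
    instance = w.database.create_database_instance(
        DatabaseInstance(name="my-lakebase-instance", capacity="CU_1")).result()
    print(instance.state)  # assumed field; the waiter targets the AVAILABLE state

    # Mint a short-lived credential scoped to this instance.
    cred = w.database.generate_database_credential(instance_names=[instance.name])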
- + :param instance_name: str :param name: str :param allow_missing: bool (optional) This is the AIP standard name for the equivalent of Postgres' `IF EXISTS` option :param reassign_owned_to: str (optional) - - + + .. py:method:: delete_database_table(name: str) Delete a Database Table. - + :param name: str - - + + .. py:method:: delete_synced_database_table(name: str) Delete a Synced Database Table. - + :param name: str - - + + .. py:method:: find_database_instance_by_uid( [, uid: Optional[str]]) -> DatabaseInstance Find a Database Instance by uid. - + :param uid: str (optional) UID of the instance to get. - + :returns: :class:`DatabaseInstance` .. py:method:: generate_database_credential( [, claims: Optional[List[RequestedClaims]], instance_names: Optional[List[str]], request_id: Optional[str]]) -> DatabaseCredential Generates a credential that can be used to access database instances. - + :param claims: List[:class:`RequestedClaims`] (optional) The returned token will be scoped to the union of instance_names and instances containing the specified UC tables, so instance_names is allowed to be empty. :param instance_names: List[str] (optional) Instances to which the token will be scoped. :param request_id: str (optional) - + :returns: :class:`DatabaseCredential` .. py:method:: get_database_catalog(name: str) -> DatabaseCatalog Get a Database Catalog. - + :param name: str - + :returns: :class:`DatabaseCatalog` .. py:method:: get_database_instance(name: str) -> DatabaseInstance Get a Database Instance. - + :param name: str Name of the instance to get. - + :returns: :class:`DatabaseInstance` .. py:method:: get_database_instance_role(instance_name: str, name: str) -> DatabaseInstanceRole Gets a role for a Database Instance. - + :param instance_name: str :param name: str - + :returns: :class:`DatabaseInstanceRole` .. py:method:: get_database_table(name: str) -> DatabaseTable Get a Database Table. - + :param name: str - + :returns: :class:`DatabaseTable` .. py:method:: get_synced_database_table(name: str) -> SyncedDatabaseTable Get a Synced Database Table. - + :param name: str - + :returns: :class:`SyncedDatabaseTable` .. py:method:: list_database_catalogs(instance_name: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[DatabaseCatalog] This API is currently unimplemented, but exposed for Terraform support. - + :param instance_name: str Name of the instance to get database catalogs for. :param page_size: int (optional) Upper bound for items returned. :param page_token: str (optional) Pagination token to go to the next page of database catalogs. Requests first page if absent. - + :returns: Iterator over :class:`DatabaseCatalog` @@ -209,81 +209,81 @@ START OF PG ROLE APIs Section These APIs are marked as PUBLIC with stage < PUBLIC_PREVIEW. With more recent Lakebase V2 plans, we don't plan to ever advance these to PUBLIC_PREVIEW. These APIs will remain effectively undocumented/UI-only and we'll aim for a new public roles API as part of V2 PuPr. - + :param instance_name: str :param page_size: int (optional) Upper bound for items returned. :param page_token: str (optional) Pagination token to go to the next page of Database Instances. Requests first page if absent. - + :returns: Iterator over :class:`DatabaseInstanceRole` .. py:method:: list_database_instances( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[DatabaseInstance] List Database Instances. - + :param page_size: int (optional) Upper bound for items returned.
:param page_token: str (optional) Pagination token to go to the next page of Database Instances. Requests first page if absent. - + :returns: Iterator over :class:`DatabaseInstance` .. py:method:: list_synced_database_tables(instance_name: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[SyncedDatabaseTable] This API is currently unimplemented, but exposed for Terraform support. - + :param instance_name: str Name of the instance to get synced tables for. :param page_size: int (optional) Upper bound for items returned. :param page_token: str (optional) Pagination token to go to the next page of synced database tables. Requests first page if absent. - + :returns: Iterator over :class:`SyncedDatabaseTable` .. py:method:: update_database_catalog(name: str, database_catalog: DatabaseCatalog, update_mask: str) -> DatabaseCatalog This API is currently unimplemented, but exposed for Terraform support. - + :param name: str The name of the catalog in UC. :param database_catalog: :class:`DatabaseCatalog` Note that updating a database catalog is not yet supported. :param update_mask: str The list of fields to update. Setting this field is not yet supported. - + :returns: :class:`DatabaseCatalog` .. py:method:: update_database_instance(name: str, database_instance: DatabaseInstance, update_mask: str) -> DatabaseInstance Update a Database Instance. - + :param name: str The name of the instance. This is the unique identifier for the instance. :param database_instance: :class:`DatabaseInstance` :param update_mask: str The list of fields to update. If unspecified, all fields will be updated when possible. To wipe out custom_tags, specify custom_tags in the update_mask with an empty custom_tags map. - + :returns: :class:`DatabaseInstance` .. py:method:: update_synced_database_table(name: str, synced_table: SyncedDatabaseTable, update_mask: str) -> SyncedDatabaseTable This API is currently unimplemented, but exposed for Terraform support. - + :param name: str Full three-part (catalog, schema, table) name of the table. :param synced_table: :class:`SyncedDatabaseTable` Note that updating a synced database table is not yet supported. :param update_mask: str The list of fields to update. Setting this field is not yet supported. - + :returns: :class:`SyncedDatabaseTable` diff --git a/docs/workspace/files/dbfs.rst b/docs/workspace/files/dbfs.rst index 4a18c836f..ab5fbaa6f 100644 --- a/docs/workspace/files/dbfs.rst +++ b/docs/workspace/files/dbfs.rst @@ -11,26 +11,26 @@ Appends a block of data to the stream specified by the input handle. If the handle does not exist, this call will throw an exception with ``RESOURCE_DOES_NOT_EXIST``. - + If the block of data exceeds 1 MB, this call will throw an exception with ``MAX_BLOCK_SIZE_EXCEEDED``. - + :param handle: int The handle on an open stream. :param data: str The base64-encoded data to append to the stream. This has a limit of 1 MB. - - + + .. py:method:: close(handle: int) Closes the stream specified by the input handle. If the handle does not exist, this call throws an exception with ``RESOURCE_DOES_NOT_EXIST``. - + :param handle: int The handle on an open stream. - - + + .. py:method:: copy(src: str, dst: str [, recursive: bool = False, overwrite: bool = False]) @@ -42,17 +42,17 @@ Opens a stream to write to a file and returns a handle to this stream. There is a 10 minute idle timeout on this handle. 
If a file or directory already exists on the given path and __overwrite__ is set to false, this call will throw an exception with ``RESOURCE_ALREADY_EXISTS``. - + A typical workflow for file upload would be: - + 1. Issue a ``create`` call and get a handle. 2. Issue one or more ``add-block`` calls with the handle you have. 3. Issue a ``close`` call with the handle you have. - + :param path: str The path of the new file. The path should be the absolute DBFS path. :param overwrite: bool (optional) The flag that specifies whether to overwrite existing file/files. - + :returns: :class:`CreateResponse` @@ -93,10 +93,10 @@ Gets the file information for a file or directory. If the file or directory does not exist, this call throws an exception with `RESOURCE_DOES_NOT_EXIST`. - + :param path: str The path of the file or directory. The path should be the absolute DBFS path. - + :returns: :class:`FileInfo` @@ -125,13 +125,13 @@ this call throws an exception with `RESOURCE_DOES_NOT_EXIST`. If a file already exists in the destination path, this call throws an exception with `RESOURCE_ALREADY_EXISTS`. If the given source path is a directory, this call always recursively moves all files. - + :param source_path: str The source path of the file or directory. The path should be the absolute DBFS path. :param destination_path: str The destination path of the file or directory. The path should be the absolute DBFS path. - - + + .. py:method:: move_(src: str, dst: str [, recursive: bool = False, overwrite: bool = False]) @@ -145,23 +145,23 @@ Uploads a file through the use of multipart form post. It is mainly used for streaming uploads, but can also be used as a convenient single call for data upload. - + Alternatively you can pass contents as base64 string. - + The amount of data that can be passed (when not streaming) using the __contents__ parameter is limited to 1 MB. `MAX_BLOCK_SIZE_EXCEEDED` will be thrown if this limit is exceeded. - + If you want to upload large files, use the streaming upload. For details, see :method:dbfs/create, :method:dbfs/addBlock, :method:dbfs/close. - + :param path: str The path of the new file. The path should be the absolute DBFS path. :param contents: str (optional) This parameter might be absent, and instead a posted file will be used. :param overwrite: bool (optional) The flag that specifies whether to overwrite existing file/files. - - + + .. py:method:: read(path: str [, length: Optional[int], offset: Optional[int]]) -> ReadResponse @@ -170,10 +170,10 @@ `RESOURCE_DOES_NOT_EXIST`. If the path is a directory, the read length is negative, or if the offset is negative, this call throws an exception with `INVALID_PARAMETER_VALUE`. If the read length exceeds 1 MB, this call throws an exception with `MAX_READ_SIZE_EXCEEDED`. - + If `offset + length` exceeds the number of bytes in a file, it reads the contents until the end of file. - + :param path: str The path of the file to read. The path should be the absolute DBFS path. :param length: int (optional) @@ -181,7 +181,7 @@ of 0.5 MB. :param offset: int (optional) The offset to read from in bytes. - + :returns: :class:`ReadResponse` diff --git a/docs/workspace/files/files.rst b/docs/workspace/files/files.rst index 6118d35e3..203d523fb 100644 --- a/docs/workspace/files/files.rst +++ b/docs/workspace/files/files.rst @@ -7,21 +7,21 @@ The Files API is a standard HTTP API that allows you to read, write, list, and delete files and directories by referring to their URI. 
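Before the Files API methods, a quick illustration of the DBFS streaming-upload workflow described above, that is, ``create``, then ``add_block``, then ``close`` (the path is a placeholder; each base64-encoded block must stay under the 1 MB limit, and the handle is assumed to be exposed as `.handle` on the returned `CreateResponse`):

.. code-block:: python

    import base64

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # 1. Open a stream and get a handle (10 minute idle timeout).
    handle = w.dbfs.create(path="/tmp/example.txt", overwrite=True).handle

    # 2. Append one or more base64-encoded blocks of at most 1 MB each.
    w.dbfs.add_block(handle=handle,
                     data=base64.b64encode(b"hello, dbfs").decode())

    # 3. Close the stream to finalize the file.
    w.dbfs.close(handle=handle)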
The API makes working with file content as raw bytes easier and more efficient. - + The API supports [Unity Catalog volumes], where files and directories to operate on are specified using their volume URI path, which follows the format /Volumes/<catalog_name>/<schema_name>/<volume_name>/<path_to_file>. - + The Files API has two distinct endpoints, one for working with files (`/fs/files`) and another for working with directories (`/fs/directories`). Both endpoints use the standard HTTP methods GET, HEAD, PUT, and DELETE to manage files and directories specified using their URI path. The path is always absolute. - + Some Files API client features are currently experimental. To enable them, set `enable_experimental_files_api_client = True` in your configuration profile or use the environment variable `DATABRICKS_ENABLE_EXPERIMENTAL_FILES_API_CLIENT=True`. - + Use of the Files API may incur Databricks data transfer charges. - + [Unity Catalog volumes]: https://docs.databricks.com/en/connect/unity-catalog/volumes.html .. py:method:: create_directory(directory_path: str) Creates an empty directory. If necessary, also creates any parent directories of the new, empty directory (like the shell command `mkdir -p`). If called on an existing directory, returns a success response; this method is idempotent (it will succeed if the directory already exists). - + :param directory_path: str The absolute path of a directory. - - + + .. py:method:: delete(file_path: str) Deletes a file. If the request is successful, there is no response body. - + :param file_path: str The absolute path of the file. - - + + .. py:method:: delete_directory(directory_path: str) Deletes an empty directory. - + To delete a non-empty directory, first delete all of its contents. This can be done by listing the directory contents and deleting each file and subdirectory recursively. - + :param directory_path: str The absolute path of a directory. - - + + .. py:method:: download(file_path: str) -> DownloadResponse Downloads a file. The file contents are the response body. This is a standard HTTP file download, not a JSON RPC. It supports the Range and If-Unmodified-Since HTTP headers. - + :param file_path: str The absolute path of the file. - + :returns: :class:`DownloadResponse` @@ -74,25 +74,25 @@ Get the metadata of a directory. The response HTTP headers contain the metadata. There is no response body. - + This method is useful to check if a directory exists and the caller has access to it. - + If you wish to ensure the directory exists, you can instead use `PUT`, which will create the directory if it does not exist, and is idempotent (it will succeed if the directory already exists). - + :param directory_path: str The absolute path of a directory. - - + + .. py:method:: get_metadata(file_path: str) -> GetMetadataResponse Get the metadata of a file. The response HTTP headers contain the metadata. There is no response body. - + :param file_path: str The absolute path of the file. - + :returns: :class:`GetMetadataResponse` @@ -100,17 +100,17 @@ Returns the contents of a directory. If there is no directory at the specified path, the API returns an HTTP 404 error. - + :param directory_path: str The absolute path of a directory. :param page_size: int (optional) The maximum number of directory entries to return. The response may contain fewer entries. If the response contains a `next_page_token`, there may be more entries, even if fewer than `page_size` entries are in the response.
- + We recommend not setting this value unless you are intentionally listing less than the complete directory contents. - + If unspecified, at most 1000 directory entries will be returned. The maximum value is 1000. Values above 1000 will be coerced to 1000. :param page_token: str (optional) @@ -120,7 +120,7 @@ request. To list all of the entries in a directory, it is necessary to continue requesting pages of entries until the response contains no `next_page_token`. Note that the number of entries returned must not be used to determine when the listing is complete. - + :returns: Iterator over :class:`DirectoryEntry` @@ -130,13 +130,13 @@ octet stream); do not encode or otherwise modify the bytes before sending. The contents of the resulting file will be exactly the bytes sent in the request body. If the request is successful, there is no response body. - + :param file_path: str The absolute path of the file. :param contents: BinaryIO :param overwrite: bool (optional) If true or unspecified, an existing file will be overwritten. If false, an error will be returned if the path points to an existing file. - - + + \ No newline at end of file diff --git a/docs/workspace/iam/access_control.rst b/docs/workspace/iam/access_control.rst index 930af105a..a5f1feeda 100644 --- a/docs/workspace/iam/access_control.rst +++ b/docs/workspace/iam/access_control.rst @@ -9,7 +9,7 @@ .. py:method:: check_policy(actor: Actor, permission: str, resource: str, consistency_token: ConsistencyToken, authz_identity: RequestAuthzIdentity [, resource_info: Optional[ResourceInfo]]) -> CheckPolicyResponse Check access policy to a resource. - + :param actor: :class:`Actor` :param permission: str :param resource: str @@ -18,6 +18,6 @@ :param consistency_token: :class:`ConsistencyToken` :param authz_identity: :class:`RequestAuthzIdentity` :param resource_info: :class:`ResourceInfo` (optional) - + :returns: :class:`CheckPolicyResponse` \ No newline at end of file diff --git a/docs/workspace/iam/account_access_control_proxy.rst b/docs/workspace/iam/account_access_control_proxy.rst index 39b5a83ca..2a3160c86 100644 --- a/docs/workspace/iam/account_access_control_proxy.rst +++ b/docs/workspace/iam/account_access_control_proxy.rst @@ -12,15 +12,15 @@ Gets all the roles that can be granted on an account level resource. A role is grantable if the rule set on the resource can contain an access rule of the role. - + :param resource: str The resource name for which assignable roles will be listed. - + Examples | Summary :--- | :--- `resource=accounts/` | A resource name for the account. `resource=accounts//groups/` | A resource name for the group. `resource=accounts//servicePrincipals/` | A resource name for the service principal. - + :returns: :class:`GetAssignableRolesForResourceResponse` @@ -28,10 +28,10 @@ Get a rule set by its name. A rule set is always attached to a resource and contains a list of access rules on the said resource. Currently only a default rule set for each resource is supported. - + :param name: str The ruleset name associated with the request. - + Examples | Summary :--- | :--- `name=accounts//ruleSets/default` | A name for a rule set on the account. `name=accounts//groups//ruleSets/default` | A name for a rule set on the group. @@ -44,11 +44,11 @@ modify -> write pattern to perform rule set updates in order to avoid race conditions; that is, get an etag from a GET rule set request, and pass it with the PUT update request to identify the rule set version you are updating.
- + Examples | Summary :--- | :--- `etag=` | An empty etag can only be used in GET to indicate no freshness requirements. `etag=RENUAAABhSweA4NvVmmUYdiU717H3Tgy0UJdor3gE4a+mq/oj9NjAf8ZsQ==` | An etag encoding a specific version of the rule set to get or to be updated. - + :returns: :class:`RuleSetResponse` @@ -56,10 +56,10 @@ Replace the rules of a rule set. First, use get to read the current version of the rule set before modifying it. This pattern helps prevent conflicts between concurrent updates. - + :param name: str Name of the rule set. :param rule_set: :class:`RuleSetUpdateRequest` - + :returns: :class:`RuleSetResponse` \ No newline at end of file diff --git a/docs/workspace/iam/current_user.rst b/docs/workspace/iam/current_user.rst index 2f95213e2..eeaee86ee 100644 --- a/docs/workspace/iam/current_user.rst +++ b/docs/workspace/iam/current_user.rst @@ -20,7 +20,7 @@ me2 = w.current_user.me() Get details about the current method caller's identity. - - + + :returns: :class:`User` \ No newline at end of file diff --git a/docs/workspace/iam/groups.rst b/docs/workspace/iam/groups.rst index 737939095..0e4e6769c 100644 --- a/docs/workspace/iam/groups.rst +++ b/docs/workspace/iam/groups.rst @@ -6,7 +6,7 @@ Groups simplify identity management, making it easier to assign access to Databricks workspaces, data, and other securable objects. - + It is best practice to assign access to workspaces and access-control policies in Unity Catalog to groups, instead of to users individually. All Databricks workspace identities can be assigned as members of groups, and members inherit permissions that are assigned to their group. @@ -30,15 +30,16 @@ w.groups.delete(id=group.id) Creates a group in the Databricks workspace with a unique name, using the supplied group details. - + :param display_name: str (optional) String that represents a human-readable group name :param entitlements: List[:class:`ComplexValue`] (optional) Entitlements assigned to the group. See [assigning entitlements] for a full list of supported values. - + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) + external_id should be unique for identifying groups :param groups: List[:class:`ComplexValue`] (optional) :param id: str (optional) Databricks group ID @@ -49,7 +50,7 @@ Corresponds to AWS instance profile/arn role. :param schemas: List[:class:`GroupSchema`] (optional) The schema of the group. - + :returns: :class:`Group` @@ -71,11 +72,11 @@ w.groups.delete(id=group.id) Deletes a group from the Databricks workspace. - + :param id: str Unique ID for a group in the Databricks workspace. - - + + .. py:method:: get(id: str) -> Group @@ -99,17 +100,17 @@ w.groups.delete(id=group.id) Gets the information for a specific group in the Databricks workspace. - + :param id: str Unique ID for a group in the Databricks workspace. - + :returns: :class:`Group` .. py:method:: list( [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[ListSortOrder], start_index: Optional[int]]) -> Iterator[Group] Gets all details of the groups associated with the Databricks workspace. - + :param attributes: str (optional) Comma-separated list of attributes to return in response. :param count: int (optional) @@ -121,7 +122,7 @@ contains(`co`), starts with(`sw`) and not equals(`ne`).
Additionally, simple expressions can be formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently only support simple expressions. - + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 :param sort_by: str (optional) Attribute to sort the results. @@ -129,7 +130,7 @@ The order to sort the results. :param start_index: int (optional) Specifies the index of the first result. First item is number 1. - + :returns: Iterator over :class:`Group` @@ -175,20 +176,20 @@ w.groups.delete(id=group.id) Partially updates the details of a group. - + :param id: str Unique ID in the Databricks workspace. :param operations: List[:class:`Patch`] (optional) :param schemas: List[:class:`PatchSchema`] (optional) The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. - - + + .. py:method:: update(id: str [, display_name: Optional[str], entitlements: Optional[List[ComplexValue]], external_id: Optional[str], groups: Optional[List[ComplexValue]], members: Optional[List[ComplexValue]], meta: Optional[ResourceMeta], roles: Optional[List[ComplexValue]], schemas: Optional[List[GroupSchema]]]) Updates the details of a group by replacing the entire group entity. - + :param id: str Databricks group ID :param display_name: str (optional) @@ -196,9 +197,10 @@ :param entitlements: List[:class:`ComplexValue`] (optional) Entitlements assigned to the group. See [assigning entitlements] for a full list of supported values. - + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) + external_id should be unique for identifying groups :param groups: List[:class:`ComplexValue`] (optional) :param members: List[:class:`ComplexValue`] (optional) :param meta: :class:`ResourceMeta` (optional) @@ -207,6 +209,6 @@ Corresponds to AWS instance profile/arn role. :param schemas: List[:class:`GroupSchema`] (optional) The schema of the group. - - + + \ No newline at end of file diff --git a/docs/workspace/iam/permission_migration.rst b/docs/workspace/iam/permission_migration.rst index 248b1b80d..8eef6e0e1 100644 --- a/docs/workspace/iam/permission_migration.rst +++ b/docs/workspace/iam/permission_migration.rst @@ -9,7 +9,7 @@ .. py:method:: migrate_permissions(workspace_id: int, from_workspace_group_name: str, to_account_group_name: str [, size: Optional[int]]) -> MigratePermissionsResponse Migrate Permissions. - + :param workspace_id: int WorkspaceId of the associated workspace where the permission migration will occur. :param from_workspace_group_name: str @@ -18,6 +18,6 @@ The name of the account group that permissions will be migrated to. :param size: int (optional) The maximum number of permissions that will be migrated. - + :returns: :class:`MigratePermissionsResponse` \ No newline at end of file diff --git a/docs/workspace/iam/permissions.rst b/docs/workspace/iam/permissions.rst index dae53fa2e..6fb3ce8a6 100644 --- a/docs/workspace/iam/permissions.rst +++ b/docs/workspace/iam/permissions.rst @@ -24,7 +24,7 @@ the required permissions for specific actions or abilities and other important information, see [Access Control]. Note that to manage access control on service principals, use **[Account Access Control Proxy](:service:accountaccesscontrolproxy)**. - + [Access Control]: https://docs.databricks.com/security/auth-authz/access-control/index.html .. 
py:method:: get(request_object_type: str, request_object_id: str) -> ObjectPermissions @@ -48,7 +48,7 @@ Gets the permissions of an object. Objects can inherit permissions from their parent objects or root object. - + :param request_object_type: str The type of the request object. Can be one of the following: alerts, alertsv2, authorization, clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, @@ -56,7 +56,7 @@ warehouses. :param request_object_id: str The id of the request object. - + :returns: :class:`ObjectPermissions` @@ -80,14 +80,14 @@ levels = w.permissions.get_permission_levels(request_object_type="notebooks", request_object_id="%d" % (obj.object_id)) Gets the permission levels that a user can have on an object. - + :param request_object_type: str The type of the request object. Can be one of the following: alerts, alertsv2, authorization, clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, instance-pools, jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses. :param request_object_id: str - + :returns: :class:`GetPermissionLevelsResponse` @@ -128,7 +128,7 @@ Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their parent objects or root object. - + :param request_object_type: str The type of the request object. Can be one of the following: alerts, alertsv2, authorization, clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, @@ -137,7 +137,7 @@ :param request_object_id: str The id of the request object. :param access_control_list: List[:class:`AccessControlRequest`] (optional) - + :returns: :class:`ObjectPermissions` @@ -145,7 +145,7 @@ Updates the permissions on an object. Objects can inherit permissions from their parent objects or root object. - + :param request_object_type: str The type of the request object. Can be one of the following: alerts, alertsv2, authorization, clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, @@ -154,6 +154,6 @@ :param request_object_id: str The id of the request object. :param access_control_list: List[:class:`AccessControlRequest`] (optional) - + :returns: :class:`ObjectPermissions` \ No newline at end of file diff --git a/docs/workspace/iam/service_principals.rst b/docs/workspace/iam/service_principals.rst index ce8978afb..d78c508a5 100644 --- a/docs/workspace/iam/service_principals.rst +++ b/docs/workspace/iam/service_principals.rst @@ -35,7 +35,7 @@ w.service_principals.delete(id=spn.id) Creates a new service principal in the Databricks workspace. - + :param active: bool (optional) If this user is active :param application_id: str (optional) @@ -45,7 +45,7 @@ :param entitlements: List[:class:`ComplexValue`] (optional) Entitlements assigned to the service principal. See [assigning entitlements] for a full list of supported values. - + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) :param groups: List[:class:`ComplexValue`] (optional) @@ -55,18 +55,18 @@ Corresponds to AWS instance profile/arn role. :param schemas: List[:class:`ServicePrincipalSchema`] (optional) The schema of the List response. - + :returns: :class:`ServicePrincipal` .. py:method:: delete(id: str) Delete a single service principal in the Databricks workspace. 
- + :param id: str Unique ID for a service principal in the Databricks workspace. - - + + .. py:method:: get(id: str) -> ServicePrincipal @@ -90,10 +90,10 @@ w.service_principals.delete(id=created.id) Gets the details for a single service principal defined in the Databricks workspace. - + :param id: str Unique ID for a service principal in the Databricks workspace. - + :returns: :class:`ServicePrincipal` @@ -112,7 +112,7 @@ all = w.service_principals.list(iam.ListServicePrincipalsRequest()) Gets the set of service principals associated with a Databricks workspace. - + :param attributes: str (optional) Comma-separated list of attributes to return in response. :param count: int (optional) @@ -124,7 +124,7 @@ contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently only support simple expressions. - + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 :param sort_by: str (optional) Attribute to sort the results. @@ -132,7 +132,7 @@ The order to sort the results. :param start_index: int (optional) Specifies the index of the first result. First item is number 1. - + :returns: Iterator over :class:`ServicePrincipal` @@ -164,14 +164,14 @@ w.service_principals.delete(id=created.id) Partially updates the details of a single service principal in the Databricks workspace. - + :param id: str Unique ID in the Databricks workspace. :param operations: List[:class:`Patch`] (optional) :param schemas: List[:class:`PatchSchema`] (optional) The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. - - + + .. py:method:: update(id: str [, active: Optional[bool], application_id: Optional[str], display_name: Optional[str], entitlements: Optional[List[ComplexValue]], external_id: Optional[str], groups: Optional[List[ComplexValue]], roles: Optional[List[ComplexValue]], schemas: Optional[List[ServicePrincipalSchema]]]) @@ -200,9 +200,9 @@ w.service_principals.delete(id=created.id) Updates the details of a single service principal. - + This action replaces the existing service principal with the same name. - + :param id: str Databricks service principal ID. :param active: bool (optional) If this user is active :param application_id: str (optional) UUID relating to the service principal :param display_name: str (optional) String that represents a concatenation of given and family names. :param entitlements: List[:class:`ComplexValue`] (optional) Entitlements assigned to the service principal. See [assigning entitlements] for a full list of supported values. - + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) :param groups: List[:class:`ComplexValue`] (optional) :param roles: List[:class:`ComplexValue`] (optional) Corresponds to AWS instance profile/arn role. :param schemas: List[:class:`ServicePrincipalSchema`] (optional) The schema of the List response. - - + + \ No newline at end of file diff --git a/docs/workspace/iam/users.rst b/docs/workspace/iam/users.rst index fbee85661..08567093a 100644 --- a/docs/workspace/iam/users.rst +++ b/docs/workspace/iam/users.rst @@ -5,7 +5,7 @@ .. py:class:: UsersAPI User identities recognized by Databricks and represented by email addresses. - + Databricks recommends using SCIM provisioning to sync users and groups automatically from your identity provider to your Databricks workspace.
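Where IdP-driven provisioning is not yet in place, the same operations can be scripted against this API directly; a minimal sketch (the email address is a placeholder):

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import iam

    w = WorkspaceClient()

    # Create a workspace user; the user is also added to the account.
    user = w.users.create(display_name="Dana Example",
                          user_name="dana@example.com")

    # Deactivate the user with a SCIM PATCH instead of a full replace.
    w.users.patch(
        id=user.id,
        operations=[iam.Patch(op=iam.PatchOp.REPLACE, path="active", value="false")],
        schemas=[iam.PatchSchema.URN_IETF_PARAMS_SCIM_API_MESSAGES_2_0_PATCH_OP],
    )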
SCIM streamlines onboarding a new employee or team by using your identity provider to create users and groups in Databricks workspace and give them the proper level of @@ -34,20 +34,20 @@ Creates a new user in the Databricks workspace. This new user will also be added to the Databricks account. - + :param active: bool (optional) If this user is active :param display_name: str (optional) String that represents a concatenation of given and family names. For example `John Smith`. This field cannot be updated through the Workspace SCIM APIs when [identity federation is enabled]. Use Account SCIM APIs to update `displayName`. - + [identity federation is enabled]: https://docs.databricks.com/administration-guide/users-groups/best-practices.html#enable-identity-federation :param emails: List[:class:`ComplexValue`] (optional) All the emails associated with the Databricks user. :param entitlements: List[:class:`ComplexValue`] (optional) Entitlements assigned to the user. See [assigning entitlements] for a full list of supported values. - + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) External ID is not currently supported. It is reserved for future use. @@ -61,7 +61,7 @@ The schema of the user. :param user_name: str (optional) Email address of the Databricks user. - + :returns: :class:`User` @@ -84,11 +84,11 @@ Deletes a user. Deleting a user from a Databricks workspace also removes objects associated with the user. - + :param id: str Unique ID for a user in the Databricks workspace. - - + + .. py:method:: get(id: str [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[GetSortOrder], start_index: Optional[int]]) -> User @@ -112,7 +112,7 @@ fetch = w.users.get(id=user.id) Gets information for a specific user in Databricks workspace. - + :param id: str Unique ID for a user in the Databricks workspace. :param attributes: str (optional) @@ -126,7 +126,7 @@ contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently only support simple expressions. - + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 :param sort_by: str (optional) Attribute to sort the results. Multi-part paths are supported. For example, `userName`, @@ -135,23 +135,23 @@ The order to sort the results. :param start_index: int (optional) Specifies the index of the first result. First item is number 1. - + :returns: :class:`User` .. py:method:: get_permission_levels() -> GetPasswordPermissionLevelsResponse Gets the permission levels that a user can have on an object. - - + + :returns: :class:`GetPasswordPermissionLevelsResponse` .. py:method:: get_permissions() -> PasswordPermissions Gets the permissions of all passwords. Passwords can inherit permissions from their root object. - - + + :returns: :class:`PasswordPermissions` @@ -174,7 +174,7 @@ ) Gets details for all the users associated with a Databricks workspace. - + :param attributes: str (optional) Comma-separated list of attributes to return in response. :param count: int (optional) @@ -186,7 +186,7 @@ contains(`co`), starts with(`sw`) and not equals(`ne`). Additionally, simple expressions can be formed using logical operators - `and` and `or`. The [SCIM RFC] has more details but we currently only support simple expressions. 
- + [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 :param sort_by: str (optional) Attribute to sort the results. Multi-part paths are supported. For example, `userName`, @@ -195,7 +195,7 @@ The order to sort the results. :param start_index: int (optional) Specifies the index of the first result. First item is number 1. - + :returns: Iterator over :class:`User` @@ -225,23 +225,23 @@ ) Partially updates a user resource by applying the supplied operations on specific user attributes. - + :param id: str Unique ID in the Databricks workspace. :param operations: List[:class:`Patch`] (optional) :param schemas: List[:class:`PatchSchema`] (optional) The schema of the patch request. Must be ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. - - + + .. py:method:: set_permissions( [, access_control_list: Optional[List[PasswordAccessControlRequest]]]) -> PasswordPermissions Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. - + :param access_control_list: List[:class:`PasswordAccessControlRequest`] (optional) - + :returns: :class:`PasswordPermissions` @@ -266,7 +266,7 @@ w.users.update(id=user.id, user_name=user.user_name, active=True) Replaces a user's information with the data supplied in request. - + :param id: str Databricks user ID. :param active: bool (optional) @@ -275,13 +275,13 @@ String that represents a concatenation of given and family names. For example `John Smith`. This field cannot be updated through the Workspace SCIM APIs when [identity federation is enabled]. Use Account SCIM APIs to update `displayName`. - + [identity federation is enabled]: https://docs.databricks.com/administration-guide/users-groups/best-practices.html#enable-identity-federation :param emails: List[:class:`ComplexValue`] (optional) All the emails associated with the Databricks user. :param entitlements: List[:class:`ComplexValue`] (optional) Entitlements assigned to the user. See [assigning entitlements] for a full list of supported values. - + [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements :param external_id: str (optional) External ID is not currently supported. It is reserved for future use. @@ -293,15 +293,15 @@ The schema of the user. :param user_name: str (optional) Email address of the Databricks user. - - + + .. py:method:: update_permissions( [, access_control_list: Optional[List[PasswordAccessControlRequest]]]) -> PasswordPermissions Updates the permissions on all passwords. Passwords can inherit permissions from their root object. - + :param access_control_list: List[:class:`PasswordAccessControlRequest`] (optional) - + :returns: :class:`PasswordPermissions` \ No newline at end of file diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index e1d8f668f..221264d51 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -5,17 +5,17 @@ .. py:class:: JobsExt The Jobs API allows you to create, edit, and delete jobs. - + You can use a Databricks job to run a data processing or data analysis task in a Databricks cluster with scalable resources. Your job can consist of a single task or can be a large, multi-task workflow with complex dependencies. Databricks manages the task orchestration, cluster management, monitoring, and error reporting for all of your jobs. 
You can run your jobs immediately or periodically through an easy-to-use scheduling system. You can implement job tasks using notebooks, JARs, Delta Live Tables pipelines, or Python, Scala, Spark submit, and Java applications. - + You should never hard-code secrets or store them in plain text. Use the [Secrets CLI] to manage secrets in the [Databricks CLI]. Use the [Secrets utility] to reference secrets in notebooks and jobs. - + [Databricks CLI]: https://docs.databricks.com/dev-tools/cli/index.html [Secrets CLI]: https://docs.databricks.com/dev-tools/cli/secrets-cli.html [Secrets utility]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-secrets @@ -61,14 +61,14 @@ Cancels all active runs of a job. The runs are canceled asynchronously, so it doesn't prevent new runs from being started. - + :param all_queued_runs: bool (optional) Optional boolean parameter to cancel all queued runs. If no job_id is provided, all queued runs in the workspace are canceled. :param job_id: int (optional) The canonical identifier of the job to cancel all runs of. - - + + .. py:method:: cancel_run(run_id: int) -> Wait[Run] @@ -114,10 +114,10 @@ Cancels a job run or a task run. The run is canceled asynchronously, so it may still be running when this request completes. - + :param run_id: int This field is required. - + :returns: Long-running operation waiter for :class:`Run`. See :method:wait_get_run_job_terminated_or_skipped for more details. @@ -164,7 +164,7 @@ w.jobs.delete(job_id=created_job.job_id) Create a new job. - + :param access_control_list: List[:class:`JobAccessControlRequest`] (optional) List of permissions to set on the job. :param budget_policy_id: str (optional) @@ -180,7 +180,7 @@ An optional description for the job. The maximum length is 27700 characters in UTF-8 encoding. :param edit_mode: :class:`JobEditMode` (optional) Edit mode of the job. - + * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. * `EDITABLE`: The job is in an editable state and can be modified. :param email_notifications: :class:`JobEmailNotifications` (optional) @@ -197,10 +197,10 @@ :param git_source: :class:`GitSource` (optional) An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks. - + If `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task. - + Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job. :param health: :class:`JobsHealthRules` (optional) @@ -226,7 +226,7 @@ :param performance_target: :class:`PerformanceTarget` (optional) The performance mode on a serverless job. This field determines the level of compute performance or cost-efficiency for the run. - + * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and optimized cluster performance. @@ -261,28 +261,28 @@ usage policy used by this workload. :param webhook_notifications: :class:`WebhookNotifications` (optional) A collection of system notification IDs to notify when runs of this job begin or complete. - + :returns: :class:`CreateResponse` .. py:method:: delete(job_id: int) Deletes a job. - + :param job_id: int The canonical identifier of the job to delete.
This field is required. - - + + .. py:method:: delete_run(run_id: int) Deletes a non-active run. Returns an error if the run is active. - + :param run_id: int ID of the run to delete. - - + + .. py:method:: export_run(run_id: int [, views_to_export: Optional[ViewsToExport]]) -> ExportRunOutput @@ -327,12 +327,12 @@ w.jobs.delete(job_id=created_job.job_id) Export and retrieve the job run task. - + :param run_id: int The canonical identifier for the run. This field is required. :param views_to_export: :class:`ViewsToExport` (optional) Which views to export (CODE, DASHBOARDS, or ALL). Defaults to CODE. - + :returns: :class:`ExportRunOutput` @@ -390,20 +390,20 @@ .. py:method:: get_permission_levels(job_id: str) -> GetJobPermissionLevelsResponse Gets the permission levels that a user can have on an object. - + :param job_id: str The job for which to get or manage permissions. - + :returns: :class:`GetJobPermissionLevelsResponse` .. py:method:: get_permissions(job_id: str) -> JobPermissions Gets the permissions of a job. Jobs can inherit permissions from their root object. - + :param job_id: str The job for which to get or manage permissions. - + :returns: :class:`JobPermissions` @@ -502,14 +502,14 @@ the `dbutils.notebook.exit()` call, you can use this endpoint to retrieve that value. Databricks restricts this API to returning the first 5 MB of the output. To return a larger result, you can store job results in a cloud storage service. - + This endpoint validates that the __run_id__ parameter is valid and returns an HTTP status code 400 if the __run_id__ parameter is invalid. Runs are automatically removed after 60 days. If you want to reference them beyond 60 days, you must save old run results before they expire. - + :param run_id: int The canonical identifier for the run. - + :returns: :class:`RunOutput` @@ -700,7 +700,7 @@ Re-run one or more tasks. Tasks are re-run as part of the original job run. They use the current job and task settings, and can be viewed in the history for the original job run. - + :param run_id: int The job run ID of the run to repair. The run must not be in progress. :param dbt_commands: List[str] (optional) @@ -712,9 +712,9 @@ task. If not specified upon `run-now`, it defaults to an empty list. jar_params cannot be specified in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - + Use [Task parameter variables] to set parameters containing information about job runs. - + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables :param job_parameters: Dict[str,str] (optional) Job-level parameters used in the run. For example `"param": "overriding_val"` @@ -725,23 +725,23 @@ A map from keys to values for jobs with notebook task, for example `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed to the notebook and is accessible through the [dbutils.widgets.get] function. - + If not specified upon `run-now`, the triggered run uses the job’s base parameters. - + notebook_params cannot be specified in conjunction with jar_params. - + Use [Task parameter variables] to set parameters containing information about job runs. - + The JSON representation of this field (for example `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed 10,000 bytes.
- + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html :param performance_target: :class:`PerformanceTarget` (optional) The performance mode on a serverless job. The performance target determines the level of compute performance or cost-efficiency for the run. This field overrides the performance target defined on the job level. - + * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and optimized cluster performance. @@ -753,15 +753,15 @@ The parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - + Use [Task parameter variables] to set parameters containing information about job runs. - + Important - + These parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. - + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables :param rerun_all_failed_tasks: bool (optional) If true, repair all failed tasks. Only one of `rerun_tasks` or `rerun_all_failed_tasks` can be used. @@ -776,20 +776,20 @@ as command-line parameters. If specified upon `run-now`, it would overwrite the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - + Use [Task parameter variables] to set parameters containing information about job runs - + Important - + These parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. - + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables :param sql_params: Dict[str,str] (optional) A map from keys to values for jobs with SQL task, for example `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task does not support custom parameters. - + :returns: Long-running operation waiter for :class:`Run`. See :method:wait_get_run_job_terminated_or_skipped for more details. @@ -846,16 +846,16 @@ Overwrite all settings for the given job. Use the [_Update_ endpoint](:method:jobs/update) to update job settings partially. - + :param job_id: int The canonical identifier of the job to reset. This field is required. :param new_settings: :class:`JobSettings` The new settings of the job. These settings completely replace the old settings. - + Changes to the field `JobBaseSettings.timeout_seconds` are applied to active runs. Changes to other fields are applied to future runs only. - - + + .. 
py:method:: run_now(job_id: int [, dbt_commands: Optional[List[str]], idempotency_token: Optional[str], jar_params: Optional[List[str]], job_parameters: Optional[Dict[str, str]], notebook_params: Optional[Dict[str, str]], only: Optional[List[str]], performance_target: Optional[PerformanceTarget], pipeline_params: Optional[PipelineParams], python_named_params: Optional[Dict[str, str]], python_params: Optional[List[str]], queue: Optional[QueueSettings], spark_submit_params: Optional[List[str]], sql_params: Optional[Dict[str, str]]]) -> Wait[Run] @@ -898,7 +898,7 @@ w.jobs.delete(job_id=created_job.job_id) Run a job and return the `run_id` of the triggered run. - + :param job_id: int The ID of the job to be executed :param dbt_commands: List[str] (optional) @@ -908,14 +908,14 @@ An optional token to guarantee the idempotency of job run requests. If a run with the provided token already exists, the request does not create a new run but returns the ID of the existing run instead. If a run with the provided token is deleted, an error is returned. - + If you specify the idempotency token, upon failure you can retry until the request succeeds. Databricks guarantees that exactly one run is launched with that idempotency token. - + This token must have at most 64 characters. - + For more information, see [How to ensure idempotency for jobs]. - + [How to ensure idempotency for jobs]: https://kb.databricks.com/jobs/jobs-idempotency.html :param jar_params: List[str] (optional) A list of parameters for jobs with Spark JAR tasks, for example `"jar_params": ["john doe", "35"]`. @@ -923,9 +923,9 @@ task. If not specified upon `run-now`, it defaults to an empty list. jar_params cannot be specified in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - + Use [Task parameter variables] to set parameters containing information about job runs. - + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables :param job_parameters: Dict[str,str] (optional) Job-level parameters used in the run. for example `"param": "overriding_val"` @@ -933,16 +933,16 @@ A map from keys to values for jobs with notebook task, for example `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed to the notebook and is accessible through the [dbutils.widgets.get] function. - + If not specified upon `run-now`, the triggered run uses the job’s base parameters. - + notebook_params cannot be specified in conjunction with jar_params. - + Use [Task parameter variables] to set parameters containing information about job runs. - + The JSON representation of this field (for example `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed 10,000 bytes. - + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html :param only: List[str] (optional) @@ -952,7 +952,7 @@ The performance mode on a serverless job. The performance target determines the level of compute performance or cost-efficiency for the run. This field overrides the performance target defined on the job level. - + * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and optimized cluster performance. @@ -964,15 +964,15 @@ The parameters are passed to Python file as command-line parameters. 
If specified upon `run-now`, it would overwrite the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - + Use [Task parameter variables] to set parameters containing information about job runs. - + Important - + These parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. - + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables :param queue: :class:`QueueSettings` (optional) The queue settings of the run. @@ -982,20 +982,20 @@ as command-line parameters. If specified upon `run-now`, it would overwrite the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - + Use [Task parameter variables] to set parameters containing information about job runs - + Important - + These parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. - + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables :param sql_params: Dict[str,str] (optional) A map from keys to values for jobs with SQL task, for example `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task does not support custom parameters. - + :returns: Long-running operation waiter for :class:`Run`. See :method:wait_get_run_job_terminated_or_skipped for more details. @@ -1008,11 +1008,11 @@ Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. - + :param job_id: str The job for which to get or manage permissions. :param access_control_list: List[:class:`JobAccessControlRequest`] (optional) - + :returns: :class:`JobPermissions` @@ -1054,7 +1054,7 @@ Submit a one-time run. This endpoint allows you to submit a workload directly without creating a job. Runs submitted using this endpoint don’t display in the UI. Use the `jobs/runs/get` API to check the run state after the job is submitted. - + :param access_control_list: List[:class:`JobAccessControlRequest`] (optional) List of permissions to set on the job. :param budget_policy_id: str (optional) @@ -1067,10 +1067,10 @@ :param git_source: :class:`GitSource` (optional) An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks. - + If `git_source` is set, these tasks retrieve the file from the remote repository by default. However, this behavior can be overridden by setting `source` to `WORKSPACE` on the task. - + Note: dbt and SQL File tasks support only version-controlled sources. If dbt or SQL File tasks are used, `git_source` must be defined on the job. :param health: :class:`JobsHealthRules` (optional) @@ -1078,14 +1078,14 @@ An optional token that can be used to guarantee the idempotency of job run requests. If a run with the provided token already exists, the request does not create a new run but returns the ID of the existing run instead. If a run with the provided token is deleted, an error is returned. 
- + If you specify the idempotency token, upon failure you can retry until the request succeeds. Databricks guarantees that exactly one run is launched with that idempotency token. - + This token must have at most 64 characters. - + For more information, see [How to ensure idempotency for jobs]. - + [How to ensure idempotency for jobs]: https://kb.databricks.com/jobs/jobs-idempotency.html :param notification_settings: :class:`JobNotificationSettings` (optional) Optional notification settings that are used when sending notifications to each of the @@ -1105,7 +1105,7 @@ usage policy may be applied when creating or modifying the job. :param webhook_notifications: :class:`WebhookNotifications` (optional) A collection of system notification IDs to notify when the run begins or completes. - + :returns: Long-running operation waiter for :class:`Run`. See :method:wait_get_run_job_terminated_or_skipped for more details. @@ -1160,7 +1160,7 @@ Add, update, or remove specific settings of an existing job. Use the [_Reset_ endpoint](:method:jobs/reset) to overwrite all job settings. - + :param job_id: int The canonical identifier of the job to update. This field is required. :param fields_to_remove: List[str] (optional) @@ -1168,27 +1168,27 @@ tasks and job clusters (`tasks/task_1`). This field is optional. :param new_settings: :class:`JobSettings` (optional) The new settings for the job. - + Top-level fields specified in `new_settings` are completely replaced, except for arrays which are merged. That is, new and existing entries are completely replaced based on the respective key fields, i.e. `task_key` or `job_cluster_key`, while previous entries are kept. - + Partially updating nested fields is not supported. - + Changes to the field `JobSettings.timeout_seconds` are applied to active runs. Changes to other fields are applied to future runs only. - - + + .. py:method:: update_permissions(job_id: str [, access_control_list: Optional[List[JobAccessControlRequest]]]) -> JobPermissions Updates the permissions on a job. Jobs can inherit permissions from their root object. - + :param job_id: str The job for which to get or manage permissions. :param access_control_list: List[:class:`JobAccessControlRequest`] (optional) - + :returns: :class:`JobPermissions` diff --git a/docs/workspace/jobs/policy_compliance_for_jobs.rst b/docs/workspace/jobs/policy_compliance_for_jobs.rst index 027471d77..ecc79cc6d 100644 --- a/docs/workspace/jobs/policy_compliance_for_jobs.rst +++ b/docs/workspace/jobs/policy_compliance_for_jobs.rst @@ -6,12 +6,12 @@ The compliance APIs allow you to view and manage the policy compliance status of jobs in your workspace. This API currently only supports compliance controls for cluster policies. - + A job is in compliance if its cluster configurations satisfy the rules of all their respective cluster policies. A job could be out of compliance if a cluster policy it uses was updated after the job was last edited. The job is considered out of compliance if any of its clusters no longer comply with their updated policies. - + The get and list compliance APIs allow you to view the policy compliance status of a job. The enforce compliance API allows you to update a job so that it becomes compliant with all of its policies. @@ -20,12 +20,12 @@ Updates a job so the job clusters that are created when running the job (specified in `new_cluster`) are compliant with the current versions of their respective cluster policies. All-purpose clusters used in the job will not be updated. 
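For instance, a minimal sketch of checking and then enforcing compliance (assuming a configured `WorkspaceClient` and a placeholder job ID; the parameters are documented below):

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Placeholder job ID used for illustration only.
    job_id = 123

    # Preview the changes enforcement would make, without updating the job.
    preview = w.policy_compliance_for_jobs.enforce_compliance(job_id=job_id, validate_only=True)

    # Apply the changes so the job's clusters comply with their current policies.
    response = w.policy_compliance_for_jobs.enforce_compliance(job_id=job_id)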
- + :param job_id: int The ID of the job you want to enforce policy compliance on. :param validate_only: bool (optional) If set, previews changes made to the job to comply with its policy, but does not update the job. - + :returns: :class:`EnforcePolicyComplianceResponse` @@ -34,10 +34,10 @@ Returns the policy compliance status of a job. Jobs could be out of compliance if a cluster policy they use was updated after the job was last edited and some of its job clusters no longer comply with their updated policies. - + :param job_id: int The ID of the job whose compliance status you are requesting. - + :returns: :class:`GetPolicyComplianceResponse` @@ -46,7 +46,7 @@ Returns the policy compliance status of all jobs that use a given policy. Jobs could be out of compliance if a cluster policy they use was updated after the job was last edited and its job clusters no longer comply with the updated policy. - + :param policy_id: str Canonical unique identifier for the cluster policy. :param page_size: int (optional) @@ -55,6 +55,6 @@ :param page_token: str (optional) A page token that can be used to navigate to the next page or previous page as returned by `next_page_token` or `prev_page_token`. - + :returns: Iterator over :class:`JobCompliance` \ No newline at end of file diff --git a/docs/workspace/marketplace/consumer_fulfillments.rst b/docs/workspace/marketplace/consumer_fulfillments.rst index 8977abe5c..4054101a7 100644 --- a/docs/workspace/marketplace/consumer_fulfillments.rst +++ b/docs/workspace/marketplace/consumer_fulfillments.rst @@ -9,11 +9,11 @@ .. py:method:: get(listing_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[SharedDataObject] Get a high level preview of the metadata of listing installable content. - + :param listing_id: str :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`SharedDataObject` @@ -23,10 +23,10 @@ Standard installations contain metadata about the attached share or git repo. Only one of these fields will be present. Personalized installations contain metadata about the attached share or git repo, as well as the Delta Sharing recipient type. - + :param listing_id: str :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`ListingFulfillment` \ No newline at end of file diff --git a/docs/workspace/marketplace/consumer_installations.rst b/docs/workspace/marketplace/consumer_installations.rst index 8314bef65..adb0d272a 100644 --- a/docs/workspace/marketplace/consumer_installations.rst +++ b/docs/workspace/marketplace/consumer_installations.rst @@ -9,7 +9,7 @@ .. py:method:: create(listing_id: str [, accepted_consumer_terms: Optional[ConsumerTerms], catalog_name: Optional[str], recipient_type: Optional[DeltaSharingRecipientType], repo_detail: Optional[RepoInstallation], share_name: Optional[str]]) -> Installation Install payload associated with a Databricks Marketplace listing. - + :param listing_id: str :param accepted_consumer_terms: :class:`ConsumerTerms` (optional) :param catalog_name: str (optional) @@ -17,38 +17,38 @@ :param repo_detail: :class:`RepoInstallation` (optional) for git repo installations :param share_name: str (optional) - + :returns: :class:`Installation` .. py:method:: delete(listing_id: str, installation_id: str) Uninstall an installation associated with a Databricks Marketplace listing. - + :param listing_id: str :param installation_id: str - - + + .. 
py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[InstallationDetail] List all installations across all listings. - + :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`InstallationDetail` .. py:method:: list_listing_installations(listing_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[InstallationDetail] List all installations for a particular listing. - + :param listing_id: str :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`InstallationDetail` @@ -58,11 +58,11 @@ as interact with external services according to the fields not included in the installation table 1. the token will be rotated if the rotateToken flag is true 2. the token will be forcibly rotated if the rotateToken flag is true and the tokenInfo field is empty - + :param listing_id: str :param installation_id: str :param installation: :class:`InstallationDetail` :param rotate_token: bool (optional) - + :returns: :class:`UpdateInstallationResponse` \ No newline at end of file diff --git a/docs/workspace/marketplace/consumer_listings.rst b/docs/workspace/marketplace/consumer_listings.rst index ab26396d5..97956c94a 100644 --- a/docs/workspace/marketplace/consumer_listings.rst +++ b/docs/workspace/marketplace/consumer_listings.rst @@ -10,25 +10,25 @@ .. py:method:: batch_get( [, ids: Optional[List[str]]]) -> BatchGetListingsResponse Batch get a published listing in the Databricks Marketplace that the consumer has access to. - + :param ids: List[str] (optional) - + :returns: :class:`BatchGetListingsResponse` .. py:method:: get(id: str) -> GetListingResponse Get a published listing in the Databricks Marketplace that the consumer has access to. - + :param id: str - + :returns: :class:`GetListingResponse` .. py:method:: list( [, assets: Optional[List[AssetType]], categories: Optional[List[Category]], is_free: Optional[bool], is_private_exchange: Optional[bool], is_staff_pick: Optional[bool], page_size: Optional[int], page_token: Optional[str], provider_ids: Optional[List[str]], tags: Optional[List[ListingTag]]]) -> Iterator[Listing] List all published listings in the Databricks Marketplace that the consumer has access to. - + :param assets: List[:class:`AssetType`] (optional) Matches any of the following asset types :param categories: List[:class:`Category`] (optional) @@ -45,7 +45,7 @@ Matches any of the following provider ids :param tags: List[:class:`ListingTag`] (optional) Matches any of the following tags - + :returns: Iterator over :class:`Listing` @@ -53,7 +53,7 @@ Search published listings in the Databricks Marketplace that the consumer has access to. This query supports a variety of different search parameters and performs fuzzy matching. - + :param query: str Fuzzy matches query :param assets: List[:class:`AssetType`] (optional) @@ -66,6 +66,6 @@ :param page_token: str (optional) :param provider_ids: List[str] (optional) Matches any of the following provider ids - + :returns: Iterator over :class:`Listing` \ No newline at end of file diff --git a/docs/workspace/marketplace/consumer_personalization_requests.rst b/docs/workspace/marketplace/consumer_personalization_requests.rst index 1f8c12932..d9d8f9839 100644 --- a/docs/workspace/marketplace/consumer_personalization_requests.rst +++ b/docs/workspace/marketplace/consumer_personalization_requests.rst @@ -9,7 +9,7 @@ ..
py:method:: create(listing_id: str, intended_use: str, accepted_consumer_terms: ConsumerTerms [, comment: Optional[str], company: Optional[str], first_name: Optional[str], is_from_lighthouse: Optional[bool], last_name: Optional[str], recipient_type: Optional[DeltaSharingRecipientType]]) -> CreatePersonalizationRequestResponse Create a personalization request for a listing. - + :param listing_id: str :param intended_use: str :param accepted_consumer_terms: :class:`ConsumerTerms` @@ -19,7 +19,7 @@ :param is_from_lighthouse: bool (optional) :param last_name: str (optional) :param recipient_type: :class:`DeltaSharingRecipientType` (optional) - + :returns: :class:`CreatePersonalizationRequestResponse` @@ -27,18 +27,18 @@ Get the personalization request for a listing. Each consumer can make at *most* one personalization request for a listing. - + :param listing_id: str - + :returns: :class:`GetPersonalizationRequestResponse` .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[PersonalizationRequest] List personalization requests for a consumer across all listings. - + :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`PersonalizationRequest` \ No newline at end of file diff --git a/docs/workspace/marketplace/consumer_providers.rst b/docs/workspace/marketplace/consumer_providers.rst index 1c275b1a5..00787c78b 100644 --- a/docs/workspace/marketplace/consumer_providers.rst +++ b/docs/workspace/marketplace/consumer_providers.rst @@ -9,28 +9,28 @@ .. py:method:: batch_get( [, ids: Optional[List[str]]]) -> BatchGetProvidersResponse Batch get a provider in the Databricks Marketplace with at least one visible listing. - + :param ids: List[str] (optional) - + :returns: :class:`BatchGetProvidersResponse` .. py:method:: get(id: str) -> GetProviderResponse Get a provider in the Databricks Marketplace with at least one visible listing. - + :param id: str - + :returns: :class:`GetProviderResponse` .. py:method:: list( [, is_featured: Optional[bool], page_size: Optional[int], page_token: Optional[str]]) -> Iterator[ProviderInfo] List all providers in the Databricks Marketplace with at least one visible listing. - + :param is_featured: bool (optional) :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`ProviderInfo` \ No newline at end of file diff --git a/docs/workspace/marketplace/provider_exchange_filters.rst b/docs/workspace/marketplace/provider_exchange_filters.rst index c5d91f81a..f6463a822 100644 --- a/docs/workspace/marketplace/provider_exchange_filters.rst +++ b/docs/workspace/marketplace/provider_exchange_filters.rst @@ -9,38 +9,38 @@ .. py:method:: create(filter: ExchangeFilter) -> CreateExchangeFilterResponse Add an exchange filter. - + :param filter: :class:`ExchangeFilter` - + :returns: :class:`CreateExchangeFilterResponse` .. py:method:: delete(id: str) Delete an exchange filter - + :param id: str - - + + .. py:method:: list(exchange_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[ExchangeFilter] List exchange filter - + :param exchange_id: str :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`ExchangeFilter` .. py:method:: update(id: str, filter: ExchangeFilter) -> UpdateExchangeFilterResponse Update an exchange filter. 
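A minimal sketch of updating an exchange filter (assuming a configured `WorkspaceClient`, a placeholder exchange ID, and that the listed `ExchangeFilter` objects carry an `id` field, as the `update` signature suggests):

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Placeholder exchange ID used for illustration only.
    filters = list(w.provider_exchange_filters.list(exchange_id="exchange-id"))

    # Re-submit an existing filter; in practice you would modify it before updating.
    if filters:
        updated = w.provider_exchange_filters.update(id=filters[0].id, filter=filters[0])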
- + :param id: str :param filter: :class:`ExchangeFilter` - + :returns: :class:`UpdateExchangeFilterResponse` \ No newline at end of file diff --git a/docs/workspace/marketplace/provider_exchanges.rst b/docs/workspace/marketplace/provider_exchanges.rst index 7b8841b07..9a49f0653 100644 --- a/docs/workspace/marketplace/provider_exchanges.rst +++ b/docs/workspace/marketplace/provider_exchanges.rst @@ -9,87 +9,87 @@ .. py:method:: add_listing_to_exchange(listing_id: str, exchange_id: str) -> AddExchangeForListingResponse Associate an exchange with a listing - + :param listing_id: str :param exchange_id: str - + :returns: :class:`AddExchangeForListingResponse` .. py:method:: create(exchange: Exchange) -> CreateExchangeResponse Create an exchange - + :param exchange: :class:`Exchange` - + :returns: :class:`CreateExchangeResponse` .. py:method:: delete(id: str) This removes a listing from marketplace. - + :param id: str - - + + .. py:method:: delete_listing_from_exchange(id: str) Disassociate an exchange with a listing - + :param id: str - - + + .. py:method:: get(id: str) -> GetExchangeResponse Get an exchange. - + :param id: str - + :returns: :class:`GetExchangeResponse` .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[Exchange] List exchanges visible to provider - + :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`Exchange` .. py:method:: list_exchanges_for_listing(listing_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[ExchangeListing] List exchanges associated with a listing - + :param listing_id: str :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`ExchangeListing` .. py:method:: list_listings_for_exchange(exchange_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[ExchangeListing] List listings associated with an exchange - + :param exchange_id: str :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`ExchangeListing` .. py:method:: update(id: str, exchange: Exchange) -> UpdateExchangeResponse Update an exchange - + :param id: str :param exchange: :class:`Exchange` - + :returns: :class:`UpdateExchangeResponse` \ No newline at end of file diff --git a/docs/workspace/marketplace/provider_files.rst b/docs/workspace/marketplace/provider_files.rst index 0120e52a7..ccd229496 100644 --- a/docs/workspace/marketplace/provider_files.rst +++ b/docs/workspace/marketplace/provider_files.rst @@ -9,40 +9,40 @@ .. py:method:: create(file_parent: FileParent, marketplace_file_type: MarketplaceFileType, mime_type: str [, display_name: Optional[str]]) -> CreateFileResponse Create a file. Currently, only provider icons and attached notebooks are supported. - + :param file_parent: :class:`FileParent` :param marketplace_file_type: :class:`MarketplaceFileType` :param mime_type: str :param display_name: str (optional) - + :returns: :class:`CreateFileResponse` .. py:method:: delete(file_id: str) Delete a file - + :param file_id: str - - + + .. py:method:: get(file_id: str) -> GetFileResponse Get a file - + :param file_id: str - + :returns: :class:`GetFileResponse` .. py:method:: list(file_parent: FileParent [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[FileInfo] List files attached to a parent entity. 
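A minimal sketch of listing files attached to a listing (assuming a configured `WorkspaceClient`, a placeholder listing ID, and that `FileParent` takes a parent type and a parent ID; those field names are an assumption, not confirmed by this page):

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import marketplace

    w = WorkspaceClient()

    # Placeholder listing ID; the FileParent field names are assumptions.
    parent = marketplace.FileParent(
        file_parent_type=marketplace.FileParentType.LISTING,
        parent_id="listing-id",
    )
    for file in w.provider_files.list(file_parent=parent):
        print(file.id, file.display_name)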
- + :param file_parent: :class:`FileParent` :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`FileInfo` \ No newline at end of file diff --git a/docs/workspace/marketplace/provider_listings.rst b/docs/workspace/marketplace/provider_listings.rst index 253dc8679..0aeea7575 100644 --- a/docs/workspace/marketplace/provider_listings.rst +++ b/docs/workspace/marketplace/provider_listings.rst @@ -10,46 +10,46 @@ .. py:method:: create(listing: Listing) -> CreateListingResponse Create a new listing - + :param listing: :class:`Listing` - + :returns: :class:`CreateListingResponse` .. py:method:: delete(id: str) Delete a listing - + :param id: str - - + + .. py:method:: get(id: str) -> GetListingResponse Get a listing - + :param id: str - + :returns: :class:`GetListingResponse` .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[Listing] List listings owned by this provider - + :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`Listing` .. py:method:: update(id: str, listing: Listing) -> UpdateListingResponse Update a listing - + :param id: str :param listing: :class:`Listing` - + :returns: :class:`UpdateListingResponse` \ No newline at end of file diff --git a/docs/workspace/marketplace/provider_personalization_requests.rst b/docs/workspace/marketplace/provider_personalization_requests.rst index 43ce06ca8..98aa10e92 100644 --- a/docs/workspace/marketplace/provider_personalization_requests.rst +++ b/docs/workspace/marketplace/provider_personalization_requests.rst @@ -11,22 +11,22 @@ List personalization requests to this provider. This will return all personalization requests, regardless of which listing they are for. - + :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`PersonalizationRequest` .. py:method:: update(listing_id: str, request_id: str, status: PersonalizationRequestStatus [, reason: Optional[str], share: Optional[ShareInfo]]) -> UpdatePersonalizationRequestResponse Update personalization request. This method only permits updating the status of the request. - + :param listing_id: str :param request_id: str :param status: :class:`PersonalizationRequestStatus` :param reason: str (optional) :param share: :class:`ShareInfo` (optional) - + :returns: :class:`UpdatePersonalizationRequestResponse` \ No newline at end of file diff --git a/docs/workspace/marketplace/provider_provider_analytics_dashboards.rst b/docs/workspace/marketplace/provider_provider_analytics_dashboards.rst index 2e6878988..eec732681 100644 --- a/docs/workspace/marketplace/provider_provider_analytics_dashboards.rst +++ b/docs/workspace/marketplace/provider_provider_analytics_dashboards.rst @@ -10,36 +10,36 @@ Create provider analytics dashboard. Returns Marketplace specific `id`. Not to be confused with the Lakeview dashboard id. - - + + :returns: :class:`ProviderAnalyticsDashboard` .. py:method:: get() -> ListProviderAnalyticsDashboardResponse Get provider analytics dashboard. - - + + :returns: :class:`ListProviderAnalyticsDashboardResponse` .. py:method:: get_latest_version() -> GetLatestVersionProviderAnalyticsDashboardResponse Get latest version of provider analytics dashboard. - - + + :returns: :class:`GetLatestVersionProviderAnalyticsDashboardResponse` .. py:method:: update(id: str [, version: Optional[int]]) -> UpdateProviderAnalyticsDashboardResponse Update provider analytics dashboard. 
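A minimal sketch of bringing the dashboard up to the latest template version (assuming a configured `WorkspaceClient` and that the `get` and `get_latest_version` responses expose `id` and `version` fields; both are assumptions, not confirmed by this page):

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Fetch the dashboard and the latest template version (field names assumed).
    dashboard = w.provider_provider_analytics_dashboards.get()
    latest = w.provider_provider_analytics_dashboards.get_latest_version()

    # Update the dashboard to the latest template version, as recommended below.
    w.provider_provider_analytics_dashboards.update(id=dashboard.id, version=latest.version)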
- + :param id: str id is an immutable property and can't be updated. :param version: int (optional) this is the version of the dashboard template we want to update our user to. The current expectation is that it should be equal to the latest version of the dashboard template - + :returns: :class:`UpdateProviderAnalyticsDashboardResponse` \ No newline at end of file diff --git a/docs/workspace/marketplace/provider_providers.rst b/docs/workspace/marketplace/provider_providers.rst index 8d7a9cb34..ff5d9d1c3 100644 --- a/docs/workspace/marketplace/provider_providers.rst +++ b/docs/workspace/marketplace/provider_providers.rst @@ -9,46 +9,46 @@ .. py:method:: create(provider: ProviderInfo) -> CreateProviderResponse Create a provider - + :param provider: :class:`ProviderInfo` - + :returns: :class:`CreateProviderResponse` .. py:method:: delete(id: str) Delete provider - + :param id: str - - + + .. py:method:: get(id: str) -> GetProviderResponse Get provider profile - + :param id: str - + :returns: :class:`GetProviderResponse` .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[ProviderInfo] List provider profiles for the account. - + :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`ProviderInfo` .. py:method:: update(id: str, provider: ProviderInfo) -> UpdateProviderResponse Update provider profile - + :param id: str :param provider: :class:`ProviderInfo` - + :returns: :class:`UpdateProviderResponse` \ No newline at end of file diff --git a/docs/workspace/ml/experiments.rst b/docs/workspace/ml/experiments.rst index 0a514b33c..e4db5838d 100644 --- a/docs/workspace/ml/experiments.rst +++ b/docs/workspace/ml/experiments.rst @@ -7,7 +7,7 @@ Experiments are the primary unit of organization in MLflow; all MLflow runs belong to an experiment. Each experiment lets you visualize, search, and compare runs, as well as download run artifacts or metadata for analysis in other tools. Experiments are maintained in a Databricks hosted MLflow tracking server. - + Experiments are located in the workspace file tree. You manage experiments using the same tools you use to manage other workspace objects such as folders, notebooks, and libraries. @@ -32,9 +32,9 @@ Creates an experiment with a name. Returns the ID of the newly created experiment. Validates that another experiment with the same name does not already exist and fails if one already exists. - + Throws `RESOURCE_ALREADY_EXISTS` if an experiment with the given name exists. - + :param name: str Experiment name. :param artifact_location: str (optional) @@ -45,14 +45,14 @@ depends on the storage backend. All storage backends are guaranteed to support tag keys up to 250 bytes in size and tag values up to 5000 bytes in size. All storage backends are also guaranteed to support up to 20 tags per request. - + :returns: :class:`CreateExperimentResponse` .. py:method:: create_logged_model(experiment_id: str [, model_type: Optional[str], name: Optional[str], params: Optional[List[LoggedModelParameter]], source_run_id: Optional[str], tags: Optional[List[LoggedModelTag]]]) -> CreateLoggedModelResponse Create a logged model. - + :param experiment_id: str The ID of the experiment that owns the model. :param model_type: str (optional) @@ -65,7 +65,7 @@ The ID of the run that created the model. :param tags: List[:class:`LoggedModelTag`] (optional) Tags attached to the model.
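A minimal sketch of creating a logged model (assuming a configured `WorkspaceClient`, a placeholder experiment ID, and that `LoggedModelTag` is a simple key/value pair):

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import ml

    w = WorkspaceClient()

    # Placeholder experiment ID; model_type and name are free-form strings.
    created = w.experiments.create_logged_model(
        experiment_id="1234567890",
        model_type="Agent",
        name="my-logged-model",
        tags=[ml.LoggedModelTag(key="team", value="ml-platform")],
    )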
- + :returns: :class:`CreateLoggedModelResponse` @@ -97,7 +97,7 @@ Creates a new run within an experiment. A run is usually a single execution of a machine learning or data ETL pipeline. MLflow uses runs to track the `mlflowParam`, `mlflowMetric`, and `mlflowRunTag` associated with a single execution. - + :param experiment_id: str (optional) ID of the associated experiment. :param run_name: str (optional) @@ -109,7 +109,7 @@ :param user_id: str (optional) ID of the user executing the run. This field is deprecated as of MLflow 1.0, and will be removed in a future MLflow release. Use 'mlflow.user' tag instead. - + :returns: :class:`CreateRunResponse` @@ -117,43 +117,43 @@ Marks an experiment and associated metadata, runs, metrics, params, and tags for deletion. If the experiment uses FileStore, artifacts associated with the experiment are also deleted. - + :param experiment_id: str ID of the associated experiment. - - + + .. py:method:: delete_logged_model(model_id: str) Delete a logged model. - + :param model_id: str The ID of the logged model to delete. - - + + .. py:method:: delete_logged_model_tag(model_id: str, tag_key: str) Delete a tag on a logged model. - + :param model_id: str The ID of the logged model to delete the tag from. :param tag_key: str The tag key. - - + + .. py:method:: delete_run(run_id: str) Marks a run for deletion. - + :param run_id: str ID of the run to delete. - - + + .. py:method:: delete_runs(experiment_id: str, max_timestamp_millis: int [, max_runs: Optional[int]]) -> DeleteRunsResponse @@ -161,7 +161,7 @@ Bulk delete runs in an experiment that were created prior to or at the specified timestamp. Deletes at most max_runs per request. To call this API from a Databricks Notebook in Python, you can use the client code snippet on - + :param experiment_id: str The ID of the experiment containing the runs to delete. :param max_timestamp_millis: int @@ -170,7 +170,7 @@ :param max_runs: int (optional) An optional positive integer indicating the maximum number of runs to delete. The maximum allowed value for max_runs is 10000. - + :returns: :class:`DeleteRunsResponse` @@ -178,41 +178,41 @@ Deletes a tag on a run. Tags are run metadata that can be updated during a run and after a run completes. - + :param run_id: str ID of the run that the tag was logged under. Must be provided. :param key: str Name of the tag. Maximum size is 255 bytes. Must be provided. - - + + .. py:method:: finalize_logged_model(model_id: str, status: LoggedModelStatus) -> FinalizeLoggedModelResponse Finalize a logged model. - + :param model_id: str The ID of the logged model to finalize. :param status: :class:`LoggedModelStatus` Whether or not the model is ready for use. ``"LOGGED_MODEL_UPLOAD_FAILED"`` indicates that something went wrong when logging the model weights / agent code. - + :returns: :class:`FinalizeLoggedModelResponse` .. py:method:: get_by_name(experiment_name: str) -> GetExperimentByNameResponse Gets metadata for an experiment. - + This endpoint will return deleted experiments, but prefers the active experiment if an active and deleted experiment share the same name. If multiple deleted experiments share the same name, the API will return one of them. - + Throws `RESOURCE_DOES_NOT_EXIST` if no experiment with the specified name exists. - + :param experiment_name: str Name of the associated experiment. - + :returns: :class:`GetExperimentByNameResponse` @@ -237,17 +237,17 @@ w.experiments.delete_experiment(experiment_id=experiment.experiment_id) Gets metadata for an experiment. 
This method works on deleted experiments. - + :param experiment_id: str ID of the associated experiment. - + :returns: :class:`GetExperimentResponse` .. py:method:: get_history(metric_key: str [, max_results: Optional[int], page_token: Optional[str], run_id: Optional[str], run_uuid: Optional[str]]) -> Iterator[Metric] Gets a list of all values for the specified metric for a given run. - + :param metric_key: str Name of the metric. :param max_results: int (optional) @@ -260,37 +260,37 @@ :param run_uuid: str (optional) [Deprecated, use `run_id` instead] ID of the run from which to fetch metric values. This field will be removed in a future MLflow version. - + :returns: Iterator over :class:`Metric` .. py:method:: get_logged_model(model_id: str) -> GetLoggedModelResponse Get a logged model. - + :param model_id: str The ID of the logged model to retrieve. - + :returns: :class:`GetLoggedModelResponse` .. py:method:: get_permission_levels(experiment_id: str) -> GetExperimentPermissionLevelsResponse Gets the permission levels that a user can have on an object. - + :param experiment_id: str The experiment for which to get or manage permissions. - + :returns: :class:`GetExperimentPermissionLevelsResponse` .. py:method:: get_permissions(experiment_id: str) -> ExperimentPermissions Gets the permissions of an experiment. Experiments can inherit permissions from their root object. - + :param experiment_id: str The experiment for which to get or manage permissions. - + :returns: :class:`ExperimentPermissions` @@ -298,15 +298,15 @@ Gets the metadata, metrics, params, and tags for a run. In the case where multiple metrics with the same key are logged for a run, return only the value with the latest timestamp. - + If there are multiple values with the latest timestamp, return the maximum of these values. - + :param run_id: str ID of the run to fetch. Must be provided. :param run_uuid: str (optional) [Deprecated, use `run_id` instead] ID of the run to fetch. This field will be removed in a future MLflow version. - + :returns: :class:`GetRunResponse` @@ -317,7 +317,7 @@ UC Volumes. Please call `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC Volumes, which supports pagination. See [List directory contents | Files API](/api/workspace/files/listdirectorycontents). - + :param page_token: str (optional) The token indicating the page of artifact results to fetch. `page_token` is not supported when listing artifacts in UC Volumes. A maximum of 1000 artifacts will be retrieved for UC Volumes. @@ -331,7 +331,7 @@ :param run_uuid: str (optional) [Deprecated, use `run_id` instead] ID of the run whose artifacts to list. This field will be removed in a future MLflow version. - + :returns: Iterator over :class:`FileInfo` @@ -350,7 +350,7 @@ all = w.experiments.list_experiments(ml.ListExperimentsRequest()) Gets a list of all experiments. - + :param max_results: int (optional) Maximum number of experiments desired. If `max_results` is unspecified, return all experiments. If `max_results` is too large, it'll be automatically capped at 1000. Callers of this endpoint are @@ -359,7 +359,7 @@ Token indicating the page of experiments to fetch :param view_type: :class:`ViewType` (optional) Qualifier for type of experiments to be returned. If unspecified, return only active experiments. - + :returns: Iterator over :class:`Experiment` @@ -367,45 +367,45 @@ Logs a batch of metrics, params, and tags for a run. If any data failed to be persisted, the server will respond with an error (non-200 status code). 
- + In case of error (due to internal server error or an invalid request), partial data may be written. - + You can write metrics, params, and tags in interleaving fashion, but writes within a given entity type are guaranteed to follow the order specified in the request body. - + The overwrite behavior for metrics, params, and tags is as follows: - + * Metrics: metric values are never overwritten. Logging a metric (key, value, timestamp) appends to the set of values for the metric with the provided key. - + * Tags: tag values can be overwritten by successive writes to the same tag key. That is, if multiple tag values with the same key are provided in the same API request, the last-provided tag value is written. Logging the same tag (key, value) is permitted. Specifically, logging a tag is idempotent. - + * Parameters: once written, param values cannot be changed (attempting to overwrite a param value will result in an error). However, logging the same param (key, value) is permitted. Specifically, logging a param is idempotent. - + Request Limits ------------------------------- A single JSON-serialized API request may be up to 1 MB in size and contain: - + * No more than 1000 metrics, params, and tags in total - + * Up to 1000 metrics - + * Up to 100 params - + * Up to 100 tags - + For example, a valid request might contain 900 metrics, 50 params, and 50 tags, but logging 900 metrics, 50 params, and 51 tags is invalid. - + The following limits also apply to metric, param, and tag keys and values: - + * Metric keys, param keys, and tag keys can be up to 250 characters in length - + * Parameter and tag values can be up to 250 characters in length - + :param metrics: List[:class:`Metric`] (optional) Metrics to log. A single request can contain up to 1000 metrics, and up to 1000 metrics, params, and tags in total. @@ -417,22 +417,22 @@ :param tags: List[:class:`RunTag`] (optional) Tags to log. A single request can contain up to 100 tags, and up to 1000 metrics, params, and tags in total. - - + + .. py:method:: log_inputs(run_id: str [, datasets: Optional[List[DatasetInput]], models: Optional[List[ModelInput]]]) Logs inputs, such as datasets and models, to an MLflow Run. - + :param run_id: str ID of the run to log under :param datasets: List[:class:`DatasetInput`] (optional) Dataset inputs :param models: List[:class:`ModelInput`] (optional) Model inputs - - + + .. py:method:: log_logged_model_params(model_id: str [, params: Optional[List[LoggedModelParameter]]]) @@ -440,13 +440,13 @@ Logs params for a logged model. A param is a key-value pair (string key, string value). Examples include hyperparameters used for ML model training. A param can be logged only once for a logged model, and attempting to overwrite an existing param with a different value will result in an error. - + :param model_id: str The ID of the logged model to log params for. :param params: List[:class:`LoggedModelParameter`] (optional) Parameters to attach to the model. - - + + .. py:method:: log_metric(key: str, value: float, timestamp: int [, dataset_digest: Optional[str], dataset_name: Optional[str], model_id: Optional[str], run_id: Optional[str], run_uuid: Optional[str], step: Optional[int]]) @@ -454,7 +454,7 @@ Log a metric for a run. A metric is a key-value pair (string key, float value) with an associated timestamp. Examples include the various metrics that represent ML model accuracy. A metric can be logged multiple times. - + :param key: str Name of the metric.
:param value: float @@ -476,35 +476,35 @@ removed in a future MLflow version. :param step: int (optional) Step at which to log the metric - - + + .. py:method:: log_model( [, model_json: Optional[str], run_id: Optional[str]]) **Note:** the [Create a logged model](/api/workspace/experiments/createloggedmodel) API replaces this endpoint. - + Log a model to an MLflow Run. - + :param model_json: str (optional) MLmodel file in json format. :param run_id: str (optional) ID of the run to log under - - + + .. py:method:: log_outputs(run_id: str [, models: Optional[List[ModelOutput]]]) Logs outputs, such as models, from an MLflow Run. - + :param run_id: str The ID of the Run from which to log outputs. :param models: List[:class:`ModelOutput`] (optional) The model outputs from the Run. - - + + .. py:method:: log_param(key: str, value: str [, run_id: Optional[str], run_uuid: Optional[str]]) @@ -512,7 +512,7 @@ Logs a param used for a run. A param is a key-value pair (string key, string value). Examples include hyperparameters used for ML model training and constant dates and values used in an ETL pipeline. A param can be logged only once for a run. - + :param key: str Name of the param. Maximum size is 255 bytes. :param value: str @@ -522,8 +522,8 @@ :param run_uuid: str (optional) [Deprecated, use `run_id` instead] ID of the run under which to log the param. This field will be removed in a future MLflow version. - - + + .. py:method:: restore_experiment(experiment_id: str) @@ -531,25 +531,25 @@ Restore an experiment marked for deletion. This also restores associated metadata, runs, metrics, params, and tags. If experiment uses FileStore, underlying artifacts associated with experiment are also restored. - + Throws `RESOURCE_DOES_NOT_EXIST` if experiment was never created or was permanently deleted. - + :param experiment_id: str ID of the associated experiment. - - + + .. py:method:: restore_run(run_id: str) Restores a deleted run. This also restores associated metadata, runs, metrics, params, and tags. - + Throws `RESOURCE_DOES_NOT_EXIST` if the run was never created or was permanently deleted. - + :param run_id: str ID of the run to restore. - - + + .. py:method:: restore_runs(experiment_id: str, min_timestamp_millis: int [, max_runs: Optional[int]]) -> RestoreRunsResponse @@ -557,7 +557,7 @@ Bulk restore runs in an experiment that were deleted no earlier than the specified timestamp. Restores at most max_runs per request. To call this API from a Databricks Notebook in Python, you can use the client code snippet on - + :param experiment_id: str The ID of the experiment containing the runs to restore. :param min_timestamp_millis: int @@ -566,14 +566,14 @@ :param max_runs: int (optional) An optional positive integer indicating the maximum number of runs to restore. The maximum allowed value for max_runs is 10000. - + :returns: :class:`RestoreRunsResponse` .. py:method:: search_experiments( [, filter: Optional[str], max_results: Optional[int], order_by: Optional[List[str]], page_token: Optional[str], view_type: Optional[ViewType]]) -> Iterator[Experiment] Searches for experiments that satisfy specified search criteria. - + :param filter: str (optional) String representing a SQL filter condition (e.g. "name ILIKE 'my-experiment%'") :param max_results: int (optional) @@ -586,14 +586,14 @@ Token indicating the page of experiments to fetch :param view_type: :class:`ViewType` (optional) Qualifier for type of experiments to be returned. If unspecified, return only active experiments. 
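A minimal sketch of searching experiments with the filter syntax shown above (assuming a configured `WorkspaceClient`; the filter string is the example given in the `filter` parameter description):

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import ml

    w = WorkspaceClient()

    # Iterate over active experiments whose name matches a prefix.
    for experiment in w.experiments.search_experiments(
        filter="name ILIKE 'my-experiment%'",
        view_type=ml.ViewType.ACTIVE_ONLY,
    ):
        print(experiment.experiment_id, experiment.name)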
- + :returns: Iterator over :class:`Experiment` .. py:method:: search_logged_models( [, datasets: Optional[List[SearchLoggedModelsDataset]], experiment_ids: Optional[List[str]], filter: Optional[str], max_results: Optional[int], order_by: Optional[List[SearchLoggedModelsOrderBy]], page_token: Optional[str]]) -> SearchLoggedModelsResponse Search for Logged Models that satisfy specified search criteria. - + :param datasets: List[:class:`SearchLoggedModelsDataset`] (optional) List of datasets on which to apply the metrics filter clauses. For example, a filter with `metrics.accuracy > 0.9` and dataset info with name "test_dataset" means we will return all logged @@ -605,7 +605,7 @@ :param filter: str (optional) A filter expression over logged model info and data that allows returning a subset of logged models. The syntax is a subset of SQL that supports AND'ing together binary operations. - + Example: ``params.alpha < 0.3 AND metrics.accuracy > 0.9``. :param max_results: int (optional) The maximum number of Logged Models to return. The maximum limit is 50. @@ -613,28 +613,28 @@ The list of columns for ordering the results, with additional fields for sorting criteria. :param page_token: str (optional) The token indicating the page of logged models to fetch. - + :returns: :class:`SearchLoggedModelsResponse` .. py:method:: search_runs( [, experiment_ids: Optional[List[str]], filter: Optional[str], max_results: Optional[int], order_by: Optional[List[str]], page_token: Optional[str], run_view_type: Optional[ViewType]]) -> Iterator[Run] Searches for runs that satisfy expressions. - + Search expressions can use `mlflowMetric` and `mlflowParam` keys. - + :param experiment_ids: List[str] (optional) List of experiment IDs to search over. :param filter: str (optional) A filter expression over params, metrics, and tags, that allows returning a subset of runs. The syntax is a subset of SQL that supports ANDing together binary operations between a param, metric, or tag and a constant. - + Example: `metrics.rmse < 1 and params.model_class = 'LogisticRegression'` - + You can select columns with special characters (hyphen, space, period, etc.) by using double quotes: `metrics."model class" = 'LinearRegression' and tags."user-name" = 'Tomas'` - + Supported operators are `=`, `!=`, `>`, `>=`, `<`, and `<=`. :param max_results: int (optional) Maximum number of runs desired. Max threshold is 50000 @@ -648,52 +648,52 @@ Token for the current page of runs. :param run_view_type: :class:`ViewType` (optional) Whether to display only active, only deleted, or all runs. Defaults to only active runs. - + :returns: Iterator over :class:`Run` .. py:method:: set_experiment_tag(experiment_id: str, key: str, value: str) Sets a tag on an experiment. Experiment tags are metadata that can be updated. - + :param experiment_id: str ID of the experiment under which to log the tag. Must be provided. :param key: str Name of the tag. Keys up to 250 bytes in size are supported. :param value: str String value of the tag being logged. Values up to 64KB in size are supported. - - + + .. py:method:: set_logged_model_tags(model_id: str [, tags: Optional[List[LoggedModelTag]]]) Set tags for a logged model. - + :param model_id: str The ID of the logged model to set the tags on. :param tags: List[:class:`LoggedModelTag`] (optional) The tags to set on the logged model. - - + + .. 
py:method:: set_permissions(experiment_id: str [, access_control_list: Optional[List[ExperimentAccessControlRequest]]]) -> ExperimentPermissions Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. - + :param experiment_id: str The experiment for which to get or manage permissions. :param access_control_list: List[:class:`ExperimentAccessControlRequest`] (optional) - + :returns: :class:`ExperimentPermissions` .. py:method:: set_tag(key: str, value: str [, run_id: Optional[str], run_uuid: Optional[str]]) Sets a tag on a run. Tags are run metadata that can be updated during a run and after a run completes. - + :param key: str Name of the tag. Keys up to 250 bytes in size are supported. :param value: str @@ -703,8 +703,8 @@ :param run_uuid: str (optional) [Deprecated, use `run_id` instead] ID of the run under which to log the tag. This field will be removed in a future MLflow version. - - + + .. py:method:: update_experiment(experiment_id: str [, new_name: Optional[str]]) @@ -728,23 +728,23 @@ w.experiments.delete_experiment(experiment_id=experiment.experiment_id) Updates experiment metadata. - + :param experiment_id: str ID of the associated experiment. :param new_name: str (optional) If provided, the experiment's name is changed to the new name. The new name must be unique. - - + + .. py:method:: update_permissions(experiment_id: str [, access_control_list: Optional[List[ExperimentAccessControlRequest]]]) -> ExperimentPermissions Updates the permissions on an experiment. Experiments can inherit permissions from their root object. - + :param experiment_id: str The experiment for which to get or manage permissions. :param access_control_list: List[:class:`ExperimentAccessControlRequest`] (optional) - + :returns: :class:`ExperimentPermissions` @@ -776,7 +776,7 @@ w.experiments.delete_run(run_id=created.run.info.run_id) Updates run metadata. - + :param end_time: int (optional) Unix timestamp in milliseconds of when the run ended. :param run_id: str (optional) @@ -788,6 +788,6 @@ MLflow version. :param status: :class:`UpdateRunStatus` (optional) Updated status of the run. - + :returns: :class:`UpdateRunResponse` \ No newline at end of file diff --git a/docs/workspace/ml/feature_store.rst b/docs/workspace/ml/feature_store.rst index c85fd5e59..a950ee43d 100644 --- a/docs/workspace/ml/feature_store.rst +++ b/docs/workspace/ml/feature_store.rst @@ -7,74 +7,74 @@ A feature store is a centralized repository that enables data scientists to find and share features. Using a feature store also ensures that the code used to compute feature values is the same during model training and when the model is used for inference. - + An online store is a low-latency database used for feature lookup during real-time model inference or to serve features for real-time applications. .. py:method:: create_online_store(online_store: OnlineStore) -> OnlineStore Create an Online Feature Store. - + :param online_store: :class:`OnlineStore` Online store to create. - + :returns: :class:`OnlineStore` .. py:method:: delete_online_store(name: str) Delete an Online Feature Store. - + :param name: str Name of the online store to delete. - - + + .. py:method:: get_online_store(name: str) -> OnlineStore Get an Online Feature Store. - + :param name: str Name of the online store to get. - + :returns: :class:`OnlineStore` ..
py:method:: list_online_stores( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[OnlineStore] List Online Feature Stores. - + :param page_size: int (optional) The maximum number of results to return. Defaults to 100 if not specified. :param page_token: str (optional) Pagination token to go to the next page based on a previous query. - + :returns: Iterator over :class:`OnlineStore` .. py:method:: publish_table(source_table_name: str, publish_spec: PublishSpec) -> PublishTableResponse Publish features. - + :param source_table_name: str The full three-part (catalog, schema, table) name of the source table. :param publish_spec: :class:`PublishSpec` The specification for publishing the online table from the source table. - + :returns: :class:`PublishTableResponse` .. py:method:: update_online_store(name: str, online_store: OnlineStore, update_mask: str) -> OnlineStore Update an Online Feature Store. - + :param name: str The name of the online store. This is the unique identifier for the online store. :param online_store: :class:`OnlineStore` Online store to update. :param update_mask: str The list of fields to update. - + :returns: :class:`OnlineStore` \ No newline at end of file diff --git a/docs/workspace/ml/forecasting.rst b/docs/workspace/ml/forecasting.rst index 3a65c4242..905f68cb7 100644 --- a/docs/workspace/ml/forecasting.rst +++ b/docs/workspace/ml/forecasting.rst @@ -9,7 +9,7 @@ .. py:method:: create_experiment(train_data_path: str, target_column: str, time_column: str, forecast_granularity: str, forecast_horizon: int [, custom_weights_column: Optional[str], experiment_path: Optional[str], future_feature_data_path: Optional[str], holiday_regions: Optional[List[str]], include_features: Optional[List[str]], max_runtime: Optional[int], prediction_data_path: Optional[str], primary_metric: Optional[str], register_to: Optional[str], split_column: Optional[str], timeseries_identifier_columns: Optional[List[str]], training_frameworks: Optional[List[str]]]) -> Wait[ForecastingExperiment] Creates a serverless forecasting experiment. Returns the experiment ID. - + :param train_data_path: str The fully qualified path of a Unity Catalog table, formatted as catalog_name.schema_name.table_name, used as training data for the forecasting model. @@ -58,7 +58,7 @@ :param training_frameworks: List[str] (optional) List of frameworks to include for model tuning. Possible values are 'Prophet', 'ARIMA', 'DeepAR'. An empty list includes all supported frameworks. - + :returns: Long-running operation waiter for :class:`ForecastingExperiment`. See :method:wait_get_experiment_forecasting_succeeded for more details. @@ -70,10 +70,10 @@ .. py:method:: get_experiment(experiment_id: str) -> ForecastingExperiment Public RPC to get forecasting experiment - + :param experiment_id: str The unique ID of a forecasting experiment - + :returns: :class:`ForecastingExperiment` diff --git a/docs/workspace/ml/materialized_features.rst b/docs/workspace/ml/materialized_features.rst index 03c4d53f8..422f3035a 100644 --- a/docs/workspace/ml/materialized_features.rst +++ b/docs/workspace/ml/materialized_features.rst @@ -10,75 +10,75 @@ .. py:method:: create_feature_tag(table_name: str, feature_name: str, feature_tag: FeatureTag) -> FeatureTag Creates a FeatureTag. - + :param table_name: str :param feature_name: str :param feature_tag: :class:`FeatureTag` - + :returns: :class:`FeatureTag` .. py:method:: delete_feature_tag(table_name: str, feature_name: str, key: str) Deletes a FeatureTag. 
- + :param table_name: str The name of the feature table. :param feature_name: str The name of the feature within the feature table. :param key: str The key of the tag to delete. - - + + .. py:method:: get_feature_lineage(table_name: str, feature_name: str) -> FeatureLineage Get Feature Lineage. - + :param table_name: str The full name of the feature table in Unity Catalog. :param feature_name: str The name of the feature. - + :returns: :class:`FeatureLineage` .. py:method:: get_feature_tag(table_name: str, feature_name: str, key: str) -> FeatureTag Gets a FeatureTag. - + :param table_name: str :param feature_name: str :param key: str - + :returns: :class:`FeatureTag` .. py:method:: list_feature_tags(table_name: str, feature_name: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[FeatureTag] Lists FeatureTags. - + :param table_name: str :param feature_name: str :param page_size: int (optional) The maximum number of results to return. :param page_token: str (optional) Pagination token to go to the next page based on a previous query. - + :returns: Iterator over :class:`FeatureTag` .. py:method:: update_feature_tag(table_name: str, feature_name: str, key: str, feature_tag: FeatureTag [, update_mask: Optional[str]]) -> FeatureTag Updates a FeatureTag. - + :param table_name: str :param feature_name: str :param key: str :param feature_tag: :class:`FeatureTag` :param update_mask: str (optional) The list of fields to update. - + :returns: :class:`FeatureTag` \ No newline at end of file diff --git a/docs/workspace/ml/model_registry.rst b/docs/workspace/ml/model_registry.rst index 2d34256e4..d4db855c3 100644 --- a/docs/workspace/ml/model_registry.rst +++ b/docs/workspace/ml/model_registry.rst @@ -8,33 +8,33 @@ [Models in Unity Catalog](/api/workspace/registeredmodels) instead. Models in Unity Catalog provides centralized model governance, cross-workspace access, lineage, and deployment. Workspace Model Registry will be deprecated in the future. - + The Workspace Model Registry is a centralized model repository and a UI and set of APIs that enable you to manage the full lifecycle of MLflow Models. .. py:method:: approve_transition_request(name: str, version: str, stage: str, archive_existing_versions: bool [, comment: Optional[str]]) -> ApproveTransitionRequestResponse Approves a model version stage transition request. - + :param name: str Name of the model. :param version: str Version of the model. :param stage: str Target stage of the transition. Valid values are: - + * `None`: The initial stage of a model version. - + * `Staging`: Staging or pre-production stage. - + * `Production`: Production stage. - + * `Archived`: Archived stage. :param archive_existing_versions: bool Specifies whether to archive all current model versions in the target stage. :param comment: str (optional) User-provided comment on the action. - + :returns: :class:`ApproveTransitionRequestResponse` @@ -66,14 +66,14 @@ Posts a comment on a model version. A comment can be submitted either by a user or programmatically to display relevant information about the model. For example, test results or deployment errors. - + :param name: str Name of the model. :param version: str Version of the model. :param comment: str User-provided comment on the action. - + :returns: :class:`CreateCommentResponse` @@ -94,14 +94,14 @@ Creates a new registered model with the name specified in the request body. Throws `RESOURCE_ALREADY_EXISTS` if a registered model with the given name exists. 
- + :param name: str Register models under this name :param description: str (optional) Optional description for registered model. :param tags: List[:class:`ModelTag`] (optional) Additional metadata for registered model. - + :returns: :class:`CreateModelResponse` @@ -123,7 +123,7 @@ mv = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") Creates a model version. - + :param name: str Register model under this name :param source: str @@ -138,31 +138,31 @@ hosted at another instance of MLflow. :param tags: List[:class:`ModelVersionTag`] (optional) Additional metadata for model version. - + :returns: :class:`CreateModelVersionResponse` .. py:method:: create_transition_request(name: str, version: str, stage: str [, comment: Optional[str]]) -> CreateTransitionRequestResponse Creates a model version stage transition request. - + :param name: str Name of the model. :param version: str Version of the model. :param stage: str Target stage of the transition. Valid values are: - + * `None`: The initial stage of a model version. - + * `Staging`: Staging or pre-production stage. - + * `Production`: Production stage. - + * `Archived`: Archived stage. :param comment: str (optional) User-provided comment on the action. - + :returns: :class:`CreateTransitionRequestResponse` @@ -190,35 +190,35 @@ w.model_registry.delete_webhook(id=created.webhook.id) **NOTE:** This endpoint is in Public Preview. Creates a registry webhook. - + :param events: List[:class:`RegistryWebhookEvent`] Events that can trigger a registry webhook: * `MODEL_VERSION_CREATED`: A new model version was created for the associated model. - + * `MODEL_VERSION_TRANSITIONED_STAGE`: A model version’s stage was changed. - + * `TRANSITION_REQUEST_CREATED`: A user requested a model version’s stage be transitioned. - + * `COMMENT_CREATED`: A user wrote a comment on a registered model. - + * `REGISTERED_MODEL_CREATED`: A new registered model was created. This event type can only be specified for a registry-wide webhook, which can be created by not specifying a model name in the create request. - + * `MODEL_VERSION_TAG_SET`: A user set a tag on the model version. - + * `MODEL_VERSION_TRANSITIONED_TO_STAGING`: A model version was transitioned to staging. - + * `MODEL_VERSION_TRANSITIONED_TO_PRODUCTION`: A model version was transitioned to production. - + * `MODEL_VERSION_TRANSITIONED_TO_ARCHIVED`: A model version was archived. - + * `TRANSITION_REQUEST_TO_STAGING_CREATED`: A user requested a model version be transitioned to staging. - + * `TRANSITION_REQUEST_TO_PRODUCTION_CREATED`: A user requested a model version be transitioned to production. - + * `TRANSITION_REQUEST_TO_ARCHIVED_CREATED`: A user requested a model version be archived. :param description: str (optional) User-specified description for the webhook. @@ -232,64 +232,64 @@ :param status: :class:`RegistryWebhookStatus` (optional) Enable or disable triggering the webhook, or put the webhook into test mode. The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an associated event happens. - + * `DISABLED`: Webhook is not triggered. - + * `TEST_MODE`: Webhook can be triggered through the test endpoint, but is not triggered on a real event. - + :returns: :class:`CreateWebhookResponse` .. py:method:: delete_comment(id: str) Deletes a comment on a model version. - + :param id: str Unique identifier of an activity - - + + .. py:method:: delete_model(name: str) Deletes a registered model. - + :param name: str Registered model unique name identifier. 
- - + + .. py:method:: delete_model_tag(name: str, key: str) Deletes the tag for a registered model. - + :param name: str Name of the registered model that the tag was logged under. :param key: str Name of the tag. The name must be an exact match; wild-card deletion is not supported. Maximum size is 250 bytes. - - + + .. py:method:: delete_model_version(name: str, version: str) Deletes a model version. - + :param name: str Name of the registered model :param version: str Model version number - - + + .. py:method:: delete_model_version_tag(name: str, version: str, key: str) Deletes a model version tag. - + :param name: str Name of the registered model that the tag was logged under. :param version: str @@ -297,56 +297,56 @@ :param key: str Name of the tag. The name must be an exact match; wild-card deletion is not supported. Maximum size is 250 bytes. - - + + .. py:method:: delete_transition_request(name: str, version: str, stage: str, creator: str [, comment: Optional[str]]) -> DeleteTransitionRequestResponse Cancels a model version stage transition request. - + :param name: str Name of the model. :param version: str Version of the model. :param stage: str Target stage of the transition request. Valid values are: - + * `None`: The initial stage of a model version. - + * `Staging`: Staging or pre-production stage. - + * `Production`: Production stage. - + * `Archived`: Archived stage. :param creator: str Username of the user who created this request. Of the transition requests matching the specified details, only the one transition created by this user will be deleted. :param comment: str (optional) User-provided comment on the action. - + :returns: :class:`DeleteTransitionRequestResponse` .. py:method:: delete_webhook(id: str) **NOTE:** This endpoint is in Public Preview. Deletes a registry webhook. - + :param id: str Webhook ID required to delete a registry webhook. - - + + .. py:method:: get_latest_versions(name: str [, stages: Optional[List[str]]]) -> Iterator[ModelVersion] Gets the latest version of a registered model. - + :param name: str Registered model unique name identifier. :param stages: List[str] (optional) List of stages. - + :returns: Iterator over :class:`ModelVersion` @@ -370,46 +370,46 @@ Get the details of a model. This is a Databricks workspace version of the [MLflow endpoint] that also returns the model's Databricks workspace ID and the permission level of the requesting user on the model. - + [MLflow endpoint]: https://www.mlflow.org/docs/latest/rest-api.html#get-registeredmodel - + :param name: str Registered model unique name identifier. - + :returns: :class:`GetModelResponse` .. py:method:: get_model_version(name: str, version: str) -> GetModelVersionResponse Get a model version. - + :param name: str Name of the registered model :param version: str Model version number - + :returns: :class:`GetModelVersionResponse` .. py:method:: get_model_version_download_uri(name: str, version: str) -> GetModelVersionDownloadUriResponse Gets a URI to download the model version. - + :param name: str Name of the registered model :param version: str Model version number - + :returns: :class:`GetModelVersionDownloadUriResponse` .. py:method:: get_permission_levels(registered_model_id: str) -> GetRegisteredModelPermissionLevelsResponse Gets the permission levels that a user can have on an object. - + :param registered_model_id: str The registered model for which to get or manage permissions. 
- + :returns: :class:`GetRegisteredModelPermissionLevelsResponse` @@ -417,10 +417,10 @@ Gets the permissions of a registered model. Registered models can inherit permissions from their root object. - + :param registered_model_id: str The registered model for which to get or manage permissions. - + :returns: :class:`RegisteredModelPermissions` @@ -439,24 +439,24 @@ all = w.model_registry.list_models(ml.ListModelsRequest()) Lists all available registered models, up to the limit specified in __max_results__. - + :param max_results: int (optional) Maximum number of registered models desired. Max threshold is 1000. :param page_token: str (optional) Pagination token to go to the next page based on a previous query. - + :returns: Iterator over :class:`Model` .. py:method:: list_transition_requests(name: str, version: str) -> Iterator[Activity] Gets a list of all open stage transition requests for the model version. - + :param name: str Name of the registered model. :param version: str Version of the model. - + :returns: Iterator over :class:`Activity` @@ -475,37 +475,37 @@ all = w.model_registry.list_webhooks(ml.ListWebhooksRequest()) **NOTE:** This endpoint is in Public Preview. Lists all registry webhooks. - + :param events: List[:class:`RegistryWebhookEvent`] (optional) Events that trigger the webhook. * `MODEL_VERSION_CREATED`: A new model version was created for the associated model. - + * `MODEL_VERSION_TRANSITIONED_STAGE`: A model version’s stage was changed. - + * `TRANSITION_REQUEST_CREATED`: A user requested a model version’s stage be transitioned. - + * `COMMENT_CREATED`: A user wrote a comment on a registered model. - + * `REGISTERED_MODEL_CREATED`: A new registered model was created. This event type can only be specified for a registry-wide webhook, which can be created by not specifying a model name in the create request. - + * `MODEL_VERSION_TAG_SET`: A user set a tag on the model version. - + * `MODEL_VERSION_TRANSITIONED_TO_STAGING`: A model version was transitioned to staging. - + * `MODEL_VERSION_TRANSITIONED_TO_PRODUCTION`: A model version was transitioned to production. - + * `MODEL_VERSION_TRANSITIONED_TO_ARCHIVED`: A model version was archived. - + * `TRANSITION_REQUEST_TO_STAGING_CREATED`: A user requested a model version be transitioned to staging. - + * `TRANSITION_REQUEST_TO_PRODUCTION_CREATED`: A user requested a model version be transitioned to production. - + * `TRANSITION_REQUEST_TO_ARCHIVED_CREATED`: A user requested a model version be archived. - + If `events` is specified, any webhook with one or more of the specified trigger events is included in the output. If `events` is not specified, webhooks of all event types are included in the output. :param max_results: int (optional) @@ -514,50 +514,50 @@ listed, regardless of their associated model. :param page_token: str (optional) Token indicating the page of artifact results to fetch - + :returns: Iterator over :class:`RegistryWebhook` .. py:method:: reject_transition_request(name: str, version: str, stage: str [, comment: Optional[str]]) -> RejectTransitionRequestResponse Rejects a model version stage transition request. - + :param name: str Name of the model. :param version: str Version of the model. :param stage: str Target stage of the transition. Valid values are: - + * `None`: The initial stage of a model version. - + * `Staging`: Staging or pre-production stage. - + * `Production`: Production stage. - + * `Archived`: Archived stage. :param comment: str (optional) User-provided comment on the action. 
- + :returns: :class:`RejectTransitionRequestResponse` .. py:method:: rename_model(name: str [, new_name: Optional[str]]) -> RenameModelResponse Renames a registered model. - + :param name: str Registered model unique name identifier. :param new_name: str (optional) If provided, updates the name for this `registered_model`. - + :returns: :class:`RenameModelResponse` .. py:method:: search_model_versions( [, filter: Optional[str], max_results: Optional[int], order_by: Optional[List[str]], page_token: Optional[str]]) -> Iterator[ModelVersion] Searches for specific model versions based on the supplied __filter__. - + :param filter: str (optional) String filter condition, like "name='my-model-name'". Must be a single boolean condition, with string values wrapped in single quotes. @@ -569,14 +569,14 @@ timestamp, followed by name ASC, followed by version DESC. :param page_token: str (optional) Pagination token to go to next page based on previous search query. - + :returns: Iterator over :class:`ModelVersion` .. py:method:: search_models( [, filter: Optional[str], max_results: Optional[int], order_by: Optional[List[str]], page_token: Optional[str]]) -> Iterator[Model] Search for registered models based on the specified __filter__. - + :param filter: str (optional) String filter condition, like "name LIKE 'my-model-name'". Interpreted in the backend automatically as "name LIKE '%my-model-name%'". Single boolean condition, with string values wrapped in single @@ -589,14 +589,14 @@ name ASC. :param page_token: str (optional) Pagination token to go to the next page based on a previous search query. - + :returns: Iterator over :class:`Model` .. py:method:: set_model_tag(name: str, key: str, value: str) Sets a tag on a registered model. - + :param name: str Unique name of the model. :param key: str @@ -606,14 +606,14 @@ :param value: str String value of the tag being logged. Maximum size depends on storage backend. All storage backends are guaranteed to support key values up to 5000 bytes in size. - - + + .. py:method:: set_model_version_tag(name: str, version: str, key: str, value: str) Sets a model version tag. - + :param name: str Unique name of the model. :param version: str @@ -625,32 +625,32 @@ :param value: str String value of the tag being logged. Maximum size depends on storage backend. All storage backends are guaranteed to support key values up to 5000 bytes in size. - - + + .. py:method:: set_permissions(registered_model_id: str [, access_control_list: Optional[List[RegisteredModelAccessControlRequest]]]) -> RegisteredModelPermissions Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. - + :param registered_model_id: str The registered model for which to get or manage permissions. :param access_control_list: List[:class:`RegisteredModelAccessControlRequest`] (optional) - + :returns: :class:`RegisteredModelPermissions` .. py:method:: test_registry_webhook(id: str [, event: Optional[RegistryWebhookEvent]]) -> TestRegistryWebhookResponse **NOTE:** This endpoint is in Public Preview. Tests a registry webhook. - + :param id: str Webhook ID :param event: :class:`RegistryWebhookEvent` (optional) If `event` is specified, the test trigger uses the specified event. If `event` is not specified, the test trigger uses a randomly chosen event associated with the webhook. 
- + :returns: :class:`TestRegistryWebhookResponse` @@ -658,28 +658,28 @@ Transition a model version's stage. This is a Databricks workspace version of the [MLflow endpoint] that also accepts a comment associated with the transition to be recorded. - + [MLflow endpoint]: https://www.mlflow.org/docs/latest/rest-api.html#transition-modelversion-stage - + :param name: str Name of the model. :param version: str Version of the model. :param stage: str Target stage of the transition. Valid values are: - + * `None`: The initial stage of a model version. - + * `Staging`: Staging or pre-production stage. - + * `Production`: Production stage. - + * `Archived`: Archived stage. :param archive_existing_versions: bool Specifies whether to archive all current model versions in the target stage. :param comment: str (optional) User-provided comment on the action. - + :returns: :class:`TransitionStageResponse` @@ -712,12 +712,12 @@ w.model_registry.delete_comment(id=created.comment.id) Post an edit to a comment on a model version. - + :param id: str Unique identifier of an activity :param comment: str User-provided comment on the action. - + :returns: :class:`UpdateCommentResponse` @@ -745,12 +745,12 @@ ) Updates a registered model. - + :param name: str Registered model unique name identifier. :param description: str (optional) If provided, updates the description for this `registered_model`. - + :returns: :class:`UpdateModelResponse` @@ -778,14 +778,14 @@ ) Updates the model version. - + :param name: str Name of the registered model :param version: str Model version number :param description: str (optional) If provided, updates the description for this `registered_model`. - + :returns: :class:`UpdateModelVersionResponse` @@ -793,11 +793,11 @@ Updates the permissions on a registered model. Registered models can inherit permissions from their root object. - + :param registered_model_id: str The registered model for which to get or manage permissions. :param access_control_list: List[:class:`RegisteredModelAccessControlRequest`] (optional) - + :returns: :class:`RegisteredModelPermissions` @@ -827,7 +827,7 @@ w.model_registry.delete_webhook(id=created.webhook.id) **NOTE:** This endpoint is in Public Preview. Updates a registry webhook. - + :param id: str Webhook ID :param description: str (optional) @@ -835,35 +835,35 @@ :param events: List[:class:`RegistryWebhookEvent`] (optional) Events that can trigger a registry webhook: * `MODEL_VERSION_CREATED`: A new model version was created for the associated model. - + * `MODEL_VERSION_TRANSITIONED_STAGE`: A model version’s stage was changed. - + * `TRANSITION_REQUEST_CREATED`: A user requested a model version’s stage be transitioned. - + * `COMMENT_CREATED`: A user wrote a comment on a registered model. - + * `REGISTERED_MODEL_CREATED`: A new registered model was created. This event type can only be specified for a registry-wide webhook, which can be created by not specifying a model name in the create request. - + * `MODEL_VERSION_TAG_SET`: A user set a tag on the model version. - + * `MODEL_VERSION_TRANSITIONED_TO_STAGING`: A model version was transitioned to staging. - + * `MODEL_VERSION_TRANSITIONED_TO_PRODUCTION`: A model version was transitioned to production. - + * `MODEL_VERSION_TRANSITIONED_TO_ARCHIVED`: A model version was archived. - + * `TRANSITION_REQUEST_TO_STAGING_CREATED`: A user requested a model version be transitioned to staging. - + * `TRANSITION_REQUEST_TO_PRODUCTION_CREATED`: A user requested a model version be transitioned to production. 
- + * `TRANSITION_REQUEST_TO_ARCHIVED_CREATED`: A user requested a model version be archived. :param http_url_spec: :class:`HttpUrlSpec` (optional) :param job_spec: :class:`JobSpec` (optional) :param status: :class:`RegistryWebhookStatus` (optional) - + :returns: :class:`UpdateWebhookResponse` \ No newline at end of file diff --git a/docs/workspace/oauth2/service_principal_secrets_proxy.rst b/docs/workspace/oauth2/service_principal_secrets_proxy.rst index 929c8fa72..1bb9df8e3 100644 --- a/docs/workspace/oauth2/service_principal_secrets_proxy.rst +++ b/docs/workspace/oauth2/service_principal_secrets_proxy.rst @@ -6,48 +6,47 @@ These APIs enable administrators to manage service principal secrets at the workspace level. To use these APIs, the service principal must first be added to the current workspace. - + You can use the generated secrets to obtain OAuth access tokens for a service principal, which can then be used to access Databricks Accounts and Workspace APIs. For more information, see [Authentication using OAuth tokens for service principals]. - + In addition, the generated secrets can be used to configure the Databricks Terraform Provider to authenticate with the service principal. For more information, see [Databricks Terraform Provider]. - + [Authentication using OAuth tokens for service principals]: https://docs.databricks.com/dev-tools/authentication-oauth.html [Databricks Terraform Provider]: https://github.com/databricks/terraform-provider-databricks/blob/master/docs/index.md#authenticating-with-service-principal - .. py:method:: create(service_principal_id: str [, lifetime: Optional[str]]) -> CreateServicePrincipalSecretResponse Create a secret for the given service principal. - + :param service_principal_id: str The service principal ID. :param lifetime: str (optional) The lifetime of the secret in seconds. If this parameter is not provided, the secret will have a default lifetime of 730 days (63072000s). - + :returns: :class:`CreateServicePrincipalSecretResponse` .. py:method:: delete(service_principal_id: str, secret_id: str) Delete a secret from the given service principal. - + :param service_principal_id: str The service principal ID. :param secret_id: str The secret ID. - - + + .. py:method:: list(service_principal_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[SecretInfo] List all secrets associated with the given service principal. This operation only returns information about the secrets themselves and does not include the secret values. - + :param service_principal_id: str The service principal ID. :param page_size: int (optional) @@ -58,6 +57,6 @@ previous request. To list all of the secrets for a service principal, it is necessary to continue requesting pages of entries until the response contains no `next_page_token`. Note that the number of entries returned must not be used to determine when the listing is complete. - + :returns: Iterator over :class:`SecretInfo` \ No newline at end of file diff --git a/docs/workspace/pipelines/pipelines.rst b/docs/workspace/pipelines/pipelines.rst index 7e0ae7b5f..f1a966946 100644 --- a/docs/workspace/pipelines/pipelines.rst +++ b/docs/workspace/pipelines/pipelines.rst @@ -5,11 +5,11 @@ .. py:class:: PipelinesAPI The Delta Live Tables API allows you to create, edit, delete, start, and view details about pipelines. - + Delta Live Tables is a framework for building reliable, maintainable, and testable data processing pipelines. 
You define the transformations to perform on your data, and Delta Live Tables manages task orchestration, cluster management, monitoring, data quality, and error handling. - + Instead of defining your data pipelines using a series of separate Apache Spark tasks, Delta Live Tables manages how your data is transformed based on a target schema you define for each processing step. You can also enforce data quality with Delta Live Tables expectations. Expectations allow you to define expected @@ -53,7 +53,7 @@ Creates a new data processing pipeline based on the requested configuration. If successful, this method returns the ID of the new pipeline. - + :param allow_duplicate_names: bool (optional) If false, deployment will fail if name conflicts with that of another pipeline. :param budget_policy_id: str (optional) @@ -120,7 +120,7 @@ for pipeline creation in favor of the `schema` field. :param trigger: :class:`PipelineTrigger` (optional) Which pipeline trigger to use. Deprecated: Use `continuous` instead. - + :returns: :class:`CreatePipelineResponse` @@ -128,10 +128,10 @@ Deletes a pipeline. Deleting a pipeline is a permanent action that stops and removes the pipeline and its tables. You cannot undo this action. - + :param pipeline_id: str - - + + .. py:method:: get(pipeline_id: str) -> GetPipelineResponse @@ -173,41 +173,41 @@ w.pipelines.delete(pipeline_id=created.pipeline_id) Get a pipeline. - + :param pipeline_id: str - + :returns: :class:`GetPipelineResponse` .. py:method:: get_permission_levels(pipeline_id: str) -> GetPipelinePermissionLevelsResponse Gets the permission levels that a user can have on an object. - + :param pipeline_id: str The pipeline for which to get or manage permissions. - + :returns: :class:`GetPipelinePermissionLevelsResponse` .. py:method:: get_permissions(pipeline_id: str) -> PipelinePermissions Gets the permissions of a pipeline. Pipelines can inherit permissions from their root object. - + :param pipeline_id: str The pipeline for which to get or manage permissions. - + :returns: :class:`PipelinePermissions` .. py:method:: get_update(pipeline_id: str, update_id: str) -> GetUpdateResponse Gets an update from an active pipeline. - + :param pipeline_id: str The ID of the pipeline. :param update_id: str The ID of the update. - + :returns: :class:`GetUpdateResponse` @@ -250,14 +250,14 @@ w.pipelines.delete(pipeline_id=created.pipeline_id) Retrieves events for a pipeline. - + :param pipeline_id: str The pipeline to return events for. :param filter: str (optional) Criteria to select a subset of results, expressed using a SQL-like syntax. The supported filters are: 1. level='INFO' (or WARN or ERROR) 2. level in ('INFO', 'WARN') 3. id='[event-id]' 4. timestamp > 'TIMESTAMP' (or >=,<,<=,=) - + Composite expressions are supported, for example: level in ('ERROR', 'WARN') AND timestamp> '2021-07-22T06:37:33.083Z' :param max_results: int (optional) @@ -271,7 +271,7 @@ Page token returned by previous call. This field is mutually exclusive with all fields in this request except max_results. An error is returned if any fields other than max_results are set when this field is set. - + :returns: Iterator over :class:`PipelineEvent` @@ -290,14 +290,14 @@ all = w.pipelines.list_pipelines(pipelines.ListPipelinesRequest()) Lists pipelines defined in the Delta Live Tables system. - + :param filter: str (optional) Select a subset of results based on the specified criteria. The supported filters are: - + * `notebook=''` to select pipelines that reference the provided notebook path. 
* `name LIKE '[pattern]'` to select pipelines with a name that matches pattern. Wildcards are supported, for example: `name LIKE '%shopping%'` - + Composite filters are not supported. This field is optional. :param max_results: int (optional) The maximum number of entries to return in a single page. The system may return fewer than @@ -309,14 +309,14 @@ default is id asc. This field is optional. :param page_token: str (optional) Page token returned by previous call - + :returns: Iterator over :class:`PipelineStateInfo` .. py:method:: list_updates(pipeline_id: str [, max_results: Optional[int], page_token: Optional[str], until_update_id: Optional[str]]) -> ListUpdatesResponse List updates for an active pipeline. - + :param pipeline_id: str The pipeline to return updates for. :param max_results: int (optional) @@ -325,7 +325,7 @@ Page token returned by previous call :param until_update_id: str (optional) If present, returns updates until and including this update_id. - + :returns: :class:`ListUpdatesResponse` @@ -333,11 +333,11 @@ Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. - + :param pipeline_id: str The pipeline for which to get or manage permissions. :param access_control_list: List[:class:`PipelineAccessControlRequest`] (optional) - + :returns: :class:`PipelinePermissions` @@ -345,7 +345,7 @@ Starts a new update for the pipeline. If there is already an active update for the pipeline, the request will fail and the active update will remain running. - + :param pipeline_id: str :param cause: :class:`StartUpdateCause` (optional) :param full_refresh: bool (optional) @@ -361,7 +361,7 @@ :param validate_only: bool (optional) If true, this update only validates the correctness of pipeline source code but does not materialize or publish any datasets. - + :returns: :class:`StartUpdateResponse` @@ -369,9 +369,9 @@ Stops the pipeline by canceling the active update. If there is no active update for the pipeline, this request is a no-op. - + :param pipeline_id: str - + :returns: Long-running operation waiter for :class:`GetPipelineResponse`. See :method:wait_get_pipeline_idle for more details. @@ -433,7 +433,7 @@ w.pipelines.delete(pipeline_id=created.pipeline_id) Updates a pipeline with the supplied configuration. - + :param pipeline_id: str Unique identifier for this pipeline. :param allow_duplicate_names: bool (optional) @@ -504,18 +504,18 @@ for pipeline creation in favor of the `schema` field. :param trigger: :class:`PipelineTrigger` (optional) Which pipeline trigger to use. Deprecated: Use `continuous` instead. - - + + .. py:method:: update_permissions(pipeline_id: str [, access_control_list: Optional[List[PipelineAccessControlRequest]]]) -> PipelinePermissions Updates the permissions on a pipeline. Pipelines can inherit permissions from their root object. - + :param pipeline_id: str The pipeline for which to get or manage permissions. :param access_control_list: List[:class:`PipelineAccessControlRequest`] (optional) - + :returns: :class:`PipelinePermissions` diff --git a/docs/workspace/qualitymonitorv2/quality_monitor_v2.rst b/docs/workspace/qualitymonitorv2/quality_monitor_v2.rst index 006d137da..e03006aa0 100644 --- a/docs/workspace/qualitymonitorv2/quality_monitor_v2.rst +++ b/docs/workspace/qualitymonitorv2/quality_monitor_v2.rst @@ -9,55 +9,55 @@ .. 
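As a worked example of the `list_pipelines` filter syntax described earlier, the following minimal sketch selects pipelines by name pattern; the `%shopping%` pattern is a placeholder.

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Wildcard match on the pipeline name; composite filters are not supported.
    for p in w.pipelines.list_pipelines(filter="name LIKE '%shopping%'", max_results=50):
        print(p.pipeline_id, p.name, p.state)

..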
py:method:: create_quality_monitor(quality_monitor: QualityMonitor) -> QualityMonitor Create a quality monitor on UC object - + :param quality_monitor: :class:`QualityMonitor` - + :returns: :class:`QualityMonitor` .. py:method:: delete_quality_monitor(object_type: str, object_id: str) Delete a quality monitor on UC object - + :param object_type: str The type of the monitored object. Can be one of the following: schema. :param object_id: str The uuid of the request object. For example, schema id. - - + + .. py:method:: get_quality_monitor(object_type: str, object_id: str) -> QualityMonitor Read a quality monitor on UC object - + :param object_type: str The type of the monitored object. Can be one of the following: schema. :param object_id: str The uuid of the request object. For example, schema id. - + :returns: :class:`QualityMonitor` .. py:method:: list_quality_monitor( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[QualityMonitor] (Unimplemented) List quality monitors - + :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`QualityMonitor` .. py:method:: update_quality_monitor(object_type: str, object_id: str, quality_monitor: QualityMonitor) -> QualityMonitor (Unimplemented) Update a quality monitor on UC object - + :param object_type: str The type of the monitored object. Can be one of the following: schema. :param object_id: str The uuid of the request object. For example, schema id. :param quality_monitor: :class:`QualityMonitor` - + :returns: :class:`QualityMonitor` \ No newline at end of file diff --git a/docs/workspace/serving/serving_endpoints.rst b/docs/workspace/serving/serving_endpoints.rst index c96f3fbbe..cf2621fdb 100644 --- a/docs/workspace/serving/serving_endpoints.rst +++ b/docs/workspace/serving/serving_endpoints.rst @@ -5,7 +5,7 @@ .. py:class:: ServingEndpointsExt The Serving Endpoints API allows you to create, update, and delete model serving endpoints. - + You can use a serving endpoint to serve models from the Databricks Model Registry or from Unity Catalog. Endpoints expose the underlying models as scalable REST API endpoints using serverless compute. This means the endpoints and associated compute resources are fully managed by Databricks and will not appear in your @@ -18,19 +18,19 @@ .. py:method:: build_logs(name: str, served_model_name: str) -> BuildLogsResponse Retrieves the build logs associated with the provided served model. - + :param name: str The name of the serving endpoint that the served model belongs to. This field is required. :param served_model_name: str The name of the served model that build logs will be retrieved for. This field is required. - + :returns: :class:`BuildLogsResponse` .. py:method:: create(name: str [, ai_gateway: Optional[AiGatewayConfig], budget_policy_id: Optional[str], config: Optional[EndpointCoreConfigInput], description: Optional[str], email_notifications: Optional[EmailNotifications], rate_limits: Optional[List[RateLimit]], route_optimized: Optional[bool], tags: Optional[List[EndpointTag]]]) -> Wait[ServingEndpointDetailed] Create a new serving endpoint. - + :param name: str The name of the serving endpoint. This field is required and must be unique across a Databricks workspace. An endpoint name can consist of alphanumeric characters, dashes, and underscores. @@ -52,7 +52,7 @@ Enable route optimization for the serving endpoint. 
:param tags: List[:class:`EndpointTag`] (optional) Tags to be attached to the serving endpoint and automatically propagated to billing logs. - + :returns: Long-running operation waiter for :class:`ServingEndpointDetailed`. See :method:wait_get_serving_endpoint_not_updating for more details. @@ -64,7 +64,7 @@ .. py:method:: create_provisioned_throughput_endpoint(name: str, config: PtEndpointCoreConfig [, ai_gateway: Optional[AiGatewayConfig], budget_policy_id: Optional[str], email_notifications: Optional[EmailNotifications], tags: Optional[List[EndpointTag]]]) -> Wait[ServingEndpointDetailed] Create a new PT serving endpoint. - + :param name: str The name of the serving endpoint. This field is required and must be unique across a Databricks workspace. An endpoint name can consist of alphanumeric characters, dashes, and underscores. @@ -78,7 +78,7 @@ Email notification settings. :param tags: List[:class:`EndpointTag`] (optional) Tags to be attached to the serving endpoint and automatically propagated to billing logs. - + :returns: Long-running operation waiter for :class:`ServingEndpointDetailed`. See :method:wait_get_serving_endpoint_not_updating for more details. @@ -90,30 +90,30 @@ .. py:method:: delete(name: str) Delete a serving endpoint. - + :param name: str - - + + .. py:method:: export_metrics(name: str) -> ExportMetricsResponse Retrieves the metrics associated with the provided serving endpoint in either Prometheus or OpenMetrics exposition format. - + :param name: str The name of the serving endpoint to retrieve metrics for. This field is required. - + :returns: :class:`ExportMetricsResponse` .. py:method:: get(name: str) -> ServingEndpointDetailed Retrieves the details for a single serving endpoint. - + :param name: str The name of the serving endpoint. This field is required. - + :returns: :class:`ServingEndpointDetailed` @@ -161,20 +161,20 @@ Get the query schema of the serving endpoint in OpenAPI format. The schema contains information for the supported paths, input and output format and datatypes. - + :param name: str The name of the serving endpoint that the served model belongs to. This field is required. - + :returns: :class:`GetOpenApiResponse` .. py:method:: get_permission_levels(serving_endpoint_id: str) -> GetServingEndpointPermissionLevelsResponse Gets the permission levels that a user can have on an object. - + :param serving_endpoint_id: str The serving endpoint for which to get or manage permissions. - + :returns: :class:`GetServingEndpointPermissionLevelsResponse` @@ -182,10 +182,10 @@ Gets the permissions of a serving endpoint. Serving endpoints can inherit permissions from their root object. - + :param serving_endpoint_id: str The serving endpoint for which to get or manage permissions. - + :returns: :class:`ServingEndpointPermissions` @@ -212,46 +212,46 @@ .. py:method:: list() -> Iterator[ServingEndpoint] Get all serving endpoints. - - + + :returns: Iterator over :class:`ServingEndpoint` .. py:method:: logs(name: str, served_model_name: str) -> ServerLogsResponse Retrieves the service logs associated with the provided served model. - + :param name: str The name of the serving endpoint that the served model belongs to. This field is required. :param served_model_name: str The name of the served model that logs will be retrieved for. This field is required. - + :returns: :class:`ServerLogsResponse` .. 
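A minimal sketch tying together the read-only serving endpoint calls above; the endpoint and served model names are hypothetical placeholders.

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Enumerate every serving endpoint in the workspace.
    for endpoint in w.serving_endpoints.list():
        print(endpoint.name, endpoint.state)

    # Pull the service logs for one served model on a known endpoint.
    logs = w.serving_endpoints.logs(name="my-endpoint", served_model_name="my-model")
    print(logs.logs)

..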
py:method:: patch(name: str [, add_tags: Optional[List[EndpointTag]], delete_tags: Optional[List[str]]]) -> EndpointTags Used to batch add and delete tags from a serving endpoint with a single API call. - + :param name: str The name of the serving endpoint whose tags are to be patched. This field is required. :param add_tags: List[:class:`EndpointTag`] (optional) List of endpoint tags to add :param delete_tags: List[str] (optional) List of tag keys to delete - + :returns: :class:`EndpointTags` .. py:method:: put(name: str [, rate_limits: Optional[List[RateLimit]]]) -> PutResponse Deprecated: Please use AI Gateway to manage rate limits instead. - + :param name: str The name of the serving endpoint whose rate limits are being updated. This field is required. :param rate_limits: List[:class:`RateLimit`] (optional) The list of endpoint rate limits. - + :returns: :class:`PutResponse` @@ -259,7 +259,7 @@ Used to update the AI Gateway of a serving endpoint. NOTE: External model, provisioned throughput, and pay-per-token endpoints are fully supported; agent endpoints currently only support inference tables. - + :param name: str The name of the serving endpoint whose AI Gateway is being updated. This field is required. :param fallback_config: :class:`FallbackConfig` (optional) @@ -275,14 +275,14 @@ :param usage_tracking_config: :class:`AiGatewayUsageTrackingConfig` (optional) Configuration to enable usage tracking using system tables. These tables allow you to monitor operational usage on endpoints and their associated costs. - + :returns: :class:`PutAiGatewayResponse` .. py:method:: query(name: str [, client_request_id: Optional[str], dataframe_records: Optional[List[Any]], dataframe_split: Optional[DataframeSplitInput], extra_params: Optional[Dict[str, str]], input: Optional[Any], inputs: Optional[Any], instances: Optional[List[Any]], max_tokens: Optional[int], messages: Optional[List[ChatMessage]], n: Optional[int], prompt: Optional[Any], stop: Optional[List[str]], stream: Optional[bool], temperature: Optional[float], usage_context: Optional[Dict[str, str]]]) -> QueryEndpointResponse Query a serving endpoint - + :param name: str The name of the serving endpoint. This field is required and is provided via the path parameter. :param client_request_id: str (optional) @@ -331,7 +331,7 @@ other chat/completions query fields. :param usage_context: Dict[str,str] (optional) Optional user-provided context that will be recorded in the usage tracking table. - + :returns: :class:`QueryEndpointResponse` @@ -339,11 +339,11 @@ Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. - + :param serving_endpoint_id: str The serving endpoint for which to get or manage permissions. :param access_control_list: List[:class:`ServingEndpointAccessControlRequest`] (optional) - + :returns: :class:`ServingEndpointPermissions` @@ -352,7 +352,7 @@ Updates any combination of the serving endpoint's served entities, the compute configuration of those served entities, and the endpoint's traffic config. An endpoint that already has an update in progress cannot be updated until the current update completes or fails. - + :param name: str The name of the serving endpoint to update. This field is required. :param auto_capture_config: :class:`AutoCaptureConfigInput` (optional) @@ -367,7 +367,7 @@ config. 
:param traffic_config: :class:`TrafficConfig` (optional) The traffic configuration associated with the serving endpoint config. - + :returns: Long-running operation waiter for :class:`ServingEndpointDetailed`. See :method:wait_get_serving_endpoint_not_updating for more details. @@ -380,11 +380,11 @@ Updates the permissions on a serving endpoint. Serving endpoints can inherit permissions from their root object. - + :param serving_endpoint_id: str The serving endpoint for which to get or manage permissions. :param access_control_list: List[:class:`ServingEndpointAccessControlRequest`] (optional) - + :returns: :class:`ServingEndpointPermissions` @@ -393,11 +393,11 @@ Updates any combination of the pt endpoint's served entities, the compute configuration of those served entities, and the endpoint's traffic config. Updates are instantaneous and the endpoint should be updated instantly. - + :param name: str The name of the pt endpoint to update. This field is required. :param config: :class:`PtEndpointCoreConfig` - + :returns: Long-running operation waiter for :class:`ServingEndpointDetailed`. See :method:wait_get_serving_endpoint_not_updating for more details. diff --git a/docs/workspace/serving/serving_endpoints_data_plane.rst b/docs/workspace/serving/serving_endpoints_data_plane.rst index 028231da3..91da6ab75 100644 --- a/docs/workspace/serving/serving_endpoints_data_plane.rst +++ b/docs/workspace/serving/serving_endpoints_data_plane.rst @@ -10,7 +10,7 @@ .. py:method:: query(name: str [, client_request_id: Optional[str], dataframe_records: Optional[List[Any]], dataframe_split: Optional[DataframeSplitInput], extra_params: Optional[Dict[str, str]], input: Optional[Any], inputs: Optional[Any], instances: Optional[List[Any]], max_tokens: Optional[int], messages: Optional[List[ChatMessage]], n: Optional[int], prompt: Optional[Any], stop: Optional[List[str]], stream: Optional[bool], temperature: Optional[float], usage_context: Optional[Dict[str, str]]]) -> QueryEndpointResponse Query a serving endpoint - + :param name: str The name of the serving endpoint. This field is required and is provided via the path parameter. :param client_request_id: str (optional) @@ -59,6 +59,6 @@ other chat/completions query fields. :param usage_context: Dict[str,str] (optional) Optional user-provided context that will be recorded in the usage tracking table. - + :returns: :class:`QueryEndpointResponse` \ No newline at end of file diff --git a/docs/workspace/settings/aibi_dashboard_embedding_access_policy.rst b/docs/workspace/settings/aibi_dashboard_embedding_access_policy.rst index e471a8257..5179309d0 100644 --- a/docs/workspace/settings/aibi_dashboard_embedding_access_policy.rst +++ b/docs/workspace/settings/aibi_dashboard_embedding_access_policy.rst @@ -10,14 +10,14 @@ .. py:method:: delete( [, etag: Optional[str]]) -> DeleteAibiDashboardEmbeddingAccessPolicySettingResponse Delete the AI/BI dashboard embedding access policy, reverting back to the default. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. 
- + :returns: :class:`DeleteAibiDashboardEmbeddingAccessPolicySettingResponse` @@ -25,21 +25,21 @@ Retrieves the AI/BI dashboard embedding access policy. The default setting is ALLOW_APPROVED_DOMAINS, permitting AI/BI dashboards to be embedded on approved domains. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`AibiDashboardEmbeddingAccessPolicySetting` .. py:method:: update(allow_missing: bool, setting: AibiDashboardEmbeddingAccessPolicySetting, field_mask: str) -> AibiDashboardEmbeddingAccessPolicySetting Updates the AI/BI dashboard embedding access policy at the workspace level. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`AibiDashboardEmbeddingAccessPolicySetting` @@ -49,10 +49,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`AibiDashboardEmbeddingAccessPolicySetting` \ No newline at end of file diff --git a/docs/workspace/settings/aibi_dashboard_embedding_approved_domains.rst b/docs/workspace/settings/aibi_dashboard_embedding_approved_domains.rst index 53812bee9..0631da21f 100644 --- a/docs/workspace/settings/aibi_dashboard_embedding_approved_domains.rst +++ b/docs/workspace/settings/aibi_dashboard_embedding_approved_domains.rst @@ -11,28 +11,28 @@ Delete the list of domains approved to host embedded AI/BI dashboards, reverting back to the default empty list. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse` .. py:method:: get( [, etag: Optional[str]]) -> AibiDashboardEmbeddingApprovedDomainsSetting Retrieves the list of domains approved to host embedded AI/BI dashboards. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. 
That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`AibiDashboardEmbeddingApprovedDomainsSetting` @@ -40,7 +40,7 @@ Updates the list of domains approved to host embedded AI/BI dashboards. This update will fail if the current workspace access policy is not ALLOW_APPROVED_DOMAINS. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`AibiDashboardEmbeddingApprovedDomainsSetting` @@ -50,10 +50,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`AibiDashboardEmbeddingApprovedDomainsSetting` \ No newline at end of file diff --git a/docs/workspace/settings/automatic_cluster_update.rst b/docs/workspace/settings/automatic_cluster_update.rst index 7ba756530..f7126b881 100644 --- a/docs/workspace/settings/automatic_cluster_update.rst +++ b/docs/workspace/settings/automatic_cluster_update.rst @@ -10,14 +10,14 @@ .. py:method:: get( [, etag: Optional[str]]) -> AutomaticClusterUpdateSetting Gets the automatic cluster update setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`AutomaticClusterUpdateSetting` @@ -27,7 +27,7 @@ `PATCH` requests (as part of the setting field). The etag can be retrieved by making a `GET` request before the `PATCH` request. If the setting is updated concurrently, `PATCH` fails with 409 and the request must be retried by using the fresh etag in the 409 response. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`AutomaticClusterUpdateSetting` @@ -37,10 +37,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`AutomaticClusterUpdateSetting` \ No newline at end of file diff --git a/docs/workspace/settings/compliance_security_profile.rst b/docs/workspace/settings/compliance_security_profile.rst index de2894e86..3a577912f 100644 --- a/docs/workspace/settings/compliance_security_profile.rst +++ b/docs/workspace/settings/compliance_security_profile.rst @@ -6,20 +6,20 @@ Controls whether to enable the compliance security profile for the current workspace. Enabling it on a workspace is permanent. By default, it is turned off. 
- + This setting can NOT be disabled once it is enabled. .. py:method:: get( [, etag: Optional[str]]) -> ComplianceSecurityProfileSetting Gets the compliance security profile setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`ComplianceSecurityProfileSetting` @@ -29,7 +29,7 @@ in `PATCH` requests (as part of the setting field). The etag can be retrieved by making a `GET` request before the `PATCH` request. If the setting is updated concurrently, `PATCH` fails with 409 and the request must be retried by using the fresh etag in the 409 response. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`ComplianceSecurityProfileSetting` @@ -39,10 +39,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`ComplianceSecurityProfileSetting` \ No newline at end of file diff --git a/docs/workspace/settings/credentials_manager.rst b/docs/workspace/settings/credentials_manager.rst index 8063d42ad..362e947f0 100644 --- a/docs/workspace/settings/credentials_manager.rst +++ b/docs/workspace/settings/credentials_manager.rst @@ -11,13 +11,13 @@ Exchange tokens with an Identity Provider to get a new access token. It allows specifying scopes to determine token permissions. - + :param partition_id: :class:`PartitionId` The partition of Credentials store :param token_type: List[:class:`TokenType`] A list of token types being requested :param scopes: List[str] Array of scopes for the token request. - + :returns: :class:`ExchangeTokenResponse` \ No newline at end of file diff --git a/docs/workspace/settings/dashboard_email_subscriptions.rst b/docs/workspace/settings/dashboard_email_subscriptions.rst index ac8c3e834..ebea6950d 100644 --- a/docs/workspace/settings/dashboard_email_subscriptions.rst +++ b/docs/workspace/settings/dashboard_email_subscriptions.rst @@ -11,35 +11,35 @@ .. py:method:: delete( [, etag: Optional[str]]) -> DeleteDashboardEmailSubscriptionsResponse Reverts the Dashboard Email Subscriptions setting to its default value. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DeleteDashboardEmailSubscriptionsResponse` ..
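The read -> delete etag handshake described above looks like the following minimal sketch, assuming the setting is exposed as `w.settings.dashboard_email_subscriptions` (the accessor path is an assumption; it may differ by SDK version).

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Fetch first so the revert carries a fresh etag (optimistic concurrency control).
    current = w.settings.dashboard_email_subscriptions.get()

    # Pass the etag with the delete; on a 409 conflict, re-read and retry.
    response = w.settings.dashboard_email_subscriptions.delete(etag=current.etag)
    print(response.etag)

..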
py:method:: get( [, etag: Optional[str]]) -> DashboardEmailSubscriptions Gets the Dashboard Email Subscriptions setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DashboardEmailSubscriptions` .. py:method:: update(allow_missing: bool, setting: DashboardEmailSubscriptions, field_mask: str) -> DashboardEmailSubscriptions Updates the Dashboard Email Subscriptions setting. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`DashboardEmailSubscriptions` @@ -49,10 +49,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`DashboardEmailSubscriptions` \ No newline at end of file diff --git a/docs/workspace/settings/default_namespace.rst b/docs/workspace/settings/default_namespace.rst index d435a3575..d23accb81 100644 --- a/docs/workspace/settings/default_namespace.rst +++ b/docs/workspace/settings/default_namespace.rst @@ -6,12 +6,12 @@ The default namespace setting API allows users to configure the default namespace for a Databricks workspace. - + Through this API, users can retrieve, set, or modify the default namespace used when queries do not reference a fully qualified three-level name. For example, if you use the API to set 'retail_prod' as the default catalog, then a query 'SELECT * FROM myTable' would reference the object 'retail_prod.default.myTable' (the schema 'default' is always assumed). - + This setting requires a restart of clusters and SQL warehouses to take effect. Additionally, the default namespace only applies when using Unity Catalog-enabled compute. @@ -21,28 +21,28 @@ requests (as a query parameter). The etag can be retrieved by making a `GET` request before the `DELETE` request. If the setting is updated/deleted concurrently, `DELETE` fails with 409 and the request must be retried by using the fresh etag in the 409 response. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DeleteDefaultNamespaceSettingResponse` .. py:method:: get( [, etag: Optional[str]]) -> DefaultNamespaceSetting Gets the default namespace setting. - + :param etag: str (optional) etag used for versioning. 
The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DefaultNamespaceSetting` @@ -54,7 +54,7 @@ etag is present in the error response, which should be set in the `PATCH` request. If the setting is updated concurrently, `PATCH` fails with 409 and the request must be retried by using the fresh etag in the 409 response. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`DefaultNamespaceSetting` @@ -64,10 +64,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`DefaultNamespaceSetting` \ No newline at end of file diff --git a/docs/workspace/settings/default_warehouse_id.rst b/docs/workspace/settings/default_warehouse_id.rst index 4da9ff8c0..2643f58f7 100644 --- a/docs/workspace/settings/default_warehouse_id.rst +++ b/docs/workspace/settings/default_warehouse_id.rst @@ -10,35 +10,35 @@ .. py:method:: delete( [, etag: Optional[str]]) -> DeleteDefaultWarehouseIdResponse Reverts the Default Warehouse Id setting to its default value. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DeleteDefaultWarehouseIdResponse` .. py:method:: get( [, etag: Optional[str]]) -> DefaultWarehouseId Gets the Default Warehouse Id setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DefaultWarehouseId` .. py:method:: update(allow_missing: bool, setting: DefaultWarehouseId, field_mask: str) -> DefaultWarehouseId Updates the Default Warehouse Id setting. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`DefaultWarehouseId` @@ -48,10 +48,10 @@ `author.given_name`). 
Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`DefaultWarehouseId` \ No newline at end of file diff --git a/docs/workspace/settings/disable_legacy_access.rst b/docs/workspace/settings/disable_legacy_access.rst index 4f36218af..c14cdd52b 100644 --- a/docs/workspace/settings/disable_legacy_access.rst +++ b/docs/workspace/settings/disable_legacy_access.rst @@ -5,7 +5,7 @@ .. py:class:: DisableLegacyAccessAPI 'Disabling legacy access' has the following impacts: - + 1. Disables direct access to Hive Metastores from the workspace. However, you can still access a Hive Metastore through Hive Metastore federation. 2. Disables fallback mode on external location access from the workspace. 3. Disables Databricks Runtime versions prior to 13.3LTS. @@ -13,35 +13,35 @@ .. py:method:: delete( [, etag: Optional[str]]) -> DeleteDisableLegacyAccessResponse Deletes legacy access disablement status. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DeleteDisableLegacyAccessResponse` .. py:method:: get( [, etag: Optional[str]]) -> DisableLegacyAccess Retrieves legacy access disablement status. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DisableLegacyAccess` .. py:method:: update(allow_missing: bool, setting: DisableLegacyAccess, field_mask: str) -> DisableLegacyAccess Updates legacy access disablement status. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`DisableLegacyAccess` @@ -51,10 +51,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future.
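As a hedged sketch of this update pattern (the `w.settings.disable_legacy_access` accessor, the `BooleanMessage` payload type, and the `disable_legacy_access.value` mask are assumptions based on the setting's shape, not confirmed by this patch):

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import settings

    w = WorkspaceClient()

    # Enable the setting, naming the changed field explicitly in
    # `field_mask` instead of using the `*` wildcard.
    updated = w.settings.disable_legacy_access.update(
        allow_missing=True,
        setting=settings.DisableLegacyAccess(
            disable_legacy_access=settings.BooleanMessage(value=True)
        ),
        field_mask="disable_legacy_access.value",
    )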
- + :returns: :class:`DisableLegacyAccess` \ No newline at end of file diff --git a/docs/workspace/settings/disable_legacy_dbfs.rst b/docs/workspace/settings/disable_legacy_dbfs.rst index d54495db1..15ee7b848 100644 --- a/docs/workspace/settings/disable_legacy_dbfs.rst +++ b/docs/workspace/settings/disable_legacy_dbfs.rst @@ -5,10 +5,10 @@ .. py:class:: DisableLegacyDbfsAPI Disabling legacy DBFS has the following implications: - + 1. Access to DBFS root and DBFS mounts is disallowed (as well as the creation of new mounts). 2. Disables Databricks Runtime versions prior to 13.3LTS. - + When the setting is off, all DBFS functionality is enabled and no restrictions are imposed on Databricks Runtime versions. This setting can take up to 20 minutes to take effect and requires a manual restart of all-purpose compute clusters and SQL warehouses. @@ -16,35 +16,35 @@ .. py:method:: delete( [, etag: Optional[str]]) -> DeleteDisableLegacyDbfsResponse Deletes the disable legacy DBFS setting for a workspace, reverting back to the default. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DeleteDisableLegacyDbfsResponse` .. py:method:: get( [, etag: Optional[str]]) -> DisableLegacyDbfs Gets the disable legacy DBFS setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DisableLegacyDbfs` .. py:method:: update(allow_missing: bool, setting: DisableLegacyDbfs, field_mask: str) -> DisableLegacyDbfs Updates the disable legacy DBFS setting for the workspace. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`DisableLegacyDbfs` @@ -54,10 +54,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`DisableLegacyDbfs` \ No newline at end of file diff --git a/docs/workspace/settings/enable_export_notebook.rst b/docs/workspace/settings/enable_export_notebook.rst index 8d7df57d5..a61407400 100644 --- a/docs/workspace/settings/enable_export_notebook.rst +++ b/docs/workspace/settings/enable_export_notebook.rst @@ -10,8 +10,8 @@ .. py:method:: get_enable_export_notebook() -> EnableExportNotebook Gets the Notebook and File exporting setting. 
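A minimal sketch of calling this getter (only the method name comes from this doc; the `w.settings.enable_export_notebook` accessor and the `boolean_val` payload field are assumptions):

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Fetch the current exporting setting; no parameters are required.
    setting = w.settings.enable_export_notebook.get_enable_export_notebook()
    print(setting.boolean_val)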
- - + + :returns: :class:`EnableExportNotebook` @@ -19,7 +19,7 @@ Updates the Notebook and File exporting setting. The model follows eventual consistency, which means the get after the update operation might receive stale values for some time. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`EnableExportNotebook` @@ -29,10 +29,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`EnableExportNotebook` \ No newline at end of file diff --git a/docs/workspace/settings/enable_notebook_table_clipboard.rst b/docs/workspace/settings/enable_notebook_table_clipboard.rst index c3e7b656e..e7a35b439 100644 --- a/docs/workspace/settings/enable_notebook_table_clipboard.rst +++ b/docs/workspace/settings/enable_notebook_table_clipboard.rst @@ -10,8 +10,8 @@ .. py:method:: get_enable_notebook_table_clipboard() -> EnableNotebookTableClipboard Gets the Results Table Clipboard features setting. - - + + :returns: :class:`EnableNotebookTableClipboard` @@ -19,7 +19,7 @@ Updates the Results Table Clipboard features setting. The model follows eventual consistency, which means the get after the update operation might receive stale values for some time. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`EnableNotebookTableClipboard` @@ -29,10 +29,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`EnableNotebookTableClipboard` \ No newline at end of file diff --git a/docs/workspace/settings/enable_results_downloading.rst b/docs/workspace/settings/enable_results_downloading.rst index b50d45b64..4db81d343 100644 --- a/docs/workspace/settings/enable_results_downloading.rst +++ b/docs/workspace/settings/enable_results_downloading.rst @@ -9,8 +9,8 @@ .. py:method:: get_enable_results_downloading() -> EnableResultsDownloading Gets the Notebook results download setting. - - + + :returns: :class:`EnableResultsDownloading` @@ -18,7 +18,7 @@ Updates the Notebook results download setting. The model follows eventual consistency, which means the get after the update operation might receive stale values for some time. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`EnableResultsDownloading` @@ -28,10 +28,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. 
It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`EnableResultsDownloading` \ No newline at end of file diff --git a/docs/workspace/settings/enhanced_security_monitoring.rst b/docs/workspace/settings/enhanced_security_monitoring.rst index 73b6fb837..172686fc0 100644 --- a/docs/workspace/settings/enhanced_security_monitoring.rst +++ b/docs/workspace/settings/enhanced_security_monitoring.rst @@ -7,21 +7,21 @@ Controls whether enhanced security monitoring is enabled for the current workspace. By default, it is disabled. However, if the compliance security profile is enabled, this is automatically enabled. - + If the compliance security profile is disabled, you can enable or disable this setting and it is not permanent. .. py:method:: get( [, etag: Optional[str]]) -> EnhancedSecurityMonitoringSetting Gets the enhanced security monitoring setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`EnhancedSecurityMonitoringSetting` @@ -31,7 +31,7 @@ in `PATCH` requests (as part of the setting field). The etag can be retrieved by making a `GET` request before the `PATCH` request. If the setting is updated concurrently, `PATCH` fails with 409 and the request must be retried by using the fresh etag in the 409 response. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`EnhancedSecurityMonitoringSetting` @@ -41,10 +41,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`EnhancedSecurityMonitoringSetting` \ No newline at end of file diff --git a/docs/workspace/settings/ip_access_lists.rst b/docs/workspace/settings/ip_access_lists.rst index a66fe1afb..efb35048d 100644 --- a/docs/workspace/settings/ip_access_lists.rst +++ b/docs/workspace/settings/ip_access_lists.rst @@ -5,21 +5,21 @@ .. py:class:: IpAccessListsAPI The IP Access List API enables admins to configure IP access lists. - + IP access lists affect web application access and REST API access to this workspace only. If the feature is disabled for a workspace, all access is allowed for this workspace. There is support for allow lists (inclusion) and block lists (exclusion). - + When a connection is attempted: 1. **First, all block lists are checked.** If the connection IP address matches any block list, the connection is rejected. 2. **If the connection was not rejected by block lists**, the IP address is compared with the allow lists.
- + If there is at least one allow list for the workspace, the connection is allowed only if the IP address matches an allow list. If there are no allow lists for the workspace, all IP addresses are allowed. - + For all allow lists and block lists combined, the workspace supports a maximum of 1000 IP/CIDR values, where one CIDR counts as a single value. - + After changes to the IP access list feature, it can take a few minutes for changes to take effect. .. py:method:: create(label: str, list_type: ListType [, ip_addresses: Optional[List[str]]]) -> CreateIpAccessListResponse @@ -46,36 +46,36 @@ w.ip_access_lists.delete(ip_access_list_id=created.ip_access_list.list_id) Creates an IP access list for this workspace. - + A list can be an allow list or a block list. See the top of this file for a description of how the server treats allow lists and block lists at runtime. - + When creating or updating an IP access list: - + * For all allow lists and block lists combined, the API supports a maximum of 1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to exceed that number return error 400 with `error_code` value `QUOTA_EXCEEDED`. * If the new list would block the calling user's current IP, error 400 is returned with `error_code` value `INVALID_STATE`. - + It can take a few minutes for the changes to take effect. **Note**: Your new IP access list has no effect until you enable the feature. See :method:workspaceconf/setStatus - + :param label: str Label for the IP access list. This **cannot** be empty. :param list_type: :class:`ListType` :param ip_addresses: List[str] (optional) - + :returns: :class:`CreateIpAccessListResponse` .. py:method:: delete(ip_access_list_id: str) Deletes an IP access list, specified by its list ID. - + :param ip_access_list_id: str The ID for the corresponding IP access list - - + + .. py:method:: get(ip_access_list_id: str) -> FetchIpAccessListResponse @@ -104,10 +104,10 @@ w.ip_access_lists.delete(ip_access_list_id=created.ip_access_list.list_id) Gets an IP access list, specified by its list ID. - + :param ip_access_list_id: str The ID for the corresponding IP access list - + :returns: :class:`FetchIpAccessListResponse` @@ -125,8 +125,8 @@ all = w.ip_access_lists.list() Gets all IP access lists for the specified workspace. - - + + :returns: Iterator over :class:`IpAccessListInfo` @@ -162,7 +162,7 @@ w.ip_access_lists.delete(ip_access_list_id=created.ip_access_list.list_id) Replaces an IP access list, specified by its ID. - + A list can include allow lists and block lists. See the top of this file for a description of how the server treats allow lists and block lists at run time. When replacing an IP access list: * For all allow lists and block lists combined, the API supports a maximum of 1000 IP/CIDR values, where one @@ -171,7 +171,7 @@ returned with `error_code` value `INVALID_STATE`. It can take a few minutes for the changes to take effect. Note that your resulting IP access list has no effect until you enable the feature. See :method:workspaceconf/setStatus. - + :param ip_access_list_id: str The ID for the corresponding IP access list :param label: str @@ -180,27 +180,27 @@ :param enabled: bool Specifies whether this IP access list is enabled. :param ip_addresses: List[str] (optional) - - + + .. py:method:: update(ip_access_list_id: str [, enabled: Optional[bool], ip_addresses: Optional[List[str]], label: Optional[str], list_type: Optional[ListType]]) Updates an existing IP access list, specified by its ID. 
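A short sketch of a partial update (the list ID, label, and CIDR below are hypothetical placeholders):

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import settings

    w = WorkspaceClient()

    # Only the fields passed here are changed; take the real ID from
    # create() or list().
    w.ip_access_lists.update(
        ip_access_list_id="<ip-access-list-id>",
        enabled=True,
        label="corp-vpn",
        ip_addresses=["192.168.100.0/22"],
        list_type=settings.ListType.ALLOW,
    )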
- + A list can include allow lists and block lists. See the top of this file for a description of how the server treats allow lists and block lists at run time. - + When updating an IP access list: - + * For all allow lists and block lists combined, the API supports a maximum of 1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to exceed that number return error 400 with `error_code` value `QUOTA_EXCEEDED`. * If the updated list would block the calling user's current IP, error 400 is returned with `error_code` value `INVALID_STATE`. - + It can take a few minutes for the changes to take effect. Note that your resulting IP access list has no effect until you enable the feature. See :method:workspaceconf/setStatus. - + :param ip_access_list_id: str The ID for the corresponding IP access list :param enabled: bool (optional) @@ -209,6 +209,6 @@ :param label: str (optional) Label for the IP access list. This **cannot** be empty. :param list_type: :class:`ListType` (optional) - - + + \ No newline at end of file diff --git a/docs/workspace/settings/llm_proxy_partner_powered_workspace.rst b/docs/workspace/settings/llm_proxy_partner_powered_workspace.rst index e7f6448e7..4c7ad1410 100644 --- a/docs/workspace/settings/llm_proxy_partner_powered_workspace.rst +++ b/docs/workspace/settings/llm_proxy_partner_powered_workspace.rst @@ -9,35 +9,35 @@ .. py:method:: delete( [, etag: Optional[str]]) -> DeleteLlmProxyPartnerPoweredWorkspaceResponse Reverts the enable partner powered AI features workspace setting to its default value. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DeleteLlmProxyPartnerPoweredWorkspaceResponse` .. py:method:: get( [, etag: Optional[str]]) -> LlmProxyPartnerPoweredWorkspace Gets the enable partner powered AI features workspace setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`LlmProxyPartnerPoweredWorkspace` .. py:method:: update(allow_missing: bool, setting: LlmProxyPartnerPoweredWorkspace, field_mask: str) -> LlmProxyPartnerPoweredWorkspace Updates the enable partner powered AI features workspace setting. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`LlmProxyPartnerPoweredWorkspace` @@ -47,10 +47,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. 
It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`LlmProxyPartnerPoweredWorkspace` \ No newline at end of file diff --git a/docs/workspace/settings/notification_destinations.rst b/docs/workspace/settings/notification_destinations.rst index 2f99a3b3d..31502394c 100644 --- a/docs/workspace/settings/notification_destinations.rst +++ b/docs/workspace/settings/notification_destinations.rst @@ -12,40 +12,40 @@ .. py:method:: create( [, config: Optional[Config], display_name: Optional[str]]) -> NotificationDestination Creates a notification destination. Requires workspace admin permissions. - + :param config: :class:`Config` (optional) The configuration for the notification destination. Must wrap EXACTLY one of the nested configs. :param display_name: str (optional) The display name for the notification destination. - + :returns: :class:`NotificationDestination` .. py:method:: delete(id: str) Deletes a notification destination. Requires workspace admin permissions. - + :param id: str - - + + .. py:method:: get(id: str) -> NotificationDestination Gets a notification destination. - + :param id: str - + :returns: :class:`NotificationDestination` .. py:method:: list( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[ListNotificationDestinationsResult] Lists notification destinations. - + :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`ListNotificationDestinationsResult` @@ -53,13 +53,13 @@ Updates a notification destination. Requires workspace admin permissions. At least one field is required in the request body. - + :param id: str UUID identifying notification destination. :param config: :class:`Config` (optional) The configuration for the notification destination. Must wrap EXACTLY one of the nested configs. :param display_name: str (optional) The display name for the notification destination. - + :returns: :class:`NotificationDestination` \ No newline at end of file diff --git a/docs/workspace/settings/restrict_workspace_admins.rst b/docs/workspace/settings/restrict_workspace_admins.rst index 7dcbab9cb..e87e5c04d 100644 --- a/docs/workspace/settings/restrict_workspace_admins.rst +++ b/docs/workspace/settings/restrict_workspace_admins.rst @@ -20,28 +20,28 @@ provided in `DELETE` requests (as a query parameter). The etag can be retrieved by making a `GET` request before the DELETE request. If the setting is updated/deleted concurrently, `DELETE` fails with 409 and the request must be retried by using the fresh etag in the 409 response. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DeleteRestrictWorkspaceAdminsSettingResponse` .. py:method:: get( [, etag: Optional[str]]) -> RestrictWorkspaceAdminsSetting Gets the restrict workspace admins setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. 
This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`RestrictWorkspaceAdminsSetting` @@ -51,7 +51,7 @@ `PATCH` requests (as part of the setting field). The etag can be retrieved by making a GET request before the `PATCH` request. If the setting is updated concurrently, `PATCH` fails with 409 and the request must be retried by using the fresh etag in the 409 response. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`RestrictWorkspaceAdminsSetting` @@ -61,10 +61,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`RestrictWorkspaceAdminsSetting` \ No newline at end of file diff --git a/docs/workspace/settings/settings.rst b/docs/workspace/settings/settings.rst index c52c2e601..4f2783f3c 100644 --- a/docs/workspace/settings/settings.rst +++ b/docs/workspace/settings/settings.rst @@ -29,7 +29,7 @@ Controls whether to enable the compliance security profile for the current workspace. Enabling it on a workspace is permanent. By default, it is turned off. - + This setting can NOT be disabled once it is enabled. .. py:property:: dashboard_email_subscriptions @@ -44,12 +44,12 @@ The default namespace setting API allows users to configure the default namespace for a Databricks workspace. - + Through this API, users can retrieve, set, or modify the default namespace used when queries do not reference a fully qualified three-level name. For example, if you use the API to set 'retail_prod' as the default catalog, then a query 'SELECT * FROM myTable' would reference the object 'retail_prod.default.myTable' (the schema 'default' is always assumed). - + This setting requires a restart of clusters and SQL warehouses to take effect. Additionally, the default namespace only applies when using Unity Catalog-enabled compute. @@ -63,7 +63,7 @@ :type: DisableLegacyAccessAPI 'Disabling legacy access' has the following impacts: - + 1. Disables direct access to Hive Metastores from the workspace. However, you can still access a Hive Metastore through Hive Metastore federation. 2. Disables fallback mode on external location access from the workspace. 3. Disables Databricks Runtime versions prior to 13.3LTS. @@ -72,10 +72,10 @@ :type: DisableLegacyDbfsAPI Disabling legacy DBFS has the following implications: - + 1. Access to DBFS root and DBFS mounts is disallowed (as well as the creation of new mounts). 2. Disables Databricks Runtime versions prior to 13.3LTS. - + When the setting is off, all DBFS functionality is enabled and no restrictions are imposed on Databricks Runtime versions. This setting can take up to 20 minutes to take effect and requires a manual restart of all-purpose compute clusters and SQL warehouses.
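Since each of the settings above hangs off the umbrella `w.settings` API, the access pattern is uniform; a minimal sketch (the `disable_legacy_dbfs` accessor mirrors the property name above):

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # The same get/update/delete shape applies across the settings APIs;
    # the returned object carries an `etag` for concurrency control.
    dbfs_setting = w.settings.disable_legacy_dbfs.get()
    print(dbfs_setting.etag)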
@@ -103,7 +103,7 @@ Controls whether enhanced security monitoring is enabled for the current workspace. By default, it is disabled. However, if the compliance security profile is enabled, this is automatically enabled. - + If the compliance security profile is disabled, you can enable or disable this setting and it is not permanent. diff --git a/docs/workspace/settings/sql_results_download.rst b/docs/workspace/settings/sql_results_download.rst index 8afad7764..49042b2c0 100644 --- a/docs/workspace/settings/sql_results_download.rst +++ b/docs/workspace/settings/sql_results_download.rst @@ -10,35 +10,35 @@ .. py:method:: delete( [, etag: Optional[str]]) -> DeleteSqlResultsDownloadResponse Reverts the SQL Results Download setting to its default value. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`DeleteSqlResultsDownloadResponse` .. py:method:: get( [, etag: Optional[str]]) -> SqlResultsDownload Gets the SQL Results Download setting. - + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - + :returns: :class:`SqlResultsDownload` .. py:method:: update(allow_missing: bool, setting: SqlResultsDownload, field_mask: str) -> SqlResultsDownload Updates the SQL Results Download setting. - + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. :param setting: :class:`SqlResultsDownload` @@ -48,10 +48,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`SqlResultsDownload` \ No newline at end of file diff --git a/docs/workspace/settings/token_management.rst b/docs/workspace/settings/token_management.rst index ee0e831b8..724a09c85 100644 --- a/docs/workspace/settings/token_management.rst +++ b/docs/workspace/settings/token_management.rst @@ -35,25 +35,25 @@ w.token_management.delete(token_id=obo.token_info.token_id) Creates a token on behalf of a service principal. - + :param application_id: str Application ID of the service principal. :param comment: str (optional) Comment that describes the purpose of the token. :param lifetime_seconds: int (optional) The number of seconds before the token expires.
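A sketch of the create-and-revoke round trip, mirroring the usage snippet above (the application ID is a hypothetical placeholder, and the `create_obo_token` method name is an assumption consistent with the `obo` variable in that snippet):

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # "<application-id>" stands in for an existing service principal.
    obo = w.token_management.create_obo_token(
        application_id="<application-id>",
        lifetime_seconds=300,
        comment="short-lived automation token",
    )
    print(obo.token_info.token_id)

    # Revoke it again, as in the cleanup line above.
    w.token_management.delete(token_id=obo.token_info.token_id)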
- + :returns: :class:`CreateOboTokenResponse` .. py:method:: delete(token_id: str) Deletes a token, specified by its ID. - + :param token_id: str The ID of the token to revoke. - - + + .. py:method:: get(token_id: str) -> GetTokenResponse @@ -86,26 +86,26 @@ w.token_management.delete(token_id=obo.token_info.token_id) Gets information about a token, specified by its ID. - + :param token_id: str The ID of the token to get. - + :returns: :class:`GetTokenResponse` .. py:method:: get_permission_levels() -> GetTokenPermissionLevelsResponse Gets the permission levels that a user can have on an object. - - + + :returns: :class:`GetTokenPermissionLevelsResponse` .. py:method:: get_permissions() -> TokenPermissions Gets the permissions of all tokens. Tokens can inherit permissions from their root object. - - + + :returns: :class:`TokenPermissions` @@ -124,12 +124,12 @@ all = w.token_management.list(settings.ListTokenManagementRequest()) Lists all tokens associated with the specified workspace or user. - + :param created_by_id: int (optional) User ID of the user that created the token. :param created_by_username: str (optional) Username of the user that created the token. - + :returns: Iterator over :class:`TokenInfo` @@ -137,17 +137,17 @@ Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. - + :param access_control_list: List[:class:`TokenAccessControlRequest`] (optional) - + :returns: :class:`TokenPermissions` .. py:method:: update_permissions( [, access_control_list: Optional[List[TokenAccessControlRequest]]]) -> TokenPermissions Updates the permissions on all tokens. Tokens can inherit permissions from their root object. - + :param access_control_list: List[:class:`TokenAccessControlRequest`] (optional) - + :returns: :class:`TokenPermissions` \ No newline at end of file diff --git a/docs/workspace/settings/tokens.rst b/docs/workspace/settings/tokens.rst index 18e7161cc..9a4508055 100644 --- a/docs/workspace/settings/tokens.rst +++ b/docs/workspace/settings/tokens.rst @@ -28,27 +28,27 @@ Creates and returns a token for a user. If this call is made through token authentication, it creates a token with the same client ID as the authenticated token. If the user's token quota is exceeded, this call returns an error **QUOTA_EXCEEDED**. - + :param comment: str (optional) Optional description to attach to the token. :param lifetime_seconds: int (optional) The lifetime of the token, in seconds. - + If the lifetime is not specified, this token remains valid indefinitely. - + :returns: :class:`CreateTokenResponse` .. py:method:: delete(token_id: str) Revokes an access token. - + If a token with the specified ID is not valid, this call returns an error **RESOURCE_DOES_NOT_EXIST**. - + :param token_id: str The ID of the token to be revoked. - - + + .. py:method:: list() -> Iterator[PublicTokenInfo] @@ -65,7 +65,7 @@ all = w.tokens.list() Lists all the valid tokens for a user-workspace pair. - - + + :returns: Iterator over :class:`PublicTokenInfo` \ No newline at end of file diff --git a/docs/workspace/settings/workspace_conf.rst b/docs/workspace/settings/workspace_conf.rst index 52701f8a0..0301c4958 100644 --- a/docs/workspace/settings/workspace_conf.rst +++ b/docs/workspace/settings/workspace_conf.rst @@ -20,12 +20,16 @@ conf = w.workspace_conf.get_status(keys="enableWorkspaceFilesystem") Gets the configuration status for a workspace. 
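A sketch of a read-then-write round trip using the two methods documented here; writing the value back unchanged keeps the example side-effect free:

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Read one or more comma-separated keys.
    conf = w.workspace_conf.get_status(keys="enableWorkspaceFilesystem")
    print(conf)

    # Write the value back via set_status (documented below), which takes
    # a map of configuration key/value strings.
    w.workspace_conf.set_status(
        contents={"enableWorkspaceFilesystem": conf["enableWorkspaceFilesystem"]}
    )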
- + :param keys: str - + :returns: Dict[str,str] .. py:method:: set_status(contents: Dict[str, str]) - Sets the configuration status for a workspace, including enabling or disabling it. \ No newline at end of file + Sets the configuration status for a workspace, including enabling or disabling it. + + + + \ No newline at end of file diff --git a/docs/workspace/settingsv2/workspace_settings_v2.rst b/docs/workspace/settingsv2/workspace_settings_v2.rst index da8557baf..f3a7522bd 100644 --- a/docs/workspace/settingsv2/workspace_settings_v2.rst +++ b/docs/workspace/settingsv2/workspace_settings_v2.rst @@ -9,9 +9,9 @@ .. py:method:: get_public_workspace_setting(name: str) -> Setting Get a setting value at workspace level - + :param name: str - + :returns: :class:`Setting` @@ -20,7 +20,7 @@ List valid setting keys and metadata. These settings can be referenced via the [GET /api/2.1/settings/{name}](#~1api~1workspace~1settingsv2~1getpublicworkspacesetting) and [PATCH /api/2.1/settings/{name}](#~1api~1workspace~1settingsv2~1patchpublicworkspacesetting) APIs. - + :param page_size: int (optional) The maximum number of settings to return. The service may return fewer than this value. If unspecified, at most 200 settings will be returned. The maximum value is 1000; values above 1000 @@ -28,19 +28,19 @@ :param page_token: str (optional) A page token, received from a previous `ListWorkspaceSettingsMetadataRequest` call. Provide this to retrieve the subsequent page. - + When paginating, all other parameters provided to `ListWorkspaceSettingsMetadataRequest` must match the call that provided the page token. - + :returns: Iterator over :class:`SettingsMetadata` .. py:method:: patch_public_workspace_setting(name: str, setting: Setting) -> Setting Patch a setting value at workspace level - + :param name: str :param setting: :class:`Setting` - + :returns: :class:`Setting` \ No newline at end of file diff --git a/docs/workspace/sharing/providers.rst b/docs/workspace/sharing/providers.rst index fd81e1b24..7462508cd 100644 --- a/docs/workspace/sharing/providers.rst +++ b/docs/workspace/sharing/providers.rst @@ -34,7 +34,7 @@ Creates a new authentication provider minimally based on a name and authentication type. The caller must be an admin on the metastore. - + :param name: str The name of the Provider. :param authentication_type: :class:`AuthenticationType` @@ -43,7 +43,7 @@ :param recipient_profile_str: str (optional) This field is required when the __authentication_type__ is **TOKEN**, **OAUTH_CLIENT_CREDENTIALS** or not provided. - + :returns: :class:`ProviderInfo` @@ -51,11 +51,11 @@ Deletes an authentication provider, if the caller is a metastore admin or is the owner of the provider. - + :param name: str Name of the provider. - - + + .. py:method:: get(name: str) -> ProviderInfo @@ -87,10 +87,10 @@ Gets a specific authentication provider. The caller must supply the name of the provider, and must either be a metastore admin or the owner of the provider. - + :param name: str Name of the provider. - + :returns: :class:`ProviderInfo` @@ -111,7 +111,7 @@ Gets an array of available authentication providers. The caller must either be a metastore admin or the owner of the providers. Providers not owned by the caller are not included in the response. There is no guarantee of a specific ordering of the elements in the array. - + :param data_provider_global_metastore_id: str (optional) If not provided, all providers will be returned. If no providers exist with this ID, no results will be returned.
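A minimal listing sketch, iterating lazily over the paginated results:

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Iterate over all providers visible to the caller; pass
    # data_provider_global_metastore_id to narrow the listing.
    for p in w.providers.list():
        print(p.name, p.authentication_type)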
@@ -125,7 +125,7 @@ from the response. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`ProviderInfo` @@ -133,7 +133,7 @@ Gets arrays of assets associated with a specified provider's share. The caller is the recipient of the share. - + :param provider_name: str The name of the provider who owns the share. :param share_name: str @@ -146,7 +146,7 @@ Maximum number of tables to return. :param volume_max_results: int (optional) Maximum number of volumes to return. - + :returns: :class:`ListProviderShareAssetsResponse` @@ -178,9 +178,9 @@ w.providers.delete(name=created.name) Gets an array of a specified provider's shares within the metastore where: - + * the caller is a metastore admin, or * the caller is the owner. - + :param name: str Name of the provider in which to list shares. :param max_results: int (optional) @@ -193,7 +193,7 @@ response. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`ProviderShare` @@ -227,7 +227,7 @@ Updates the information for an authentication provider, if the caller is a metastore admin or is the owner of the provider. If the update changes the provider name, the caller must be both a metastore admin and the owner of the provider. - + :param name: str Name of the provider. :param comment: str (optional) @@ -239,6 +239,6 @@ :param recipient_profile_str: str (optional) This field is required when the __authentication_type__ is **TOKEN**, **OAUTH_CLIENT_CREDENTIALS** or not provided. - + :returns: :class:`ProviderInfo` \ No newline at end of file diff --git a/docs/workspace/sharing/recipient_activation.rst b/docs/workspace/sharing/recipient_activation.rst index 16656b384..da1654528 100644 --- a/docs/workspace/sharing/recipient_activation.rst +++ b/docs/workspace/sharing/recipient_activation.rst @@ -8,26 +8,26 @@ the authentication type of `TOKEN`. The data recipient follows the activation link shared by the data provider to download the credential file that includes the access token. The recipient will then use the credential file to establish a secure connection with the provider to receive the shared data. - + Note that you can download the credential file only once. Recipients should treat the downloaded credential as a secret and must not share it outside of their organization. .. py:method:: get_activation_url_info(activation_url: str) Gets an activation URL for a share. - + :param activation_url: str The one-time activation URL. It also accepts an activation token. - - + + .. py:method:: retrieve_token(activation_url: str) -> RetrieveTokenResponse Retrieves an access token with an activation URL. This is a public API without any authentication. - + :param activation_url: str The one-time activation URL. It also accepts an activation token. - + :returns: :class:`RetrieveTokenResponse` \ No newline at end of file diff --git a/docs/workspace/sharing/recipient_federation_policies.rst b/docs/workspace/sharing/recipient_federation_policies.rst index 0cdcd8559..53ffab3ee 100644 --- a/docs/workspace/sharing/recipient_federation_policies.rst +++ b/docs/workspace/sharing/recipient_federation_policies.rst @@ -19,7 +19,7 @@ Multi-Factor Authentication (MFA), and enhances security by minimizing the risk of credential leakage through the use of short-lived, expiring tokens.
It is designed for strong identity governance, secure cross-platform data sharing, and reduced operational overhead for credential management. - + For more information, see https://www.databricks.com/blog/announcing-oidc-token-federation-enhanced-delta-sharing-security and https://docs.databricks.com/en/delta-sharing/create-recipient-oidc-fed @@ -32,27 +32,27 @@ non-Databricks recipients. The federation policy validates OIDC claims in federated tokens and is defined at the recipient level. This enables secretless sharing clients to authenticate using OIDC tokens. - + Supported scenarios for federation policies: 1. **User-to-Machine (U2M) flow** (e.g., PowerBI): A user accesses a resource using their own identity. 2. **Machine-to-Machine (M2M) flow** (e.g., OAuth App): An OAuth App accesses a resource using its own identity, typically for tasks like running nightly jobs. - + For an overview, refer to: - Blog post: Overview of feature: https://www.databricks.com/blog/announcing-oidc-token-federation-enhanced-delta-sharing-security - + For detailed configuration guides based on your use case: - Creating a Federation Policy as a provider: https://docs.databricks.com/en/delta-sharing/create-recipient-oidc-fed - Configuration and usage for Machine-to-Machine (M2M) applications (e.g., Python Delta Sharing Client): https://docs.databricks.com/aws/en/delta-sharing/sharing-over-oidc-m2m - Configuration and usage for User-to-Machine (U2M) applications (e.g., PowerBI): https://docs.databricks.com/aws/en/delta-sharing/sharing-over-oidc-u2m - + :param recipient_name: str Name of the recipient. This is the name of the recipient for which the policy is being created. :param policy: :class:`FederationPolicy` Name of the policy. This is the name of the policy to be created. - + :returns: :class:`FederationPolicy` @@ -60,25 +60,25 @@ Deletes an existing federation policy for an OIDC_FEDERATION recipient. The caller must be the owner of the recipient. - + :param recipient_name: str Name of the recipient. This is the name of the recipient for which the policy is being deleted. :param name: str Name of the policy. This is the name of the policy to be deleted. - - + + .. py:method:: get_federation_policy(recipient_name: str, name: str) -> FederationPolicy Reads an existing federation policy for an OIDC_FEDERATION recipient for sharing data from Databricks to non-Databricks recipients. The caller must have read access to the recipient. - + :param recipient_name: str Name of the recipient. This is the name of the recipient for which the policy is being retrieved. :param name: str Name of the policy. This is the name of the policy to be retrieved. - + :returns: :class:`FederationPolicy` @@ -86,12 +86,12 @@ Lists federation policies for an OIDC_FEDERATION recipient for sharing data from Databricks to non-Databricks recipients. The caller must have read access to the recipient. - + :param recipient_name: str Name of the recipient. This is the name of the recipient for which the policies are being listed. :param max_results: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`FederationPolicy` @@ -99,7 +99,7 @@ Updates an existing federation policy for an OIDC_RECIPIENT. The caller must be the owner of the recipient. - + :param recipient_name: str Name of the recipient. This is the name of the recipient for which the policy is being updated. :param name: str @@ -111,6 +111,6 @@ should be updated (full replacement). 
If unspecified, all fields that are set in the policy provided in the update request will overwrite the corresponding fields in the existing policy. Example value: 'comment,oidc_policy.audiences'. - + :returns: :class:`FederationPolicy` \ No newline at end of file diff --git a/docs/workspace/sharing/recipients.rst b/docs/workspace/sharing/recipients.rst index 2f921319c..0cda6a17a 100644 --- a/docs/workspace/sharing/recipients.rst +++ b/docs/workspace/sharing/recipients.rst @@ -7,12 +7,12 @@ A recipient is an object you create using :method:recipients/create to represent an organization to which you want to allow access to shares. How sharing works differs depending on whether your recipient has access to a Databricks workspace that is enabled for Unity Catalog: - + - For recipients with access to a Databricks workspace that is enabled for Unity Catalog, you can create a recipient object along with a unique sharing identifier you get from the recipient. The sharing identifier is the key identifier that enables the secure connection. This sharing mode is called **Databricks-to-Databricks sharing**. - + - For recipients without access to a Databricks workspace that is enabled for Unity Catalog, when you create a recipient object, Databricks generates an activation link you can send to the recipient. The recipient follows the activation link to download the credential file, and then uses the credential file @@ -38,7 +38,7 @@ Creates a new recipient with the delta sharing authentication type in the metastore. The caller must be a metastore admin or have the **CREATE_RECIPIENT** privilege on the metastore. - + :param name: str Name of Recipient. :param authentication_type: :class:`AuthenticationType` @@ -61,18 +61,18 @@ :param sharing_code: str (optional) The one-time sharing code provided by the data recipient. This field is only present when the __authentication_type__ is **DATABRICKS**. - + :returns: :class:`RecipientInfo` .. py:method:: delete(name: str) Deletes the specified recipient from the metastore. The caller must be the owner of the recipient. - + :param name: str Name of the recipient. - - + + .. py:method:: get(name: str) -> RecipientInfo @@ -96,12 +96,12 @@ w.recipients.delete(name=created.name) Gets a share recipient from the metastore if: - + * the caller is the owner of the share recipient, or * the caller is a metastore admin - + :param name: str Name of the recipient. - + :returns: :class:`RecipientInfo` @@ -120,10 +120,10 @@ all = w.recipients.list(sharing.ListRecipientsRequest()) Gets an array of all share recipients within the current metastore where: - + * the caller is a metastore admin, or * the caller is the owner. There is no guarantee of a specific ordering of the elements in the array. - + :param data_recipient_global_metastore_id: str (optional) If not provided, all recipients will be returned. If no recipients exist with this ID, no results will be returned. @@ -137,7 +137,7 @@ from the response. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`RecipientInfo` @@ -163,14 +163,14 @@ Refreshes the specified recipient's delta sharing authentication token with the provided token info. The caller must be the owner of the recipient. - + :param name: str The name of the Recipient. :param existing_token_expire_in_seconds: int The expiration time of the bearer token in ISO 8601 format.
This will only set the expiration_time of the existing token to a smaller timestamp; it cannot extend the expiration_time. Use 0 to expire the existing token immediately; a negative number will return an error. - + :returns: :class:`RecipientInfo` @@ -196,7 +196,7 @@ Gets the share permissions for the specified Recipient. The caller must be a metastore admin or the owner of the Recipient. - + :param name: str The name of the Recipient. :param max_results: int (optional) @@ -209,7 +209,7 @@ unset from the response. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: :class:`GetRecipientSharePermissionsResponse` @@ -236,7 +236,7 @@ Updates an existing recipient in the metastore. The caller must be a metastore admin or the owner of the recipient. If the recipient name is updated, the user must be both a metastore admin and the owner of the recipient. - + :param name: str Name of the recipient. :param comment: str (optional) @@ -253,6 +253,6 @@ Recipient properties as map of string key-value pairs. When provided in an update request, the specified properties will override the existing properties. To add and remove properties, one would need to perform a read-modify-write. - + :returns: :class:`RecipientInfo` \ No newline at end of file diff --git a/docs/workspace/sharing/shares.rst b/docs/workspace/sharing/shares.rst index 1bf63fdf7..6afd11e0a 100644 --- a/docs/workspace/sharing/shares.rst +++ b/docs/workspace/sharing/shares.rst @@ -29,25 +29,25 @@ Creates a new share for data objects. Data objects can be added after creation with **update**. The caller must be a metastore admin or have the **CREATE_SHARE** privilege on the metastore. - + :param name: str Name of the share. :param comment: str (optional) User-provided free-form text description. :param storage_root: str (optional) Storage root URL for the share. - + :returns: :class:`ShareInfo` .. py:method:: delete(name: str) Deletes a data object share from the metastore. The caller must be an owner of the share. - + :param name: str The name of the share. - - + + .. py:method:: get(name: str [, include_shared_data: Optional[bool]]) -> ShareInfo @@ -72,12 +72,12 @@ Gets a data object share from the metastore. The caller must be a metastore admin or the owner of the share. - + :param name: str The name of the share. :param include_shared_data: bool (optional) Query for data to include in the share. - + :returns: :class:`ShareInfo` @@ -97,7 +97,7 @@ Gets an array of data object shares from the metastore. The caller must be a metastore admin or the owner of the share. There is no guarantee of a specific ordering of the elements in the array. - + :param max_results: int (optional) Maximum number of shares to return. - when set to 0, the page length is set to a server configured value (recommended); - when set to a value greater than 0, the page length is the minimum of this @@ -108,7 +108,7 @@ response. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query. - + :returns: Iterator over :class:`ShareInfo` @@ -116,7 +116,7 @@ Gets the permissions for a data share from the metastore. The caller must be a metastore admin or the owner of the share. - + :param name: str The name of the share. :param max_results: int (optional) @@ -129,7 +129,7 @@ unset from the response. :param page_token: str (optional) Opaque pagination token to go to next page based on previous query.
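As a sketch of the **update** method documented just below, adding a table to a share (the share and table names are hypothetical; the shape mirrors this file's own usage examples):

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import sharing

    w = WorkspaceClient()

    # Add one table to an existing share; remember the share owner needs
    # SELECT on the table for recipients to keep access.
    w.shares.update(
        name="my_share",
        updates=[
            sharing.SharedDataObjectUpdate(
                action=sharing.SharedDataObjectUpdateAction.ADD,
                data_object=sharing.SharedDataObject(
                    name="main.default.my_table", data_object_type="TABLE"
                ),
            )
        ],
    )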
- + :returns: :class:`GetSharePermissionsResponse` @@ -187,20 +187,20 @@ Updates the share with the changes and data objects in the request. The caller must be the owner of the share or a metastore admin. - + When the caller is a metastore admin, only the __owner__ field can be updated. - + If the share name is changed, **updateShare** requires that the caller is the owner of the share and has the CREATE_SHARE privilege. - + If there are notebook files in the share, the __storage_root__ field cannot be updated. - + For each table that is added through this method, the share owner must also have **SELECT** privilege on the table. This privilege must be maintained indefinitely for recipients to be able to access the table. Typically, you should use a group as the share owner. - + Table removals through **update** do not require additional privileges. - + :param name: str The name of the share. :param comment: str (optional) @@ -213,7 +213,7 @@ Storage root URL for the share. :param updates: List[:class:`SharedDataObjectUpdate`] (optional) Array of shared data object updates. - + :returns: :class:`ShareInfo` @@ -221,16 +221,16 @@ Updates the permissions for a data share in the metastore. The caller must be a metastore admin or an owner of the share. - + For new recipient grants, the user must also be the recipient owner or metastore admin. Recipient revocations do not require additional privileges. - + :param name: str The name of the share. :param changes: List[:class:`PermissionsChange`] (optional) Array of permissions change objects. :param omit_permissions_list: bool (optional) Optional. Whether to return the latest permissions list of the share in the response. - + :returns: :class:`UpdateSharePermissionsResponse` \ No newline at end of file diff --git a/docs/workspace/sql/alerts.rst b/docs/workspace/sql/alerts.rst index c3efdb347..a90e01a88 100644 --- a/docs/workspace/sql/alerts.rst +++ b/docs/workspace/sql/alerts.rst @@ -51,12 +51,12 @@ w.alerts.delete(id=alert.id) Creates an alert. - + :param alert: :class:`CreateAlertRequestAlert` (optional) :param auto_resolve_display_name: bool (optional) If true, automatically resolve alert display name conflicts. Otherwise, fail the request if the alert's display name conflicts with an existing alert's display name. - + :returns: :class:`Alert` @@ -65,10 +65,10 @@ Moves an alert to the trash. Trashed alerts immediately disappear from searches and list views, and can no longer trigger. You can restore a trashed alert through the UI. A trashed alert is permanently deleted after 30 days. - + :param id: str - - + + .. py:method:: get(id: str) -> Alert @@ -115,9 +115,9 @@ w.alerts.delete(id=alert.id) Gets an alert. - + :param id: str - + :returns: :class:`Alert` @@ -137,10 +137,10 @@ Gets a list of alerts accessible to the user, ordered by creation time. **Warning:** Calling this API concurrently 10 or more times could result in throttling, service degradation, or a temporary ban. - + :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`ListAlertsResponseAlert` @@ -192,7 +192,7 @@ w.alerts.delete(id=alert.id) Updates an alert. - + :param id: str :param update_mask: str The field mask must be a single string, with multiple fields separated by commas (no spaces). The @@ -200,7 +200,7 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names.
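As a hedged sketch of the mask semantics (assuming `alert` was returned by an earlier :method:alerts/create call), updating only the display name might look like:

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import sql

    w = WorkspaceClient()

    # Only display_name is listed in the mask, so all other fields of the
    # alert are left untouched by the update.
    updated = w.alerts.update(
        id=alert.id,
        alert=sql.UpdateAlertRequestAlert(display_name="new display name"),
        update_mask="display_name",
    )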
- + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. @@ -208,6 +208,6 @@ :param auto_resolve_display_name: bool (optional) If true, automatically resolve alert display name conflicts. Otherwise, fail the request if the alert's display name conflicts with an existing alert's display name. - + :returns: :class:`Alert` \ No newline at end of file diff --git a/docs/workspace/sql/alerts_legacy.rst b/docs/workspace/sql/alerts_legacy.rst index 199c1dba0..72fdfa42f 100644 --- a/docs/workspace/sql/alerts_legacy.rst +++ b/docs/workspace/sql/alerts_legacy.rst @@ -8,22 +8,22 @@ periodically runs a query, evaluates a condition of its result, and notifies one or more users and/or notification destinations if the condition was met. Alerts can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create. - + **Note**: A new version of the Databricks SQL API is now available. Please see the latest version. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html .. py:method:: create(name: str, options: AlertOptions, query_id: str [, parent: Optional[str], rearm: Optional[int]]) -> LegacyAlert Creates an alert. An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies users or notification destinations if the condition was met. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:alerts/create instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param name: str Name of the alert. :param options: :class:`AlertOptions` @@ -35,7 +35,7 @@ :param rearm: int (optional) Number of seconds after being triggered before the alert rearms itself and can be triggered again. If `null`, alert will never be triggered again. - + :returns: :class:`LegacyAlert` @@ -43,53 +43,53 @@ Deletes an alert. Deleted alerts are no longer accessible and cannot be restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to the trash. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:alerts/delete instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param alert_id: str - - + + .. py:method:: get(alert_id: str) -> LegacyAlert Gets an alert. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:alerts/get instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param alert_id: str - + :returns: :class:`LegacyAlert` .. py:method:: list() -> Iterator[LegacyAlert] Gets a list of alerts. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:alerts/list instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - - + + :returns: Iterator over :class:`LegacyAlert` .. py:method:: update(alert_id: str, name: str, options: AlertOptions, query_id: str [, rearm: Optional[int]]) Updates an alert. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:alerts/update instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param alert_id: str :param name: str Name of the alert. 
@@ -100,6 +100,6 @@ :param rearm: int (optional) Number of seconds after being triggered before the alert rearms itself and can be triggered again. If `null`, alert will never be triggered again. - - + + \ No newline at end of file diff --git a/docs/workspace/sql/alerts_v2.rst b/docs/workspace/sql/alerts_v2.rst index 96ee533fb..c8b5e8a05 100644 --- a/docs/workspace/sql/alerts_v2.rst +++ b/docs/workspace/sql/alerts_v2.rst @@ -9,28 +9,28 @@ .. py:method:: create_alert(alert: AlertV2) -> AlertV2 Create Alert - + :param alert: :class:`AlertV2` - + :returns: :class:`AlertV2` .. py:method:: get_alert(id: str) -> AlertV2 Gets an alert. - + :param id: str - + :returns: :class:`AlertV2` .. py:method:: list_alerts( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[AlertV2] Gets a list of alerts accessible to the user, ordered by creation time. - + :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`AlertV2` @@ -39,16 +39,16 @@ Moves an alert to the trash. Trashed alerts immediately disappear from list views, and can no longer trigger. You can restore a trashed alert through the UI. A trashed alert is permanently deleted after 30 days. - + :param id: str - - + + .. py:method:: update_alert(id: str, alert: AlertV2, update_mask: str) -> AlertV2 Update alert - + :param id: str UUID identifying the alert. :param alert: :class:`AlertV2` @@ -58,10 +58,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - + :returns: :class:`AlertV2` \ No newline at end of file diff --git a/docs/workspace/sql/dashboard_widgets.rst b/docs/workspace/sql/dashboard_widgets.rst index aa1e61b75..282bfe639 100644 --- a/docs/workspace/sql/dashboard_widgets.rst +++ b/docs/workspace/sql/dashboard_widgets.rst @@ -10,7 +10,7 @@ .. py:method:: create(dashboard_id: str, options: WidgetOptions, width: int [, text: Optional[str], visualization_id: Optional[str]]) -> Widget Adds a widget to a dashboard - + :param dashboard_id: str Dashboard ID returned by :method:dashboards/create. :param options: :class:`WidgetOptions` @@ -21,24 +21,24 @@ contains a visualization in the `visualization` field. :param visualization_id: str (optional) Query Visualization ID returned by :method:queryvisualizations/create. - + :returns: :class:`Widget` .. py:method:: delete(id: str) Removes a widget from a dashboard - + :param id: str Widget ID returned by :method:dashboardwidgets/create - - + + .. py:method:: update(id: str, dashboard_id: str, options: WidgetOptions, width: int [, text: Optional[str], visualization_id: Optional[str]]) -> Widget Updates an existing widget - + :param id: str Widget ID returned by :method:dashboardwidgets/create :param dashboard_id: str @@ -51,6 +51,6 @@ contains a visualization in the `visualization` field. :param visualization_id: str (optional) Query Visualization ID returned by :method:queryvisualizations/create.
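A hedged usage sketch (both IDs are placeholders, and passing an empty `WidgetOptions` assumes all of its fields are optional):

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import sql

    w = WorkspaceClient()

    # Rewrite an existing text widget in place; widgets are identified by
    # the ID returned from dashboard_widgets.create.
    widget = w.dashboard_widgets.update(
        id="<widget-id>",
        dashboard_id="<dashboard-id>",
        options=sql.WidgetOptions(),
        width=1,
        text="Updated note",
    )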
- + :returns: :class:`Widget` \ No newline at end of file diff --git a/docs/workspace/sql/dashboards.rst b/docs/workspace/sql/dashboards.rst index fdb12a174..8569a09e8 100644 --- a/docs/workspace/sql/dashboards.rst +++ b/docs/workspace/sql/dashboards.rst @@ -32,10 +32,10 @@ Moves a dashboard to the trash. Trashed dashboards do not appear in list views or searches, and cannot be shared. - + :param dashboard_id: str - - + + .. py:method:: get(dashboard_id: str) -> Dashboard @@ -59,9 +59,9 @@ w.dashboards.delete(dashboard_id=created.id) Returns a JSON representation of a dashboard object, including its visualization and query objects. - + :param dashboard_id: str - + :returns: :class:`Dashboard` @@ -80,10 +80,10 @@ all = w.dashboards.list(sql.ListDashboardsRequest()) Fetch a paginated list of dashboard objects. - + **Warning**: Calling this API concurrently 10 or more times could result in throttling, service degradation, or a temporary ban. - + :param order: :class:`ListOrder` (optional) Name of dashboard attribute to order by. :param page: int (optional) @@ -92,7 +92,7 @@ Number of dashboards to return per page. :param q: str (optional) Full text search term. - + :returns: Iterator over :class:`Dashboard` @@ -117,19 +117,19 @@ w.dashboards.delete(dashboard_id=created.id) A restored dashboard appears in list views and searches and can be shared. - + :param dashboard_id: str - - + + .. py:method:: update(dashboard_id: str [, name: Optional[str], run_as_role: Optional[RunAsRole], tags: Optional[List[str]]]) -> Dashboard Modify this dashboard definition. This operation only affects attributes of the dashboard object. It does not add, modify, or remove widgets. - + **Note**: You cannot undo this operation. - + :param dashboard_id: str :param name: str (optional) The title of this dashboard that appears in list views and at the top of the dashboard page. @@ -137,6 +137,6 @@ Sets the **Run as** role for the object. Must be set to one of `"viewer"` (signifying "run as viewer" behavior) or `"owner"` (signifying "run as owner" behavior) :param tags: List[str] (optional) - + :returns: :class:`Dashboard` \ No newline at end of file diff --git a/docs/workspace/sql/data_sources.rst b/docs/workspace/sql/data_sources.rst index 6e85f43ed..29a42a0af 100644 --- a/docs/workspace/sql/data_sources.rst +++ b/docs/workspace/sql/data_sources.rst @@ -7,13 +7,13 @@ This API is provided to assist you in making new query objects. When creating a query object, you may optionally specify a `data_source_id` for the SQL warehouse against which it will run. If you don't already know the `data_source_id` for your desired SQL warehouse, this API will help you find it. - + This API does not support searches. It returns the full list of SQL warehouses in your workspace. We advise you to use any text editor, REST client, or `grep` to search the response from this API for the name of your SQL warehouse as it appears in Databricks SQL. - + **Note**: A new version of the Databricks SQL API is now available. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html .. py:method:: list() -> Iterator[DataSource] @@ -32,12 +32,12 @@ Retrieves a full list of SQL warehouses available in this workspace. All fields that appear in this API response are enumerated for clarity. However, you need only a SQL warehouse's `id` to create new queries against it. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:warehouses/list instead. 
[Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - - + + :returns: Iterator over :class:`DataSource` \ No newline at end of file diff --git a/docs/workspace/sql/dbsql_permissions.rst b/docs/workspace/sql/dbsql_permissions.rst index c05c82012..e4d3e076d 100644 --- a/docs/workspace/sql/dbsql_permissions.rst +++ b/docs/workspace/sql/dbsql_permissions.rst @@ -7,33 +7,33 @@ The SQL Permissions API is similar to the endpoints of the :method:permissions/set. However, this API exposes only one endpoint, which gets the Access Control List for a given object. You cannot modify any permissions using this API. - + There are three levels of permission: - + - `CAN_VIEW`: Allows read-only access - + - `CAN_RUN`: Allows read access and run access (superset of `CAN_VIEW`) - + - `CAN_MANAGE`: Allows all actions: read, run, edit, delete, modify permissions (superset of `CAN_RUN`) - + **Note**: A new version of the Databricks SQL API is now available. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html .. py:method:: get(object_type: ObjectTypePlural, object_id: str) -> GetResponse Gets a JSON representation of the access control list (ACL) for a specified object. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:workspace/getpermissions instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param object_type: :class:`ObjectTypePlural` The type of object permissions to check. :param object_id: str Object ID. An ACL is returned for the object with this UUID. - + :returns: :class:`GetResponse` @@ -41,36 +41,36 @@ Sets the access control list (ACL) for a specified object. This operation will completely rewrite the ACL. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:workspace/setpermissions instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param object_type: :class:`ObjectTypePlural` The type of object permission to set. :param object_id: str Object ID. The ACL for the object with this UUID is overwritten by this request's POST content. :param access_control_list: List[:class:`AccessControl`] (optional) - + :returns: :class:`SetResponse` .. py:method:: transfer_ownership(object_type: OwnableObjectType, object_id: TransferOwnershipObjectId [, new_owner: Optional[str]]) -> Success Transfers ownership of a dashboard, query, or alert to an active user. Requires an admin API key. - + **Note**: A new version of the Databricks SQL API is now available. For queries and alerts, please use :method:queries/update and :method:alerts/update respectively instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param object_type: :class:`OwnableObjectType` The type of object on which to change ownership. :param object_id: :class:`TransferOwnershipObjectId` The ID of the object on which to change ownership. :param new_owner: str (optional) Email address for the new owner, who must exist in the workspace. - + :returns: :class:`Success` \ No newline at end of file diff --git a/docs/workspace/sql/queries.rst b/docs/workspace/sql/queries.rst index f0081b3f2..b907440d4 100644 --- a/docs/workspace/sql/queries.rst +++ b/docs/workspace/sql/queries.rst @@ -37,12 +37,12 @@ w.queries.delete(id=query.id) Creates a query. - + :param auto_resolve_display_name: bool (optional) If true, automatically resolve query display name conflicts.
Otherwise, fail the request if the query's display name conflicts with an existing query's display name. :param query: :class:`CreateQueryRequestQuery` (optional) - + :returns: :class:`Query` @@ -51,10 +51,10 @@ Moves a query to the trash. Trashed queries immediately disappear from searches and list views, and cannot be used for alerts. You can restore a trashed query through the UI. A trashed query is permanently deleted after 30 days. - + :param id: str - - + + .. py:method:: get(id: str) -> Query @@ -88,9 +88,9 @@ w.queries.delete(id=query.id) Gets a query. - + :param id: str - + :returns: :class:`Query` @@ -98,21 +98,21 @@ Gets a list of queries accessible to the user, ordered by creation time. **Warning:** Calling this API concurrently 10 or more times could result in throttling, service degradation, or a temporary ban. - + :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`ListQueryObjectsResponseQuery` .. py:method:: list_visualizations(id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[Visualization] Gets a list of visualizations on a query. - + :param id: str :param page_size: int (optional) :param page_token: str (optional) - + :returns: Iterator over :class:`Visualization` @@ -155,7 +155,7 @@ w.queries.delete(id=query.id) Updates a query. - + :param id: str :param update_mask: str The field mask must be a single string, with multiple fields separated by commas (no spaces). The @@ -163,7 +163,7 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. @@ -171,6 +171,6 @@ If true, automatically resolve alert display name conflicts. Otherwise, fail the request if the alert's display name conflicts with an existing alert's display name. :param query: :class:`UpdateQueryRequestQuery` (optional) - + :returns: :class:`Query` \ No newline at end of file diff --git a/docs/workspace/sql/queries_legacy.rst b/docs/workspace/sql/queries_legacy.rst index 61ff085a7..cf19da5c3 100644 --- a/docs/workspace/sql/queries_legacy.rst +++ b/docs/workspace/sql/queries_legacy.rst @@ -7,32 +7,32 @@ These endpoints are used for CRUD operations on query definitions. Query definitions include the target SQL warehouse, query text, name, description, tags, parameters, and visualizations. Queries can be scheduled using the `sql_task` type of the Jobs API, e.g. :method:jobs/create. - + **Note**: A new version of the Databricks SQL API is now available. Please see the latest version. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html .. py:method:: create( [, data_source_id: Optional[str], description: Optional[str], name: Optional[str], options: Optional[Any], parent: Optional[str], query: Optional[str], run_as_role: Optional[RunAsRole], tags: Optional[List[str]]]) -> LegacyQuery Creates a new query definition. Queries created with this endpoint belong to the authenticated user making the request. - + The `data_source_id` field specifies the ID of the SQL warehouse to run this query against. You can use the Data Sources API to see a complete list of available SQL warehouses. Or you can copy the `data_source_id` from an existing query. 
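For instance, a minimal sketch of resolving a `data_source_id` from a warehouse name with the Python SDK (the warehouse name is a placeholder):

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Each DataSource pairs a data_source_id with the SQL warehouse it
    # belongs to; match on the name shown in Databricks SQL.
    data_source_id = next(
        ds.id for ds in w.data_sources.list() if ds.name == "Shared Warehouse"
    )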
- + **Note**: You cannot add a visualization until you create the query. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:queries/create instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param data_source_id: str (optional) Data source ID maps to the ID of the data source used by the resource and is distinct from the warehouse ID. [Learn more] - + [Learn more]: https://docs.databricks.com/api/workspace/datasources/list :param description: str (optional) General description that conveys additional information about this query such as usage notes. @@ -50,7 +50,7 @@ Sets the **Run as** role for the object. Must be set to one of `"viewer"` (signifying "run as viewer" behavior) or `"owner"` (signifying "run as owner" behavior) :param tags: List[str] (optional) - + :returns: :class:`LegacyQuery` @@ -58,57 +58,57 @@ Moves a query to the trash. Trashed queries immediately disappear from searches and list views, and they cannot be used for alerts. The trash is deleted after 30 days. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:queries/delete instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param query_id: str - - + + .. py:method:: get(query_id: str) -> LegacyQuery Retrieve a query object definition along with contextual permissions information about the currently authenticated user. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:queries/get instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param query_id: str - + :returns: :class:`LegacyQuery` .. py:method:: list( [, order: Optional[str], page: Optional[int], page_size: Optional[int], q: Optional[str]]) -> Iterator[LegacyQuery] Gets a list of queries. Optionally, this list can be filtered by a search term. - + **Warning**: Calling this API concurrently 10 or more times could result in throttling, service degradation, or a temporary ban. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:queries/list instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param order: str (optional) Name of query attribute to order by. Default sort order is ascending. Append a dash (`-`) to order descending instead. - + - `name`: The name of the query. - + - `created_at`: The timestamp the query was created. - + - `runtime`: The time it took to run this query. This is blank for parameterized queries. A blank value is treated as the highest value for sorting. - + - `executed_at`: The timestamp when the query was last run. - + - `created_by`: The user name of the user that created the query. :param page: int (optional) Page number to retrieve. @@ -116,7 +116,7 @@ Number of queries to return per page. :param q: str (optional) Full text search term - + :returns: Iterator over :class:`LegacyQuery` @@ -124,33 +124,33 @@ Restore a query that has been moved to the trash. A restored query appears in list views and searches. You can use restored queries for alerts. - + **Note**: A new version of the Databricks SQL API is now available. Please see the latest version. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param query_id: str - - + + .. 
py:method:: update(query_id: str [, data_source_id: Optional[str], description: Optional[str], name: Optional[str], options: Optional[Any], query: Optional[str], run_as_role: Optional[RunAsRole], tags: Optional[List[str]]]) -> LegacyQuery Modify this query definition. - + **Note**: You cannot undo this operation. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:queries/update instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param query_id: str :param data_source_id: str (optional) Data source ID maps to the ID of the data source used by the resource and is distinct from the warehouse ID. [Learn more] - + [Learn more]: https://docs.databricks.com/api/workspace/datasources/list :param description: str (optional) General description that conveys additional information about this query such as usage notes. @@ -166,6 +166,6 @@ Sets the **Run as** role for the object. Must be set to one of `"viewer"` (signifying "run as viewer" behavior) or `"owner"` (signifying "run as owner" behavior) :param tags: List[str] (optional) - + :returns: :class:`LegacyQuery` \ No newline at end of file diff --git a/docs/workspace/sql/query_history.rst b/docs/workspace/sql/query_history.rst index 14d006928..1e16628ec 100644 --- a/docs/workspace/sql/query_history.rst +++ b/docs/workspace/sql/query_history.rst @@ -26,11 +26,11 @@ ) List the history of queries through SQL warehouses and serverless compute. - + You can filter by user ID, warehouse ID, status, and time range. Most recently started queries are returned first (up to max_results in request). The pagination token returned in response can be used to list subsequent query statuses. - + :param filter_by: :class:`QueryFilter` (optional) An optional filter object to limit query history results. Accepts parameters such as user IDs, endpoint IDs, and statuses to narrow the returned data. In a URL, the parameters of this filter are @@ -44,6 +44,6 @@ A token that can be used to get the next page of results. The token can contain characters that need to be encoded before using it in a URL. For example, the character '+' needs to be replaced by %2B. This field is optional. - + :returns: :class:`ListQueriesResponse` \ No newline at end of file diff --git a/docs/workspace/sql/query_visualizations.rst b/docs/workspace/sql/query_visualizations.rst index 578a7598a..78021cce8 100644 --- a/docs/workspace/sql/query_visualizations.rst +++ b/docs/workspace/sql/query_visualizations.rst @@ -10,25 +10,25 @@ .. py:method:: create( [, visualization: Optional[CreateVisualizationRequestVisualization]]) -> Visualization Adds a visualization to a query. - + :param visualization: :class:`CreateVisualizationRequestVisualization` (optional) - + :returns: :class:`Visualization` .. py:method:: delete(id: str) Removes a visualization. - + :param id: str - - + + .. py:method:: update(id: str, update_mask: str [, visualization: Optional[UpdateVisualizationRequestVisualization]]) -> Visualization Updates a visualization. - + :param id: str :param update_mask: str The field mask must be a single string, with multiple fields separated by commas (no spaces). The @@ -36,11 +36,11 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement.
It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. :param visualization: :class:`UpdateVisualizationRequestVisualization` (optional) - + :returns: :class:`Visualization` \ No newline at end of file diff --git a/docs/workspace/sql/query_visualizations_legacy.rst b/docs/workspace/sql/query_visualizations_legacy.rst index 56ebe9dfa..f36d54b58 100644 --- a/docs/workspace/sql/query_visualizations_legacy.rst +++ b/docs/workspace/sql/query_visualizations_legacy.rst @@ -6,21 +6,21 @@ This is an evolving API that facilitates the addition and removal of visualizations from existing queries within the Databricks Workspace. Data structures may change over time. - + **Note**: A new version of the Databricks SQL API is now available. Please see the latest version. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html .. py:method:: create(options: Any, query_id: str, type: str [, description: Optional[str], name: Optional[str]]) -> LegacyVisualization Creates a visualization in the query. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:queryvisualizations/create instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param options: Any The options object varies widely from one visualization type to the next and is unsupported. Databricks does not recommend modifying visualization settings in JSON. @@ -32,34 +32,34 @@ A short description of this visualization. This is not displayed in the UI. :param name: str (optional) The name of the visualization that appears on dashboards and the query screen. - + :returns: :class:`LegacyVisualization` .. py:method:: delete(id: str) Removes a visualization from the query. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:queryvisualizations/delete instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param id: str Widget ID returned by :method:queryvisualizations/create - - + + .. py:method:: update(id: str [, created_at: Optional[str], description: Optional[str], name: Optional[str], options: Optional[Any], query: Optional[LegacyQuery], type: Optional[str], updated_at: Optional[str]]) -> LegacyVisualization Updates a visualization in the query. - + **Note**: A new version of the Databricks SQL API is now available. Please use :method:queryvisualizations/update instead. [Learn more] - + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - + :param id: str The UUID for this visualization. :param created_at: str (optional) @@ -74,6 +74,6 @@ :param type: str (optional) The type of visualization: chart, table, pivot table, and so on. :param updated_at: str (optional) - + :returns: :class:`LegacyVisualization` \ No newline at end of file diff --git a/docs/workspace/sql/redash_config.rst b/docs/workspace/sql/redash_config.rst index 2f29b4d75..2ec9c8591 100644 --- a/docs/workspace/sql/redash_config.rst +++ b/docs/workspace/sql/redash_config.rst @@ -9,7 +9,7 @@ .. py:method:: get_config() -> ClientConfig Read workspace configuration for Redash-v2.
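A minimal call sketch:

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Returns the Redash-v2 client configuration for this workspace.
    config = w.redash_config.get_config()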
- - + + :returns: :class:`ClientConfig` \ No newline at end of file diff --git a/docs/workspace/sql/statement_execution.rst b/docs/workspace/sql/statement_execution.rst index 12bd8ba7a..c311b2e37 100644 --- a/docs/workspace/sql/statement_execution.rst +++ b/docs/workspace/sql/statement_execution.rst @@ -6,13 +6,13 @@ The Databricks SQL Statement Execution API can be used to execute SQL statements on a SQL warehouse and fetch the result. - + **Getting started** - + We suggest beginning with the [Databricks SQL Statement Execution API tutorial]. - + **Overview of statement execution and result fetching** - + Statement execution begins by issuing a :method:statementexecution/executeStatement request with a valid SQL statement and warehouse ID, along with optional parameters such as the data catalog and output format. If no other parameters are specified, the server will wait for up to 10s before returning a response. If @@ -20,7 +20,7 @@ array and metadata. Otherwise, if no result is available after the 10s timeout has expired, the response will provide the statement ID that can be used to poll for results by using a :method:statementexecution/getStatement request. - + You can specify whether the call should behave synchronously, asynchronously, or start synchronously with a fallback to asynchronous execution. This is controlled with the `wait_timeout` and `on_wait_timeout` settings. If `wait_timeout` is set between 5 and 50 seconds (default: 10s), the call waits for results up to @@ -28,7 +28,7 @@ statement ID. The `on_wait_timeout` setting specifies what should happen when the timeout is reached while the statement execution has not yet finished. This can be set to either `CONTINUE`, to fall back to asynchronous mode, or it can be set to `CANCEL`, which cancels the statement. - + In summary: - Synchronous mode - `wait_timeout=30s` and `on_wait_timeout=CANCEL` - The call waits up to 30 seconds; if the statement execution finishes within this time, the result data is returned directly in the response. If the execution takes longer than 30 seconds, the execution is canceled and the call returns @@ -40,38 +40,38 @@ seconds; if the statement execution finishes within this time, the result data is returned directly in the response. If the execution takes longer than 10 seconds, a statement ID is returned. The statement ID can be used to fetch status and results in the same way as in the asynchronous mode. - + Depending on the size, the result can be split into multiple chunks. If the statement execution is successful, the statement response contains a manifest and the first chunk of the result. The manifest contains schema information and provides metadata for each chunk in the result. Result chunks can be retrieved by index with :method:statementexecution/getStatementResultChunkN which may be called in any order and in parallel. For sequential fetching, each chunk, apart from the last, also contains a `next_chunk_index` and `next_chunk_internal_link` that point to the next chunk. - + A statement can be canceled with :method:statementexecution/cancelExecution. - + **Fetching result data: format and disposition** - + To specify the format of the result data, use the `format` field, which can be set to one of the following options: `JSON_ARRAY` (JSON), `ARROW_STREAM` ([Apache Arrow Columnar]), or `CSV`.
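To make the execute-then-poll flow above concrete, a hedged sketch (the warehouse ID is a placeholder):

.. code-block:: python

    import time

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import sql

    w = WorkspaceClient()

    # Wait up to 10 seconds; on timeout, fall back to asynchronous mode
    # instead of canceling the statement.
    resp = w.statement_execution.execute_statement(
        statement="SELECT 1",
        warehouse_id="<warehouse-id>",
        wait_timeout="10s",
        on_wait_timeout=sql.ExecuteStatementRequestOnWaitTimeout.CONTINUE,
    )

    # If the statement did not finish within the timeout, poll by statement ID.
    while resp.status.state in (sql.StatementState.PENDING, sql.StatementState.RUNNING):
        time.sleep(5)
        resp = w.statement_execution.get_statement(resp.statement_id)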
- + There are two ways to receive statement results, controlled by the `disposition` setting, which can be either `INLINE` or `EXTERNAL_LINKS`: - + - `INLINE`: In this mode, the result data is directly included in the response. It's best suited for smaller results. This mode can only be used with the `JSON_ARRAY` format. - + - `EXTERNAL_LINKS`: In this mode, the response provides links that can be used to download the result data in chunks separately. This approach is ideal for larger results and offers higher throughput. This mode can be used with all the formats: `JSON_ARRAY`, `ARROW_STREAM`, and `CSV`. - + By default, the API uses `format=JSON_ARRAY` and `disposition=INLINE`. - + **Limits and limitations** - + Note: The byte limit for INLINE disposition is based on internal storage metrics and will not exactly match the byte count of the actual payload. - + - Statements with `disposition=INLINE` are limited to 25 MiB and will fail when this limit is exceeded. - Statements with `disposition=EXTERNAL_LINKS` are limited to 100 GiB. Result sets larger than this limit will be truncated. Truncation is indicated by the `truncated` field in the result manifest. - The maximum @@ -84,33 +84,32 @@ once every 15 minutes. - The results are only available for one hour after success; polling does not extend this. - The SQL Execution API must be used for the entire lifecycle of the statement. For example, you cannot use the Jobs API to execute the command, and then the SQL Execution API to cancel it. - + [Apache Arrow Columnar]: https://arrow.apache.org/overview/ [Databricks SQL Statement Execution API tutorial]: https://docs.databricks.com/sql/api/sql-execution-tutorial.html - .. py:method:: cancel_execution(statement_id: str) Requests that an executing statement be canceled. Callers must poll for status to see the terminal state. - + :param statement_id: str The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls. - - + + .. py:method:: execute_statement(statement: str, warehouse_id: str [, byte_limit: Optional[int], catalog: Optional[str], disposition: Optional[Disposition], format: Optional[Format], on_wait_timeout: Optional[ExecuteStatementRequestOnWaitTimeout], parameters: Optional[List[StatementParameterListItem]], row_limit: Optional[int], schema: Optional[str], wait_timeout: Optional[str]]) -> StatementResponse Execute a SQL statement - + :param statement: str The SQL statement to execute. The statement can optionally be parameterized, see `parameters`. The maximum query text size is 16 MiB. :param warehouse_id: str Warehouse upon which to execute a statement. See also [What are SQL warehouses?] - + [What are SQL warehouses?]: https://docs.databricks.com/sql/admin/warehouse-type.html :param byte_limit: int (optional) Applies the given byte limit to the statement's result size. Byte counts are based on internal data @@ -120,37 +119,37 @@ explicitly set. :param catalog: str (optional) Sets default catalog for statement execution, similar to [`USE CATALOG`] in SQL. - + [`USE CATALOG`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html :param disposition: :class:`Disposition` (optional) :param format: :class:`Format` (optional) Statement execution supports three result formats: `JSON_ARRAY` (default), `ARROW_STREAM`, and `CSV`. - + Important: The formats `ARROW_STREAM` and `CSV` are supported only with `EXTERNAL_LINKS` disposition.
`JSON_ARRAY` is supported in `INLINE` and `EXTERNAL_LINKS` disposition. - + When specifying `format=JSON_ARRAY`, result data will be formatted as an array of arrays of values, where each value is either the *string representation* of a value, or `null`. For example, the output of `SELECT concat('id-', id) AS strCol, id AS intCol, null AS nullCol FROM range(3)` would look like this: - + ``` [ [ "id-1", "1", null ], [ "id-2", "2", null ], [ "id-3", "3", null ], ] ``` - + When specifying `format=JSON_ARRAY` and `disposition=EXTERNAL_LINKS`, each chunk in the result contains compact JSON with no indentation or extra whitespace. - + When specifying `format=ARROW_STREAM` and `disposition=EXTERNAL_LINKS`, each chunk in the result will be formatted as Apache Arrow Stream. See the [Apache Arrow streaming format]. - + When specifying `format=CSV` and `disposition=EXTERNAL_LINKS`, each chunk in the result will be a CSV according to the [RFC 4180] standard. All the column values will have a *string representation* similar to the `JSON_ARRAY` format, and `null` values will be encoded as “null”. Only the first chunk in the result would contain a header row with column names. For example, the output of `SELECT concat('id-', id) AS strCol, id AS intCol, null as nullCol FROM range(3)` would look like this: - + ``` strCol,intCol,nullCol id-1,1,null id-2,2,null id-3,3,null ``` - + [Apache Arrow streaming format]: https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format [RFC 4180]: https://www.rfc-editor.org/rfc/rfc4180 :param on_wait_timeout: :class:`ExecuteStatementRequestOnWaitTimeout` (optional) @@ -165,27 +164,27 @@ of a name, a value, and optionally a type. To represent a NULL value, the `value` field may be omitted or set to `null` explicitly. If the `type` field is omitted, the value is interpreted as a string. - + If the type is given, parameters will be checked for type correctness according to the given type. A value is correct if the provided string can be converted to the requested type using the `cast` function. The exact semantics are described in the section [`cast` function] of the SQL language reference. - + For example, the following statement contains two parameters, `my_name` and `my_date`: - + SELECT * FROM my_table WHERE name = :my_name AND date = :my_date - + The parameters can be passed in the request body as follows: - + { ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND date = :my_date", "parameters": [ { "name": "my_name", "value": "the name" }, { "name": "my_date", "value": "2020-01-01", "type": "DATE" } ] } - + Currently, positional parameters denoted by a `?` marker are not supported by the Databricks SQL Statement Execution API. - + Also see the section [Parameter markers] of the SQL language reference. - + [Parameter markers]: https://docs.databricks.com/sql/language-manual/sql-ref-parameter-marker.html [`cast` function]: https://docs.databricks.com/sql/language-manual/functions/cast.html :param row_limit: int (optional) @@ -194,22 +193,22 @@ the limit or not. :param schema: str (optional) Sets default schema for statement execution, similar to [`USE SCHEMA`] in SQL. - + [`USE SCHEMA`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-schema.html :param wait_timeout: str (optional) The time in seconds the call will wait for the statement's result set as `Ns`, where `N` can be set to 0 or to a value between 5 and 50.
- + When set to `0s`, the statement will execute in asynchronous mode and the call will not wait for the execution to finish. In this case, the call returns directly with `PENDING` state and a statement ID which can be used for polling with :method:statementexecution/getStatement. - + When set between 5 and 50 seconds, the call will behave synchronously up to this timeout and wait for the statement execution to finish. If the execution finishes within this time, the call returns immediately with a manifest and result data (or a `FAILED` state in case of an execution error). If the statement takes longer to execute, `on_wait_timeout` determines what should happen after the timeout is reached. - + :returns: :class:`StatementResponse` @@ -220,13 +219,13 @@ statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 with the state set. After at least 12 hours in terminal state, the statement is removed from the warehouse and further calls will receive an HTTP 404 response. - + **NOTE** This call currently might take up to 5 seconds to get the latest status and result. - + :param statement_id: str The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls. - + :returns: :class:`StatementResponse` @@ -238,11 +237,11 @@ can be used to fetch subsequent chunks. The response structure is identical to the nested `result` element described in the :method:statementexecution/getStatement request, and similarly includes the `next_chunk_index` and `next_chunk_internal_link` fields for simple iteration through the result set. - + :param statement_id: str The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls. :param chunk_index: int - + :returns: :class:`ResultData` \ No newline at end of file diff --git a/docs/workspace/sql/warehouses.rst b/docs/workspace/sql/warehouses.rst index 94911bc1e..6df2028e8 100644 --- a/docs/workspace/sql/warehouses.rst +++ b/docs/workspace/sql/warehouses.rst @@ -35,14 +35,14 @@ w.warehouses.delete(id=created.id) Creates a new SQL warehouse. - + :param auto_stop_mins: int (optional) The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped. - + Supported values: - Must be >= 0 mins for serverless warehouses - Must be == 0 or >= 10 mins for non-serverless warehouses - 0 indicates no autostop. - + Defaults to 120 mins :param channel: :class:`Channel` (optional) Channel Details @@ -50,14 +50,14 @@ Size of the clusters allocated for this warehouse. Increasing the size of a spark cluster allows you to run larger queries on it. If you want to increase the number of concurrent queries, please tune max_num_clusters. - + Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large - 2X-Large - 3X-Large - 4X-Large :param creator_name: str (optional) warehouse creator name :param enable_photon: bool (optional) Configures whether the warehouse should use Photon optimized clusters. - + Defaults to false. :param enable_serverless_compute: bool (optional) Configures whether the warehouse should use serverless compute @@ -65,30 +65,30 @@ Deprecated. Instance profile used to pass IAM role to the cluster :param max_num_clusters: int (optional) Maximum number of clusters that the autoscaler will create to handle concurrent queries. - + Supported values: - Must be >= min_num_clusters - Must be <= 30. - + Defaults to min_clusters if unset. 
:param min_num_clusters: int (optional) Minimum number of available clusters that will be maintained for this SQL warehouse. Increasing this will ensure that a larger number of clusters are always running and therefore may reduce the cold start time for new queries. This is similar to reserved vs. revocable cores in a resource manager. - + Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30) - + Defaults to 1 :param name: str (optional) Logical name for the cluster. - + Supported values: - Must be unique within an org. - Must be less than 100 characters. :param spot_instance_policy: :class:`SpotInstancePolicy` (optional) :param tags: :class:`EndpointTags` (optional) A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. - + Supported values: - Number of tags < 45. :param warehouse_type: :class:`CreateWarehouseRequestWarehouseType` (optional) - + :returns: Long-running operation waiter for :class:`GetWarehouseResponse`. See :method:wait_get_warehouse_running for more details. @@ -100,11 +100,11 @@ .. py:method:: delete(id: str) Deletes a SQL warehouse. - + :param id: str Required. Id of the SQL warehouse. - - + + .. py:method:: edit(id: str [, auto_stop_mins: Optional[int], channel: Optional[Channel], cluster_size: Optional[str], creator_name: Optional[str], enable_photon: Optional[bool], enable_serverless_compute: Optional[bool], instance_profile_arn: Optional[str], max_num_clusters: Optional[int], min_num_clusters: Optional[int], name: Optional[str], spot_instance_policy: Optional[SpotInstancePolicy], tags: Optional[EndpointTags], warehouse_type: Optional[EditWarehouseRequestWarehouseType]]) -> Wait[GetWarehouseResponse] @@ -143,15 +143,15 @@ w.warehouses.delete(id=created.id) Updates the configuration for a SQL warehouse. - + :param id: str Required. Id of the warehouse to configure. :param auto_stop_mins: int (optional) The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped. - + Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop. - + Defaults to 120 mins :param channel: :class:`Channel` (optional) Channel Details @@ -159,14 +159,14 @@ Size of the clusters allocated for this warehouse. Increasing the size of a spark cluster allows you to run larger queries on it. If you want to increase the number of concurrent queries, please tune max_num_clusters. - + Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large - 2X-Large - 3X-Large - 4X-Large :param creator_name: str (optional) warehouse creator name :param enable_photon: bool (optional) Configures whether the warehouse should use Photon optimized clusters. - + Defaults to false. :param enable_serverless_compute: bool (optional) Configures whether the warehouse should use serverless compute. @@ -174,30 +174,30 @@ Deprecated. Instance profile used to pass IAM role to the cluster :param max_num_clusters: int (optional) Maximum number of clusters that the autoscaler will create to handle concurrent queries. - + Supported values: - Must be >= min_num_clusters - Must be <= 30. - + Defaults to min_clusters if unset. :param min_num_clusters: int (optional) Minimum number of available clusters that will be maintained for this SQL warehouse. Increasing this will ensure that a larger number of clusters are always running and therefore may reduce the cold start time for new queries. This is similar to reserved vs. 
revocable cores in a resource manager. - + Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30) - + Defaults to 1 :param name: str (optional) Logical name for the cluster. - + Supported values: - Must be unique within an org. - Must be less than 100 characters. :param spot_instance_policy: :class:`SpotInstancePolicy` (optional) :param tags: :class:`EndpointTags` (optional) A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. - + Supported values: - Number of tags < 45. :param warehouse_type: :class:`EditWarehouseRequestWarehouseType` (optional) - + :returns: Long-running operation waiter for :class:`GetWarehouseResponse`. See :method:wait_get_warehouse_running for more details. @@ -236,20 +236,20 @@ w.warehouses.delete(id=created.id) Gets the information for a single SQL warehouse. - + :param id: str Required. Id of the SQL warehouse. - + :returns: :class:`GetWarehouseResponse` .. py:method:: get_permission_levels(warehouse_id: str) -> GetWarehousePermissionLevelsResponse Gets the permission levels that a user can have on an object. - + :param warehouse_id: str The SQL warehouse for which to get or manage permissions. - + :returns: :class:`GetWarehousePermissionLevelsResponse` @@ -257,18 +257,18 @@ Gets the permissions of a SQL warehouse. SQL warehouses can inherit permissions from their root object. - + :param warehouse_id: str The SQL warehouse for which to get or manage permissions. - + :returns: :class:`WarehousePermissions` .. py:method:: get_workspace_warehouse_config() -> GetWorkspaceWarehouseConfigResponse Gets the workspace level configuration that is shared by all SQL warehouses in a workspace. - - + + :returns: :class:`GetWorkspaceWarehouseConfigResponse` @@ -287,11 +287,11 @@ all = w.warehouses.list(sql.ListWarehousesRequest()) Lists all SQL warehouses that a user has manager permissions on. - + :param run_as_user_id: int (optional) Service Principal which will be used to fetch the list of warehouses. If not specified, the user from the session header is used. - + :returns: Iterator over :class:`EndpointInfo` @@ -299,18 +299,18 @@ Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. - + :param warehouse_id: str The SQL warehouse for which to get or manage permissions. :param access_control_list: List[:class:`WarehouseAccessControlRequest`] (optional) - + :returns: :class:`WarehousePermissions` .. py:method:: set_workspace_warehouse_config( [, channel: Optional[Channel], config_param: Optional[RepeatedEndpointConfPairs], data_access_config: Optional[List[EndpointConfPair]], enabled_warehouse_types: Optional[List[WarehouseTypePair]], global_param: Optional[RepeatedEndpointConfPairs], google_service_account: Optional[str], instance_profile_arn: Optional[str], security_policy: Optional[SetWorkspaceWarehouseConfigRequestSecurityPolicy], sql_configuration_parameters: Optional[RepeatedEndpointConfPairs]]) Sets the workspace level configuration that is shared by all SQL warehouses in a workspace. - + :param channel: :class:`Channel` (optional) Optional: Channel selection details :param config_param: :class:`RepeatedEndpointConfPairs` (optional) @@ -333,17 +333,17 @@ Security policy for warehouses :param sql_configuration_parameters: :class:`RepeatedEndpointConfPairs` (optional) SQL configuration parameters - - + + .. 
py:method:: start(id: str) -> Wait[GetWarehouseResponse] Starts a SQL warehouse. - + :param id: str Required. Id of the SQL warehouse. - + :returns: Long-running operation waiter for :class:`GetWarehouseResponse`. See :method:wait_get_warehouse_running for more details. @@ -355,10 +355,10 @@ .. py:method:: stop(id: str) -> Wait[GetWarehouseResponse] Stops a SQL warehouse. - + :param id: str Required. Id of the SQL warehouse. - + :returns: Long-running operation waiter for :class:`GetWarehouseResponse`. See :method:wait_get_warehouse_stopped for more details. @@ -371,11 +371,11 @@ Updates the permissions on a SQL warehouse. SQL warehouses can inherit permissions from their root object. - + :param warehouse_id: str The SQL warehouse for which to get or manage permissions. :param access_control_list: List[:class:`WarehouseAccessControlRequest`] (optional) - + :returns: :class:`WarehousePermissions` diff --git a/docs/workspace/tags/tag_policies.rst b/docs/workspace/tags/tag_policies.rst index 2be7f5360..d9aed39df 100644 --- a/docs/workspace/tags/tag_policies.rst +++ b/docs/workspace/tags/tag_policies.rst @@ -4,53 +4,53 @@ .. py:class:: TagPoliciesAPI - The Tag Policy API allows you to manage tag policies in Databricks. + The Tag Policy API allows you to manage policies for governed tags in Databricks. .. py:method:: create_tag_policy(tag_policy: TagPolicy) -> TagPolicy - Creates a new tag policy. - + Creates a new tag policy, making the associated tag key governed. + :param tag_policy: :class:`TagPolicy` - + :returns: :class:`TagPolicy` .. py:method:: delete_tag_policy(tag_key: str) - Deletes a tag policy by its key. - + Deletes a tag policy by its associated governed tag's key, leaving that tag key ungoverned. + :param tag_key: str - - + + .. py:method:: get_tag_policy(tag_key: str) -> TagPolicy - Gets a single tag policy by its key. - + Gets a single tag policy by its associated governed tag's key. + :param tag_key: str - + :returns: :class:`TagPolicy` .. py:method:: list_tag_policies( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[TagPolicy] - Lists all tag policies in the account. - + Lists the tag policies for all governed tags in the account. + :param page_size: int (optional) The maximum number of results to return in this request. Fewer results may be returned than requested. If unspecified or set to 0, this defaults to 1000. The maximum value is 1000; values above 1000 will be coerced down to 1000. :param page_token: str (optional) An optional page token received from a previous list tag policies call. - + :returns: Iterator over :class:`TagPolicy` .. py:method:: update_tag_policy(tag_key: str, tag_policy: TagPolicy, update_mask: str) -> TagPolicy - Updates an existing tag policy. - + Updates an existing tag policy for a single governed tag. + :param tag_key: str :param tag_policy: :class:`TagPolicy` :param update_mask: str @@ -59,10 +59,10 @@ `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only the entire collection field can be specified. Field names must exactly match the resource field names. - + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. 
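A hedged read-modify-write sketch (the tag key is a placeholder, and the `description` field on :class:`TagPolicy` is an assumption):

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Fetch the policy for a governed tag, change one field, and write back
    # only that field by naming it in the mask.
    policy = w.tag_policies.get_tag_policy(tag_key="cost-center")
    policy.description = "Cost center codes approved by finance"  # assumed field
    updated = w.tag_policies.update_tag_policy(
        tag_key="cost-center",
        tag_policy=policy,
        update_mask="description",
    )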
- + :returns: :class:`TagPolicy` \ No newline at end of file diff --git a/docs/workspace/vectorsearch/vector_search_endpoints.rst b/docs/workspace/vectorsearch/vector_search_endpoints.rst index 47a8fa59a..870348ebb 100644 --- a/docs/workspace/vectorsearch/vector_search_endpoints.rst +++ b/docs/workspace/vectorsearch/vector_search_endpoints.rst @@ -9,14 +9,14 @@ .. py:method:: create_endpoint(name: str, endpoint_type: EndpointType [, budget_policy_id: Optional[str]]) -> Wait[EndpointInfo] Create a new endpoint. - + :param name: str Name of the vector search endpoint :param endpoint_type: :class:`EndpointType` Type of endpoint :param budget_policy_id: str (optional) The budget policy id to be applied - + :returns: Long-running operation waiter for :class:`EndpointInfo`. See :method:wait_get_endpoint_vector_search_endpoint_online for more details. @@ -28,55 +28,55 @@ .. py:method:: delete_endpoint(endpoint_name: str) Delete a vector search endpoint. - + :param endpoint_name: str Name of the vector search endpoint - - + + .. py:method:: get_endpoint(endpoint_name: str) -> EndpointInfo Get details for a single vector search endpoint. - + :param endpoint_name: str Name of the endpoint - + :returns: :class:`EndpointInfo` .. py:method:: list_endpoints( [, page_token: Optional[str]]) -> Iterator[EndpointInfo] List all vector search endpoints in the workspace. - + :param page_token: str (optional) Token for pagination - + :returns: Iterator over :class:`EndpointInfo` .. py:method:: update_endpoint_budget_policy(endpoint_name: str, budget_policy_id: str) -> PatchEndpointBudgetPolicyResponse Update the budget policy of an endpoint - + :param endpoint_name: str Name of the vector search endpoint :param budget_policy_id: str The budget policy id to be applied (hima-sheth) TODO: remove this once we've migrated to usage policies - + :returns: :class:`PatchEndpointBudgetPolicyResponse` .. py:method:: update_endpoint_custom_tags(endpoint_name: str, custom_tags: List[CustomTag]) -> UpdateEndpointCustomTagsResponse Update the custom tags of an endpoint. - + :param endpoint_name: str Name of the vector search endpoint :param custom_tags: List[:class:`CustomTag`] The new custom tags for the vector search endpoint - + :returns: :class:`UpdateEndpointCustomTagsResponse` diff --git a/docs/workspace/vectorsearch/vector_search_indexes.rst b/docs/workspace/vectorsearch/vector_search_indexes.rst index 11417c9da..a5dd9af5f 100644 --- a/docs/workspace/vectorsearch/vector_search_indexes.rst +++ b/docs/workspace/vectorsearch/vector_search_indexes.rst @@ -6,7 +6,7 @@ **Index**: An efficient representation of your embedding vectors that supports real-time and efficient approximate nearest neighbor (ANN) search queries. - + There are 2 types of Vector Search indexes: - **Delta Sync Index**: An index that automatically syncs with a source Delta Table, automatically and incrementally updating the index as the underlying data in the Delta Table changes. - **Direct Vector Access Index**: An index that supports direct read and write of @@ -15,7 +15,7 @@ .. py:method:: create_index(name: str, endpoint_name: str, primary_key: str, index_type: VectorIndexType [, delta_sync_index_spec: Optional[DeltaSyncVectorIndexSpecRequest], direct_access_index_spec: Optional[DirectAccessVectorIndexSpec]]) -> VectorIndex Create a new index. - + :param name: str Name of the index :param endpoint_name: str @@ -27,62 +27,62 @@ Specification for Delta Sync Index. Required if `index_type` is `DELTA_SYNC`. 
:param direct_access_index_spec: :class:`DirectAccessVectorIndexSpec` (optional) Specification for Direct Vector Access Index. Required if `index_type` is `DIRECT_ACCESS`. - + :returns: :class:`VectorIndex`
.. py:method:: delete_data_vector_index(index_name: str, primary_keys: List[str]) -> DeleteDataVectorIndexResponse Handles the deletion of data from a specified vector index. - + :param index_name: str Name of the vector index where data is to be deleted. Must be a Direct Vector Access Index. :param primary_keys: List[str] List of primary keys for the data to be deleted. - + :returns: :class:`DeleteDataVectorIndexResponse`
.. py:method:: delete_index(index_name: str) Delete an index. - + :param index_name: str Name of the index - - + +
.. py:method:: get_index(index_name: str [, ensure_reranker_compatible: Optional[bool]]) -> VectorIndex Get an index. - + :param index_name: str Name of the index :param ensure_reranker_compatible: bool (optional) If true, the URL returned for the index is guaranteed to be compatible with the reranker. Currently this means we return the CP URL regardless of how the index is being accessed. If not set or set to false, the URL may still be compatible with the reranker depending on what URL we return. - + :returns: :class:`VectorIndex`
.. py:method:: list_indexes(endpoint_name: str [, page_token: Optional[str]]) -> Iterator[MiniVectorIndex] List all indexes in the given endpoint. - + :param endpoint_name: str Name of the endpoint :param page_token: str (optional) Token for pagination - + :returns: Iterator over :class:`MiniVectorIndex`
.. py:method:: query_index(index_name: str, columns: List[str] [, columns_to_rerank: Optional[List[str]], filters_json: Optional[str], num_results: Optional[int], query_text: Optional[str], query_type: Optional[str], query_vector: Optional[List[float]], reranker: Optional[RerankerConfig], score_threshold: Optional[float]]) -> QueryVectorIndexResponse Query the specified vector index. - + :param index_name: str Name of the vector index to query. :param columns: List[str] @@ -91,9 +91,9 @@ Column names used to retrieve data to send to the reranker. :param filters_json: str (optional) JSON string representing query filters. - + Example filters: - + - `{"id <": 5}`: Filter for id less than 5. - `{"id >": 5}`: Filter for id greater than 5. - `{"id <=": 5}`: Filter for id less than or equal to 5. - `{"id >=": 5}`: Filter for id greater than or equal to 5. - `{"id": 5}`: Filter for id equal to 5. @@ -109,7 +109,7 @@ :param reranker: :class:`RerankerConfig` (optional) :param score_threshold: float (optional) Threshold for the approximate nearest neighbor search. Defaults to 0.0. - + :returns: :class:`QueryVectorIndexResponse` @@ -117,14 +117,14 @@ Use `next_page_token` returned from a previous `QueryVectorIndex` or `QueryVectorIndexNextPage` request to fetch the next page of results. - + :param index_name: str Name of the vector index to query. :param endpoint_name: str (optional) Name of the endpoint. :param page_token: str (optional) Page token returned from a previous `QueryVectorIndex` or `QueryVectorIndexNextPage` API call. - + :returns: :class:`QueryVectorIndexResponse` @@ -132,35 +132,35 @@ Scan the specified vector index and return the first `num_results` entries after the exclusive `primary_key`. - + :param index_name: str Name of the vector index to scan. :param last_primary_key: str (optional) Primary key of the last entry returned in the previous scan. :param num_results: int (optional) Number of results to return. Defaults to 10.
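For illustration, a hedged sketch of paging through a scan. The index name is hypothetical, and the `data` and `last_primary_key` fields on :class:`ScanVectorIndexResponse` are assumptions here:

.. code:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Walk "main.default.my_index" in pages of 100 entries, resuming each
    # page from the last primary key returned by the previous one.
    last_key = None
    while True:
        page = w.vector_search_indexes.scan_index(
            index_name="main.default.my_index",
            last_primary_key=last_key,
            num_results=100,
        )
        if not page.data:
            break
        for entry in page.data:
            pass  # process the entry here
        last_key = page.last_primary_key  # assumed response field
        if last_key is None:
            break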
- + :returns: :class:`ScanVectorIndexResponse`
.. py:method:: sync_index(index_name: str) Triggers a synchronization process for a specified vector index. - + :param index_name: str Name of the vector index to synchronize. Must be a Delta Sync Index. - - + +
.. py:method:: upsert_data_vector_index(index_name: str, inputs_json: str) -> UpsertDataVectorIndexResponse Handles the upserting of data into a specified vector index. - + :param index_name: str Name of the vector index where data is to be upserted. Must be a Direct Vector Access Index. :param inputs_json: str JSON string representing the data to be upserted. - + :returns: :class:`UpsertDataVectorIndexResponse` \ No newline at end of file
diff --git a/docs/workspace/workspace/git_credentials.rst b/docs/workspace/workspace/git_credentials.rst index 2dd9451c2..2bc2fc3d1 100644 --- a/docs/workspace/workspace/git_credentials.rst +++ b/docs/workspace/workspace/git_credentials.rst @@ -5,9 +5,9 @@
.. py:class:: GitCredentialsAPI Registers a personal access token for Databricks to perform operations on behalf of the user. - + See [more info]. - + [more info]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html
.. py:method:: create(git_provider: str [, git_username: Optional[str], is_default_for_provider: Optional[bool], name: Optional[str], personal_access_token: Optional[str]]) -> CreateCredentialsResponse @@ -29,7 +29,7 @@ Creates a Git credential entry for the user. Only one Git credential per user is supported, so any attempt to create credentials when an entry already exists will fail. Use the PATCH endpoint to update existing credentials, or the DELETE endpoint to delete existing credentials. - + :param git_provider: str Git provider. This field is case-insensitive. The available Git providers are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, `gitHubEnterprise`, `bitbucketServer`, @@ -47,20 +47,20 @@ :param personal_access_token: str (optional) The personal access token used to authenticate to the corresponding Git provider. For certain providers, support may exist for other types of scoped access tokens. [Learn more]. - + [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html - + :returns: :class:`CreateCredentialsResponse`
.. py:method:: delete(credential_id: int) Deletes the specified Git credential. - + :param credential_id: int The ID for the corresponding credential to access. - - + +
.. py:method:: get(credential_id: int) -> GetCredentialsResponse @@ -82,10 +82,10 @@ w.git_credentials.delete(credential_id=cr.credential_id) Gets the Git credential with the specified credential ID. - + :param credential_id: int The ID for the corresponding credential to access. - + :returns: :class:`GetCredentialsResponse` @@ -103,8 +103,8 @@ list = w.git_credentials.list() Lists the calling user's Git credentials. One credential per user is supported. - - + + :returns: Iterator over :class:`CredentialInfo` @@ -134,7 +134,7 @@ w.git_credentials.delete(credential_id=cr.credential_id) Updates the specified Git credential. - + :param credential_id: int The ID for the corresponding credential to access. :param git_provider: str @@ -154,8 +154,8 @@ :param personal_access_token: str (optional) The personal access token used to authenticate to the corresponding Git provider. For certain providers, support may exist for other types of scoped access tokens. [Learn more].
- + [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html - - + + \ No newline at end of file
diff --git a/docs/workspace/workspace/repos.rst b/docs/workspace/workspace/repos.rst index ceb833465..ad0f1a6b8 100644 --- a/docs/workspace/workspace/repos.rst +++ b/docs/workspace/workspace/repos.rst @@ -6,11 +6,11 @@
The Repos API allows users to manage their Git repos. Users can use the API to access all repos that they have manage permissions on. - + Databricks Repos is a visual Git client in Databricks. It supports common Git operations such as cloning a repository, committing and pushing, pulling, branch management, and visual comparison of diffs when committing. - + Within Repos you can develop code in notebooks or other files and follow data science and engineering code development best practices using Git for version control, collaboration, and CI/CD. @@ -40,7 +40,7 @@ Creates a repo in the workspace and links it to the remote Git repo specified. Note that repos created programmatically must be linked to a remote Git repo, unlike repos created in the browser. - + :param url: str URL of the Git repository to be linked. :param provider: str @@ -53,18 +53,18 @@ :param sparse_checkout: :class:`SparseCheckout` (optional) If specified, the repo will be created with sparse checkout enabled. You cannot enable/disable sparse checkout after the repo is created. - + :returns: :class:`CreateRepoResponse`
.. py:method:: delete(repo_id: int) Deletes the specified repo. - + :param repo_id: int The ID for the corresponding repo to delete. - - + +
.. py:method:: get(repo_id: int) -> GetRepoResponse @@ -94,30 +94,30 @@ w.repos.delete(repo_id=ri.id) Returns the repo with the given repo ID. - + :param repo_id: int ID of the Git folder (repo) object in the workspace. - + :returns: :class:`GetRepoResponse`
.. py:method:: get_permission_levels(repo_id: str) -> GetRepoPermissionLevelsResponse Gets the permission levels that a user can have on an object. - + :param repo_id: str The repo for which to get or manage permissions. - + :returns: :class:`GetRepoPermissionLevelsResponse`
.. py:method:: get_permissions(repo_id: str) -> RepoPermissions Gets the permissions of a repo. Repos can inherit permissions from their root object. - + :param repo_id: str The repo for which to get or manage permissions. - + :returns: :class:`RepoPermissions` @@ -137,7 +137,7 @@ Returns repos that the calling user has Manage permissions on. Use `next_page_token` to iterate through additional pages. - + :param next_page_token: str (optional) Token used to get the next page of results. If not specified, returns the first page of results as well as a next page token if there are more results. @@ -145,7 +145,7 @@ Filters repos that have paths starting with the given path prefix. If not provided, or when an effectively empty prefix (`/` or `/Workspace`) is provided, Git folders (repos) from `/Workspace/Repos` will be served. - + :returns: Iterator over :class:`RepoInfo` @@ -153,11 +153,11 @@ Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. - + :param repo_id: str The repo for which to get or manage permissions. :param access_control_list: List[:class:`RepoAccessControlRequest`] (optional) - + :returns: :class:`RepoPermissions` @@ -189,7 +189,7 @@ Updates the repo to a different branch or tag, or updates the repo to the latest commit on the same branch.
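For illustration, a minimal sketch of checking out a branch; the repo ID and branch name are hypothetical:

.. code:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Point the Git folder (repo) at the latest commit of "main".
    w.repos.update(repo_id=123456789, branch="main")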
- + :param repo_id: int ID of the Git folder (repo) object in the workspace. :param branch: str (optional) @@ -201,17 +201,17 @@ Tag that the local version of the repo is checked out to. Updating the repo to a tag puts the repo in a detached HEAD state. Before committing new changes, you must update the repo to a branch instead of the detached HEAD. - - + +
.. py:method:: update_permissions(repo_id: str [, access_control_list: Optional[List[RepoAccessControlRequest]]]) -> RepoPermissions Updates the permissions on a repo. Repos can inherit permissions from their root object. - + :param repo_id: str The repo for which to get or manage permissions. :param access_control_list: List[:class:`RepoAccessControlRequest`] (optional) - + :returns: :class:`RepoPermissions` \ No newline at end of file
diff --git a/docs/workspace/workspace/secrets.rst b/docs/workspace/workspace/secrets.rst index 245554d32..b0281d28a 100644 --- a/docs/workspace/workspace/secrets.rst +++ b/docs/workspace/workspace/secrets.rst @@ -5,11 +5,11 @@
.. py:class:: SecretsAPI The Secrets API allows you to manage secrets, secret scopes, and access permissions. - + Sometimes accessing data requires that you authenticate to external data sources through JDBC. Instead of directly entering your credentials into a notebook, use Databricks secrets to store your credentials and reference them in notebooks and jobs. - + Administrators, secret creators, and users granted permission can read Databricks secrets. While Databricks makes an effort to redact secret values that might be displayed in notebooks, it is not possible to prevent such users from reading secrets. @@ -38,38 +38,38 @@ w.secrets.delete_scope(scope=scope_name) Creates a new secret scope. - + The scope name must consist of alphanumeric characters, dashes, underscores, and periods, and may not exceed 128 characters. - + Example request: - + .. code:: - + { "scope": "my-simple-databricks-scope", "initial_manage_principal": "users", "scope_backend_type": "databricks|azure_keyvault", # below is only required if scope type is azure_keyvault "backend_azure_keyvault": { "resource_id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/xxxx/providers/Microsoft.KeyVault/vaults/xxxx", "tenant_id": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", "dns_name": "https://xxxx.vault.azure.net/" } } - + If ``initial_manage_principal`` is specified, the initial ACL applied to the scope is applied to the supplied principal (user or group) with ``MANAGE`` permissions. The only supported principal for this option is the group ``users``, which contains all users in the workspace. If ``initial_manage_principal`` is not specified, the initial ACL with ``MANAGE`` permission applied to the scope is assigned to the API request issuer's user identity. - + If ``scope_backend_type`` is ``azure_keyvault``, a secret scope is created with secrets from a given Azure KeyVault. The caller must provide the keyvault_resource_id and the tenant_id for the key vault. If ``scope_backend_type`` is ``databricks`` or is unspecified, an empty secret scope is created and stored in Databricks's own storage. - + Throws ``RESOURCE_ALREADY_EXISTS`` if a scope with the given name already exists. Throws ``RESOURCE_LIMIT_EXCEEDED`` if the maximum number of scopes in the workspace is exceeded. Throws ``INVALID_PARAMETER_VALUE`` if the scope name is invalid. Throws ``BAD_REQUEST`` if the request violated constraints.
Throws ``CUSTOMER_UNAUTHORIZED`` if a normal user attempts to create a scope with a name reserved for Databricks internal usage. Throws ``UNAUTHENTICATED`` if unable to verify the user's access permission on the Azure KeyVault. - + :param scope: str Scope name requested by the user. Scope names are unique. :param backend_azure_keyvault: :class:`AzureKeyVaultSecretScopeMetadata` (optional) @@ -78,99 +78,99 @@ The principal that is initially granted ``MANAGE`` permission to the created scope. :param scope_backend_type: :class:`ScopeBackendType` (optional) The backend type the scope will be created with. If not specified, it will default to ``DATABRICKS``. - - + +
.. py:method:: delete_acl(scope: str, principal: str) Deletes the given ACL on the given scope. - + Users must have the ``MANAGE`` permission to invoke this API. - + Example request: - + .. code:: - + { "scope": "my-secret-scope", "principal": "data-scientists" } - + Throws ``RESOURCE_DOES_NOT_EXIST`` if no such secret scope, principal, or ACL exists. Throws ``PERMISSION_DENIED`` if the user does not have permission to make this API call. Throws ``INVALID_PARAMETER_VALUE`` if the permission or principal is invalid. - + :param scope: str The name of the scope to remove permissions from. :param principal: str The principal to remove an existing ACL from. - - + +
.. py:method:: delete_scope(scope: str) Deletes a secret scope. - + Example request: - + .. code:: - + { "scope": "my-secret-scope" } - + Throws ``RESOURCE_DOES_NOT_EXIST`` if the scope does not exist. Throws ``PERMISSION_DENIED`` if the user does not have permission to make this API call. Throws ``BAD_REQUEST`` if a system user attempts to delete an internal secret scope. - + :param scope: str Name of the scope to delete. - - + +
.. py:method:: delete_secret(scope: str, key: str) Deletes the secret stored in this secret scope. You must have ``WRITE`` or ``MANAGE`` permission on the Secret Scope. - + Example request: - + .. code:: - + { "scope": "my-secret-scope", "key": "my-secret-key" } - + Throws ``RESOURCE_DOES_NOT_EXIST`` if no such secret scope or secret exists. Throws ``PERMISSION_DENIED`` if the user does not have permission to make this API call. Throws ``BAD_REQUEST`` if a system user attempts to delete an internal secret, or the request is made against an Azure KeyVault-backed scope. - + :param scope: str The name of the scope that contains the secret to delete. :param key: str Name of the secret to delete. - - + +
.. py:method:: get_acl(scope: str, principal: str) -> AclItem Describes the details about the given ACL, such as the group and permission. - + Users must have the ``MANAGE`` permission to invoke this API. - + Example response: - + .. code:: - + { "principal": "data-scientists", "permission": "READ" } - + Throws ``RESOURCE_DOES_NOT_EXIST`` if no such secret scope exists. Throws ``PERMISSION_DENIED`` if the user does not have permission to make this API call. Throws ``INVALID_PARAMETER_VALUE`` if the permission or principal is invalid. - + :param scope: str The name of the scope to fetch ACL information from. :param principal: str The principal to fetch ACL information for. - + :returns: :class:`AclItem` @@ -178,34 +178,34 @@ Gets a secret for a given key and scope. This API can only be called from the DBUtils interface. Users need the READ permission to make this call. - + Example response: - + .. code:: - + { "key": "my-string-key", "value": "<bytes>" }
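For illustration, a sketch of reading this value through the SDK rather than DBUtils. Treating the `value` field as base64-encoded, and decoding the result as UTF-8 text, are assumptions here; the scope and key names are hypothetical:

.. code:: python

    import base64

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Fetch the raw secret payload.
    resp = w.secrets.get_secret(scope="my-secret-scope", key="my-string-key")

    # Assumption: `value` carries the secret as base64-encoded bytes;
    # decode it and interpret the result as UTF-8 text.
    secret_text = base64.b64decode(resp.value).decode("utf-8")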
The interpretation of the bytes is determined by the caller in DBUtils and the type the data is decoded into. - + Throws ``RESOURCE_DOES_NOT_EXIST`` if no such secret or secret scope exists. Throws ``PERMISSION_DENIED`` if the user does not have permission to make this API call. - + Note: This is explicitly an undocumented API. It also doesn't need to be supported for the /preview prefix, because it's not a customer-facing API (i.e. only used for DBUtils SecretUtils to fetch secrets). - + Throws ``RESOURCE_DOES_NOT_EXIST`` if no such secret scope or secret exists. Throws ``BAD_REQUEST`` if normal user calls get secret outside of a notebook. AKV specific errors: Throws ``INVALID_PARAMETER_VALUE`` if secret name is not alphanumeric or too long. Throws ``PERMISSION_DENIED`` if secret manager cannot access AKV with 403 error Throws ``MALFORMED_REQUEST`` if secret manager cannot access AKV with any other 4xx error - + :param scope: str The name of the scope that contains the secret. :param key: str Name of the secret to fetch value information. - + :returns: :class:`GetSecretResponse` @@ -235,22 +235,22 @@ w.secrets.delete_scope(scope=scope_name) Lists the ACLs set on the given scope. - + Users must have the ``MANAGE`` permission to invoke this API. - + Example response: - + .. code:: - + { "acls": [{ "principal": "admins", "permission": "MANAGE" },{ "principal": "data-scientists", "permission": "READ" }] } - + Throws ``RESOURCE_DOES_NOT_EXIST`` if no such secret scope exists. Throws ``PERMISSION_DENIED`` if the user does not have permission to make this API call. - + :param scope: str The name of the scope to fetch ACL information from. - + :returns: Iterator over :class:`AclItem` @@ -268,17 +268,17 @@ scopes = w.secrets.list_scopes() Lists all secret scopes available in the workspace. - + Example response: - + .. code:: - + { "scopes": [{ "name": "my-databricks-scope", "backend_type": "DATABRICKS" },{ "name": "mount-points", "backend_type": "DATABRICKS" }] } - + Throws ``PERMISSION_DENIED`` if the user does not have permission to make this API call. - - + + :returns: Iterator over :class:`SecretScope` @@ -309,22 +309,22 @@ Lists the secret keys that are stored at this scope. This is a metadata-only operation; secret data cannot be retrieved using this API. Users need the READ permission to make this call. - + Example response: - + .. code:: - + { "secrets": [ { "key": "my-string-key"", "last_updated_timestamp": "1520467595000" }, { "key": "my-byte-key", "last_updated_timestamp": "1520467595000" }, ] } - + The lastUpdatedTimestamp returned is in milliseconds since epoch. - + Throws ``RESOURCE_DOES_NOT_EXIST`` if no such secret scope exists. Throws ``PERMISSION_DENIED`` if the user does not have permission to make this API call. - + :param scope: str The name of the scope to list secrets within. - + :returns: Iterator over :class:`SecretMetadata` @@ -364,40 +364,40 @@ Creates or overwrites the ACL associated with the given principal (user or group) on the specified scope point. In general, a user or group will use the most powerful permission available to them, and permissions are ordered as follows: - + * ``MANAGE`` - Allowed to change ACLs, and read and write to this secret scope. * ``WRITE`` - Allowed to read and write to this secret scope. * ``READ`` - Allowed to read this secret scope and list what secrets are available. - + Note that in general, secret values can only be read from within a command on a cluster (for example, through a notebook). 
There is no API to read the actual secret value material outside of a cluster. However, the user's permission will be applied based on who is executing the command, and they must have at least READ permission. - + Users must have the ``MANAGE`` permission to invoke this API. - + Example request: - + .. code:: - + { "scope": "my-secret-scope", "principal": "data-scientists", "permission": "READ" } - + The principal is a user or group name corresponding to an existing Databricks principal to be granted or revoked access. - + Throws ``RESOURCE_DOES_NOT_EXIST`` if no such secret scope exists. Throws ``RESOURCE_ALREADY_EXISTS`` if a permission for the principal already exists. Throws ``INVALID_PARAMETER_VALUE`` if the permission or principal is invalid. Throws ``PERMISSION_DENIED`` if the user does not have permission to make this API call. - + :param scope: str The name of the scope to apply permissions to. :param principal: str The principal to which the permission is applied. :param permission: :class:`AclPermission` The permission level applied to the principal. - - + +
.. py:method:: put_secret(scope: str, key: str [, bytes_value: Optional[str], string_value: Optional[str]]) @@ -429,26 +429,26 @@ same name, this command overwrites the existing secret's value. The server encrypts the secret using the secret scope's encryption settings before storing it. You must have ``WRITE`` or ``MANAGE`` permission on the secret scope. - + The secret key must consist of alphanumeric characters, dashes, underscores, and periods, and cannot exceed 128 characters. The maximum allowed secret value size is 128 KB. The maximum number of secrets in a given scope is 1000. - + Example request: - + .. code:: - + { "scope": "my-databricks-scope", "key": "my-string-key", "string_value": "foobar" } - + The input fields "string_value" or "bytes_value" specify the type of the secret, which will determine the value returned when the secret value is requested. Exactly one must be specified. - + Throws ``RESOURCE_DOES_NOT_EXIST`` if no such secret scope exists. Throws ``RESOURCE_LIMIT_EXCEEDED`` if the maximum number of secrets in the scope is exceeded. Throws ``INVALID_PARAMETER_VALUE`` if the request parameters are invalid. Throws ``PERMISSION_DENIED`` if the user does not have permission to make this API call. Throws ``MALFORMED_REQUEST`` if the request is incorrectly formatted or conflicting. Throws ``BAD_REQUEST`` if the request is made against an Azure KeyVault-backed scope. - + :param scope: str The name of the scope with which the secret will be associated. :param key: str @@ -457,6 +457,6 @@ If specified, the value will be stored as bytes. :param string_value: str (optional) If specified, note that the value will be stored in UTF-8 (MB4) form. - - + + \ No newline at end of file
diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst index 4fba581e8..2a80bdc42 100644 --- a/docs/workspace/workspace/workspace.rst +++ b/docs/workspace/workspace/workspace.rst @@ -5,7 +5,7 @@
.. py:class:: WorkspaceExt The Workspace API allows you to list, import, export, and delete notebooks and folders. - + A notebook is a web-based interface to a document that contains runnable code, visualizations, and explanatory text. @@ -15,17 +15,17 @@ If `path` does not exist, this call returns an error `RESOURCE_DOES_NOT_EXIST`. * If `path` is a non-empty directory and `recursive` is set to `false`, this call returns an error `DIRECTORY_NOT_EMPTY`.
- + Object deletion cannot be undone and deleting a directory recursively is not atomic. - + :param path: str The absolute path of the notebook or directory. :param recursive: bool (optional) The flag that specifies whether to delete the object recursively. It is `false` by default. Please note that deleting a directory is not atomic. If it fails in the middle, some of the objects under this directory may be deleted and cannot be undone. - - + +
.. py:method:: download(path: str [, format: ExportFormat]) -> BinaryIO @@ -82,39 +82,39 @@ export_response = w.workspace.export(format=workspace.ExportFormat.SOURCE, path=notebook) Exports an object or the contents of an entire directory. - + If `path` does not exist, this call returns an error `RESOURCE_DOES_NOT_EXIST`. - + If the exported data would exceed the size limit, this call returns `MAX_NOTEBOOK_SIZE_EXCEEDED`. Currently, this API does not support exporting a library. - + :param path: str The absolute path of the object or directory. Exporting a directory is only supported for the `DBC`, `SOURCE`, and `AUTO` formats. :param format: :class:`ExportFormat` (optional) This specifies the format of the exported file. By default, this is `SOURCE`. - + The value is case sensitive. - + - `SOURCE`: The notebook is exported as source code. Directory exports will not include non-notebook entries. - `HTML`: The notebook is exported as an HTML file. - `JUPYTER`: The notebook is exported as a Jupyter/IPython Notebook file. - `DBC`: The notebook is exported in Databricks archive format. Directory exports will not include non-notebook entries. - `R_MARKDOWN`: The notebook is exported to R Markdown format. - `AUTO`: The object or directory is exported depending on the object's type. Directory exports will include notebooks and workspace files. - + :returns: :class:`ExportResponse`
.. py:method:: get_permission_levels(workspace_object_type: str, workspace_object_id: str) -> GetWorkspaceObjectPermissionLevelsResponse Gets the permission levels that a user can have on an object. - + :param workspace_object_type: str The workspace object type for which to get or manage permissions. :param workspace_object_id: str The workspace object for which to get or manage permissions. - + :returns: :class:`GetWorkspaceObjectPermissionLevelsResponse` @@ -122,12 +122,12 @@ Gets the permissions of a workspace object. Workspace objects can inherit permissions from their parent objects or root object. - + :param workspace_object_type: str The workspace object type for which to get or manage permissions. :param workspace_object_id: str The workspace object for which to get or manage permissions. - + :returns: :class:`WorkspaceObjectPermissions` @@ -150,10 +150,10 @@ Gets the status of an object or a directory. If `path` does not exist, this call returns an error `RESOURCE_DOES_NOT_EXIST`. - + :param path: str The absolute path of the notebook or directory. - + :returns: :class:`ObjectInfo` @@ -187,20 +187,20 @@ `RESOURCE_ALREADY_EXISTS`. To import a directory, you can use either the `DBC` format or the `SOURCE` format with the `language` field unset. To import a single file as `SOURCE`, you must set the `language` field. - + :param path: str The absolute path of the object or directory. Importing a directory is only supported for the `DBC` and `SOURCE` formats. :param content: str (optional) The base64-encoded content. This has a limit of 10 MB. - + If the limit (10MB) is exceeded, an exception with error code **MAX_NOTEBOOK_SIZE_EXCEEDED** is thrown.
This parameter might be absent, and instead a posted file is used. :param format: :class:`ImportFormat` (optional) This specifies the format of the file to be imported. - + The value is case sensitive. - + - `AUTO`: The item is imported depending on an analysis of the item's extension and the header content provided in the request. If the item is imported as a notebook, then the item's extension is automatically removed. - `SOURCE`: The notebook or directory is imported as source code. - `HTML`: @@ -212,8 +212,8 @@ :param overwrite: bool (optional) The flag that specifies whether to overwrite an existing object. It is `false` by default. For the `DBC` format, `overwrite` is not supported since the archive may contain a directory. - - + +
.. py:method:: list(path: str [, notebooks_modified_after: int, recursive: bool = False]) -> ObjectInfo @@ -245,15 +245,15 @@ Creates the specified directory (and necessary parent directories if they do not exist). If there is an object (not a directory) at any prefix of the input path, this call returns an error `RESOURCE_ALREADY_EXISTS`. - + Note that if this operation fails it may have succeeded in creating some of the necessary parent directories. - + :param path: str The absolute path of the directory. If the parent directories do not exist, it will also create them. If the directory already exists, this command will do nothing and succeed. - - + +
.. py:method:: set_permissions(workspace_object_type: str, workspace_object_id: str [, access_control_list: Optional[List[WorkspaceObjectAccessControlRequest]]]) -> WorkspaceObjectPermissions @@ -261,13 +261,13 @@ Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct permissions if none are specified. Objects can inherit permissions from their parent objects or root object. - + :param workspace_object_type: str The workspace object type for which to get or manage permissions. :param workspace_object_id: str The workspace object for which to get or manage permissions. :param access_control_list: List[:class:`WorkspaceObjectAccessControlRequest`] (optional) - + :returns: :class:`WorkspaceObjectPermissions` @@ -275,13 +275,13 @@ Updates the permissions on a workspace object. Workspace objects can inherit permissions from their parent objects or root object. - + :param workspace_object_type: str The workspace object type for which to get or manage permissions. :param workspace_object_id: str The workspace object for which to get or manage permissions. :param access_control_list: List[:class:`WorkspaceObjectAccessControlRequest`] (optional) - + :returns: :class:`WorkspaceObjectPermissions`
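For illustration, a hedged sketch tying together `get_status` and `update_permissions`; the notebook path, the group name, and the `notebooks` object type string are assumptions here:

.. code:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import workspace

    w = WorkspaceClient()

    # Resolve the numeric object ID of a notebook, then grant CAN_READ on
    # it to a group. Unlike set_permissions, this updates the listed
    # entries without replacing the object's other direct permissions.
    obj = w.workspace.get_status(path="/Users/someone@example.com/my-notebook")
    w.workspace.update_permissions(
        workspace_object_type="notebooks",
        workspace_object_id=str(obj.object_id),
        access_control_list=[
            workspace.WorkspaceObjectAccessControlRequest(
                group_name="data-scientists",
                permission_level=workspace.WorkspaceObjectPermissionLevel.CAN_READ,
            )
        ],
    )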