From 782a565ecd9b73a54c45200192122e5b8452d8b6 Mon Sep 17 00:00:00 2001
From: hectorcast-db
Date: Fri, 1 Nov 2024 10:30:17 +0100
Subject: [PATCH 1/3] [Internal] Always write message for manual test
 execution (#811)

## Changes
The old script could not be run from master due to security restrictions, and
there is no reliable way to detect whether a user has access to secrets.

## Tests
Opened a PR in the Java SDK from a fork:
https://github.com/databricks/databricks-sdk-java/pull/375
---
 .github/workflows/external-message.yml  | 68 ++-----------------
 .github/workflows/integration-tests.yml | 19 +++----
 2 files changed, 15 insertions(+), 72 deletions(-)

diff --git a/.github/workflows/external-message.yml b/.github/workflows/external-message.yml
index 3392fc8e..a2d9dc2e 100644
--- a/.github/workflows/external-message.yml
+++ b/.github/workflows/external-message.yml
@@ -11,7 +11,6 @@ on:
   branches:
     - main
 
-
 jobs:
   comment-on-pr:
     runs-on: ubuntu-latest
@@ -19,73 +18,15 @@ jobs:
       pull-requests: write
 
     steps:
-      # NOTE: The following checks may not be accurate depending on Org or Repo settings.
-      - name: Check user and potential secret access
-        id: check-secrets-access
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          USER_LOGIN="${{ github.event.pull_request.user.login }}"
-          REPO_OWNER="${{ github.repository_owner }}"
-          REPO_NAME="${{ github.event.repository.name }}"
-
-          echo "Pull request opened by: $USER_LOGIN"
-
-          # Check if PR is from a fork
-          IS_FORK=$([[ "${{ github.event.pull_request.head.repo.full_name }}" != "${{ github.repository }}" ]] && echo "true" || echo "false")
-
-          HAS_ACCESS="false"
-
-          # Check user's permission level on the repository
-          USER_PERMISSION=$(gh api repos/$REPO_OWNER/$REPO_NAME/collaborators/$USER_LOGIN/permission --jq '.permission')
-
-          if [[ "$USER_PERMISSION" == "admin" || "$USER_PERMISSION" == "write" ]]; then
-            HAS_ACCESS="true"
-          elif [[ "$USER_PERMISSION" == "read" ]]; then
-            # For read access, we need to check if the user has been explicitly granted secret access
-            # This information is not directly available via API, so we'll make an assumption
-            # that read access does not imply secret access
-            HAS_ACCESS="false"
-          fi
-
-          # Check if repo owner is an organization
-          IS_ORG=$(gh api users/$REPO_OWNER --jq '.type == "Organization"')
-
-          if [[ "$IS_ORG" == "true" && "$HAS_ACCESS" == "false" ]]; then
-            # Check if user is a member of any team with write or admin access to the repo
-            TEAMS_WITH_ACCESS=$(gh api repos/$REPO_OWNER/$REPO_NAME/teams --jq '.[] | select(.permission == "push" or .permission == "admin") | .slug')
-            for team in $TEAMS_WITH_ACCESS; do
-              IS_TEAM_MEMBER=$(gh api orgs/$REPO_OWNER/teams/$team/memberships/$USER_LOGIN --silent && echo "true" || echo "false")
-              if [[ "$IS_TEAM_MEMBER" == "true" ]]; then
-                HAS_ACCESS="true"
-                break
-              fi
-            done
-          fi
-
-          # If it's a fork, set HAS_ACCESS to false regardless of other checks
-          if [[ "$IS_FORK" == "true" ]]; then
-            HAS_ACCESS="false"
-          fi
-
-          echo "has_secrets_access=$HAS_ACCESS" >> $GITHUB_OUTPUT
-          if [[ "$HAS_ACCESS" == "true" ]]; then
-            echo "User $USER_LOGIN likely has access to secrets"
-          else
-            echo "User $USER_LOGIN likely does not have access to secrets"
-          fi
-
       - uses: actions/checkout@v4
 
       - name: Delete old comments
-        if: steps.check-secrets-access.outputs.has_secrets_access != 'true'
         env:
           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         run: |
          # Delete previous comment if it exists
          previous_comment_ids=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \
-            --jq '.[] | 
select(.body | startswith("")) | .id') + --jq '.[] | select(.body | startswith("")) | .id') echo "Previous comment IDs: $previous_comment_ids" # Iterate over each comment ID and delete the comment if [ ! -z "$previous_comment_ids" ]; then @@ -96,14 +37,15 @@ jobs: fi - name: Comment on PR - if: steps.check-secrets-access.outputs.has_secrets_access != 'true' env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} COMMIT_SHA: ${{ github.event.pull_request.head.sha }} run: | gh pr comment ${{ github.event.pull_request.number }} --body \ - " - Run integration tests manually: + " + If integration tests don't run automatically, an authorized user can run them manually by following the instructions below: + + Trigger: [go/deco-tests-run/sdk-py](https://go/deco-tests-run/sdk-py) Inputs: diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 88d3e865..93a6c267 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -12,20 +12,21 @@ jobs: check-token: name: Check secrets access runs-on: ubuntu-latest + environment: "test-trigger-is" outputs: has_token: ${{ steps.set-token-status.outputs.has_token }} steps: - - name: Check if GITHUB_TOKEN is set + - name: Check if DECO_WORKFLOW_TRIGGER_APP_ID is set id: set-token-status run: | - if [ -z "${{ secrets.GITHUB_TOKEN }}" ]; then - echo "GITHUB_TOKEN is empty. User has no access to tokens." - echo "::set-output name=has_token::false" - else - echo "GITHUB_TOKEN is set. User has no access to tokens." - echo "::set-output name=has_token::true" - fi - + if [ -z "${{ secrets.DECO_WORKFLOW_TRIGGER_APP_ID }}" ]; then + echo "DECO_WORKFLOW_TRIGGER_APP_ID is empty. User has no access to secrets." + echo "::set-output name=has_token::false" + else + echo "DECO_WORKFLOW_TRIGGER_APP_ID is set. User has access to secrets." + echo "::set-output name=has_token::true" + fi + trigger-tests: name: Trigger Tests runs-on: ubuntu-latest From 216709fe2cc766c66e5e43ac114ea36d51eedb25 Mon Sep 17 00:00:00 2001 From: Renaud Hartert Date: Tue, 5 Nov 2024 17:24:32 +0100 Subject: [PATCH 2/3] [Release] Release v0.37.0 (#813) ### Bug Fixes * Correctly generate classes with nested body fields ([#808](https://github.com/databricks/databricks-sdk-py/pull/808)). ### Internal Changes * Add `cleanrooms` package ([#806](https://github.com/databricks/databricks-sdk-py/pull/806)). * Add test instructions for external contributors ([#804](https://github.com/databricks/databricks-sdk-py/pull/804)). * Always write message for manual test execution ([#811](https://github.com/databricks/databricks-sdk-py/pull/811)). * Automatically trigger integration tests on PR ([#800](https://github.com/databricks/databricks-sdk-py/pull/800)). * Better isolate ML serving auth unit tests ([#803](https://github.com/databricks/databricks-sdk-py/pull/803)). * Move templates in the code generator ([#809](https://github.com/databricks/databricks-sdk-py/pull/809)). ### API Changes: * Added [w.aibi_dashboard_embedding_access_policy](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/settings/aibi_dashboard_embedding_access_policy.html) workspace-level service and [w.aibi_dashboard_embedding_approved_domains](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/settings/aibi_dashboard_embedding_approved_domains.html) workspace-level service. * Added [w.credentials](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/credentials.html) workspace-level service. 
* Added `app_deployment` field for `databricks.sdk.service.apps.CreateAppDeploymentRequest`.
 * Added `app` field for `databricks.sdk.service.apps.CreateAppRequest`.
 * Added `app` field for `databricks.sdk.service.apps.UpdateAppRequest`.
 * Added `table` field for `databricks.sdk.service.catalog.CreateOnlineTableRequest`.
 * Added `azure_aad` field for `databricks.sdk.service.catalog.GenerateTemporaryTableCredentialResponse`.
 * Added `full_name` field for `databricks.sdk.service.catalog.StorageCredentialInfo`.
 * Added `dashboard` field for `databricks.sdk.service.dashboards.CreateDashboardRequest`.
 * Added `schedule` field for `databricks.sdk.service.dashboards.CreateScheduleRequest`.
 * Added `subscription` field for `databricks.sdk.service.dashboards.CreateSubscriptionRequest`.
 * Added `warehouse_id` field for `databricks.sdk.service.dashboards.Schedule`.
 * Added `dashboard` field for `databricks.sdk.service.dashboards.UpdateDashboardRequest`.
 * Added `schedule` field for `databricks.sdk.service.dashboards.UpdateScheduleRequest`.
 * Added `page_token` field for `databricks.sdk.service.oauth2.ListServicePrincipalSecretsRequest`.
 * Added `next_page_token` field for `databricks.sdk.service.oauth2.ListServicePrincipalSecretsResponse`.
 * Added `connection_name` field for `databricks.sdk.service.pipelines.IngestionGatewayPipelineDefinition`.
 * Added `is_no_public_ip_enabled` field for `databricks.sdk.service.provisioning.CreateWorkspaceRequest`.
 * Added `external_customer_info` and `is_no_public_ip_enabled` fields for `databricks.sdk.service.provisioning.Workspace`.
 * Added `last_used_day` field for `databricks.sdk.service.settings.TokenInfo`.
 * Changed `create()` method for [w.apps](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/apps.html) workspace-level service with new required argument order.
 * Changed `execute_message_query()` method for [w.genie](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/genie.html) workspace-level service. New request type is `databricks.sdk.service.dashboards.GenieExecuteMessageQueryRequest` dataclass.
 * Changed `create()`, `create_schedule()`, `create_subscription()` and `update_schedule()` methods for [w.lakeview](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/lakeview.html) workspace-level service with new required argument order.
 * Removed [w.clean_rooms](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/clean_rooms.html) workspace-level service.
 * Removed `prev_page_token` field for `databricks.sdk.service.jobs.Run`.
 * Removed `deployment_id`, `mode` and `source_code_path` fields for `databricks.sdk.service.apps.CreateAppDeploymentRequest`.
 * Removed `description`, `name` and `resources` fields for `databricks.sdk.service.apps.CreateAppRequest`.
 * Removed `description` and `resources` fields for `databricks.sdk.service.apps.UpdateAppRequest`.
 * Removed `name` and `spec` fields for `databricks.sdk.service.catalog.CreateOnlineTableRequest`.
 * Removed `display_name`, `parent_path`, `serialized_dashboard` and `warehouse_id` fields for `databricks.sdk.service.dashboards.CreateDashboardRequest`.
* Removed `cron_schedule`, `display_name` and `pause_status` fields for `databricks.sdk.service.dashboards.CreateScheduleRequest`. * Removed `subscriber` field for `databricks.sdk.service.dashboards.CreateSubscriptionRequest`. * Removed `display_name`, `etag`, `serialized_dashboard` and `warehouse_id` fields for `databricks.sdk.service.dashboards.UpdateDashboardRequest`. * Removed `cron_schedule`, `display_name`, `etag` and `pause_status` fields for `databricks.sdk.service.dashboards.UpdateScheduleRequest`. OpenAPI SHA: 5285ce76f81314f342c1702d5c2ad4ef42488781, Date: 2024-11-04 --- .codegen/_openapi_sha | 2 +- CHANGELOG.md | 57 ++ databricks/sdk/__init__.py | 46 +- databricks/sdk/service/apps.py | 234 +---- databricks/sdk/service/catalog.py | 833 +++++++++++++++++- databricks/sdk/service/compute.py | 43 +- databricks/sdk/service/dashboards.py | 302 +------ databricks/sdk/service/iam.py | 6 +- databricks/sdk/service/jobs.py | 144 +-- databricks/sdk/service/marketplace.py | 1 + databricks/sdk/service/ml.py | 7 +- databricks/sdk/service/oauth2.py | 36 +- databricks/sdk/service/pipelines.py | 21 +- databricks/sdk/service/provisioning.py | 53 ++ databricks/sdk/service/serving.py | 4 +- databricks/sdk/service/settings.py | 320 ++++++- databricks/sdk/service/sharing.py | 618 ------------- databricks/sdk/service/sql.py | 14 +- databricks/sdk/service/workspace.py | 8 +- databricks/sdk/version.py | 2 +- .../oauth2/service_principal_secrets.rst | 9 +- docs/account/provisioning/workspaces.rst | 6 +- docs/dbdataclasses/apps.rst | 12 - docs/dbdataclasses/catalog.rst | 96 +- docs/dbdataclasses/dashboards.rst | 23 +- docs/dbdataclasses/marketplace.rst | 3 + docs/dbdataclasses/provisioning.rst | 4 + docs/dbdataclasses/settings.rst | 35 + docs/dbdataclasses/sharing.rst | 119 --- docs/dbdataclasses/sql.rst | 4 +- docs/workspace/apps/apps.rst | 40 +- docs/workspace/catalog/external_locations.rst | 1 - docs/workspace/catalog/online_tables.rst | 19 +- .../workspace/catalog/storage_credentials.rst | 1 - docs/workspace/compute/cluster_policies.rst | 3 +- docs/workspace/compute/clusters.rst | 5 +- docs/workspace/compute/instance_pools.rst | 3 +- docs/workspace/dashboards/lakeview.rst | 60 +- docs/workspace/iam/permissions.rst | 3 +- docs/workspace/iam/users.rst | 3 +- docs/workspace/index.rst | 1 + docs/workspace/jobs/jobs.rst | 17 +- docs/workspace/ml/experiments.rst | 3 +- docs/workspace/ml/model_registry.rst | 4 +- docs/workspace/pipelines/pipelines.rst | 3 +- docs/workspace/provisioning/credentials.rst | 123 +++ docs/workspace/provisioning/index.rst | 10 + docs/workspace/serving/serving_endpoints.rst | 4 +- ...aibi_dashboard_embedding_access_policy.rst | 42 + ...i_dashboard_embedding_approved_domains.rst | 42 + docs/workspace/settings/index.rst | 2 + docs/workspace/settings/settings.rst | 12 + docs/workspace/settings/token_management.rst | 3 +- docs/workspace/sharing/index.rst | 1 - docs/workspace/sql/statement_execution.rst | 9 +- docs/workspace/sql/warehouses.rst | 3 +- docs/workspace/workspace/repos.rst | 3 +- docs/workspace/workspace/workspace.rst | 5 +- 58 files changed, 1928 insertions(+), 1559 deletions(-) create mode 100644 docs/workspace/provisioning/credentials.rst create mode 100644 docs/workspace/provisioning/index.rst create mode 100644 docs/workspace/settings/aibi_dashboard_embedding_access_policy.rst create mode 100644 docs/workspace/settings/aibi_dashboard_embedding_approved_domains.rst diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 2d9cb6d8..00e5d84f 100644 --- 
a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -cf9c61453990df0f9453670f2fe68e1b128647a2 \ No newline at end of file +5285ce76f81314f342c1702d5c2ad4ef42488781 \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 458921ee..409fce70 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,62 @@ # Version changelog +## [Release] Release v0.37.0 + +### Bug Fixes + + * Correctly generate classes with nested body fields ([#808](https://github.com/databricks/databricks-sdk-py/pull/808)). + + +### Internal Changes + + * Add `cleanrooms` package ([#806](https://github.com/databricks/databricks-sdk-py/pull/806)). + * Add test instructions for external contributors ([#804](https://github.com/databricks/databricks-sdk-py/pull/804)). + * Always write message for manual test execution ([#811](https://github.com/databricks/databricks-sdk-py/pull/811)). + * Automatically trigger integration tests on PR ([#800](https://github.com/databricks/databricks-sdk-py/pull/800)). + * Better isolate ML serving auth unit tests ([#803](https://github.com/databricks/databricks-sdk-py/pull/803)). + * Move templates in the code generator ([#809](https://github.com/databricks/databricks-sdk-py/pull/809)). + + +### API Changes: + + * Added [w.aibi_dashboard_embedding_access_policy](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/settings/aibi_dashboard_embedding_access_policy.html) workspace-level service and [w.aibi_dashboard_embedding_approved_domains](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/settings/aibi_dashboard_embedding_approved_domains.html) workspace-level service. + * Added [w.credentials](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/credentials.html) workspace-level service. + * Added `app_deployment` field for `databricks.sdk.service.apps.CreateAppDeploymentRequest`. + * Added `app` field for `databricks.sdk.service.apps.CreateAppRequest`. + * Added `app` field for `databricks.sdk.service.apps.UpdateAppRequest`. + * Added `table` field for `databricks.sdk.service.catalog.CreateOnlineTableRequest`. + * Added `azure_aad` field for `databricks.sdk.service.catalog.GenerateTemporaryTableCredentialResponse`. + * Added `full_name` field for `databricks.sdk.service.catalog.StorageCredentialInfo`. + * Added `dashboard` field for `databricks.sdk.service.dashboards.CreateDashboardRequest`. + * Added `schedule` field for `databricks.sdk.service.dashboards.CreateScheduleRequest`. + * Added `subscription` field for `databricks.sdk.service.dashboards.CreateSubscriptionRequest`. + * Added `warehouse_id` field for `databricks.sdk.service.dashboards.Schedule`. + * Added `dashboard` field for `databricks.sdk.service.dashboards.UpdateDashboardRequest`. + * Added `schedule` field for `databricks.sdk.service.dashboards.UpdateScheduleRequest`. + * Added `page_token` field for `databricks.sdk.service.oauth2.ListServicePrincipalSecretsRequest`. + * Added `next_page_token` field for `databricks.sdk.service.oauth2.ListServicePrincipalSecretsResponse`. + * Added `connection_name` field for `databricks.sdk.service.pipelines.IngestionGatewayPipelineDefinition`. + * Added `is_no_public_ip_enabled` field for `databricks.sdk.service.provisioning.CreateWorkspaceRequest`. + * Added `external_customer_info` and `is_no_public_ip_enabled` fields for `databricks.sdk.service.provisioning.Workspace`. + * Added `last_used_day` field for `databricks.sdk.service.settings.TokenInfo`. 
+ * Changed `create()` method for [w.apps](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/apps.html) workspace-level service with new required argument order (see the migration sketch after this list).
+ * Changed `execute_message_query()` method for [w.genie](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/genie.html) workspace-level service. New request type is `databricks.sdk.service.dashboards.GenieExecuteMessageQueryRequest` dataclass.
+ * Changed `create()`, `create_schedule()`, `create_subscription()` and `update_schedule()` methods for [w.lakeview](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/lakeview.html) workspace-level service with new required argument order.
+ * Removed [w.clean_rooms](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/clean_rooms.html) workspace-level service.
+ * Removed `deployment_id`, `mode` and `source_code_path` fields for `databricks.sdk.service.apps.CreateAppDeploymentRequest`.
+ * Removed `description`, `name` and `resources` fields for `databricks.sdk.service.apps.CreateAppRequest`.
+ * Removed `description` and `resources` fields for `databricks.sdk.service.apps.UpdateAppRequest`.
+ * Removed `name` and `spec` fields for `databricks.sdk.service.catalog.CreateOnlineTableRequest`.
+ * Removed `display_name`, `parent_path`, `serialized_dashboard` and `warehouse_id` fields for `databricks.sdk.service.dashboards.CreateDashboardRequest`.
+ * Removed `cron_schedule`, `display_name` and `pause_status` fields for `databricks.sdk.service.dashboards.CreateScheduleRequest`.
+ * Removed `subscriber` field for `databricks.sdk.service.dashboards.CreateSubscriptionRequest`.
+ * Removed `display_name`, `etag`, `serialized_dashboard` and `warehouse_id` fields for `databricks.sdk.service.dashboards.UpdateDashboardRequest`.
+ * Removed `cron_schedule`, `display_name`, `etag` and `pause_status` fields for `databricks.sdk.service.dashboards.UpdateScheduleRequest`.
+ * Removed `prev_page_token` field for `databricks.sdk.service.jobs.Run`.
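+
+ A minimal migration sketch for the new apps request objects (illustrative only: the app name
+ and workspace path are hypothetical, and a configured `WorkspaceClient` is assumed):
+
+ ```python
+ from databricks.sdk import WorkspaceClient
+ from databricks.sdk.service.apps import App, AppDeployment
+
+ w = WorkspaceClient()
+
+ # Before v0.37.0: w.apps.create(name="my-app", description="demo")
+ app = w.apps.create_and_wait(app=App(name="my-app", description="demo"))
+
+ # Before v0.37.0: w.apps.deploy(app_name="my-app", source_code_path=...)
+ dep = w.apps.deploy_and_wait(
+     app_name="my-app",
+     app_deployment=AppDeployment(source_code_path="/Workspace/Users/me/my-app"))
+ ```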
+ +OpenAPI SHA: 5285ce76f81314f342c1702d5c2ad4ef42488781, Date: 2024-11-04 + ## [Release] Release v0.36.0 ### Breaking Changes diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index 15994646..746f8d7e 100755 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -15,7 +15,7 @@ AccountMetastoresAPI, AccountStorageCredentialsAPI, ArtifactAllowlistsAPI, CatalogsAPI, - ConnectionsAPI, + ConnectionsAPI, CredentialsAPI, ExternalLocationsAPI, FunctionsAPI, GrantsAPI, MetastoresAPI, ModelVersionsAPI, OnlineTablesAPI, @@ -64,26 +64,18 @@ Workspace, WorkspacesAPI) from databricks.sdk.service.serving import (ServingEndpointsAPI, ServingEndpointsDataPlaneAPI) -from databricks.sdk.service.settings import (AccountIpAccessListsAPI, - AccountSettingsAPI, - AutomaticClusterUpdateAPI, - ComplianceSecurityProfileAPI, - CredentialsManagerAPI, - CspEnablementAccountAPI, - DefaultNamespaceAPI, - DisableLegacyAccessAPI, - DisableLegacyDbfsAPI, - DisableLegacyFeaturesAPI, - EnhancedSecurityMonitoringAPI, - EsmEnablementAccountAPI, - IpAccessListsAPI, - NetworkConnectivityAPI, - NotificationDestinationsAPI, - PersonalComputeAPI, - RestrictWorkspaceAdminsAPI, - SettingsAPI, TokenManagementAPI, - TokensAPI, WorkspaceConfAPI) -from databricks.sdk.service.sharing import (CleanRoomsAPI, ProvidersAPI, +from databricks.sdk.service.settings import ( + AccountIpAccessListsAPI, AccountSettingsAPI, + AibiDashboardEmbeddingAccessPolicyAPI, + AibiDashboardEmbeddingApprovedDomainsAPI, AutomaticClusterUpdateAPI, + ComplianceSecurityProfileAPI, CredentialsManagerAPI, + CspEnablementAccountAPI, DefaultNamespaceAPI, DisableLegacyAccessAPI, + DisableLegacyDbfsAPI, DisableLegacyFeaturesAPI, + EnhancedSecurityMonitoringAPI, EsmEnablementAccountAPI, IpAccessListsAPI, + NetworkConnectivityAPI, NotificationDestinationsAPI, PersonalComputeAPI, + RestrictWorkspaceAdminsAPI, SettingsAPI, TokenManagementAPI, TokensAPI, + WorkspaceConfAPI) +from databricks.sdk.service.sharing import (ProvidersAPI, RecipientActivationAPI, RecipientsAPI, SharesAPI) from databricks.sdk.service.sql import (AlertsAPI, AlertsLegacyAPI, @@ -183,7 +175,6 @@ def __init__(self, self._apps = AppsAPI(self._api_client) self._artifact_allowlists = ArtifactAllowlistsAPI(self._api_client) self._catalogs = CatalogsAPI(self._api_client) - self._clean_rooms = CleanRoomsAPI(self._api_client) self._cluster_policies = ClusterPoliciesAPI(self._api_client) self._clusters = ClustersExt(self._api_client) self._command_execution = CommandExecutionAPI(self._api_client) @@ -193,6 +184,7 @@ def __init__(self, self._consumer_listings = ConsumerListingsAPI(self._api_client) self._consumer_personalization_requests = ConsumerPersonalizationRequestsAPI(self._api_client) self._consumer_providers = ConsumerProvidersAPI(self._api_client) + self._credentials = CredentialsAPI(self._api_client) self._credentials_manager = CredentialsManagerAPI(self._api_client) self._current_user = CurrentUserAPI(self._api_client) self._dashboard_widgets = DashboardWidgetsAPI(self._api_client) @@ -312,11 +304,6 @@ def catalogs(self) -> CatalogsAPI: """A catalog is the first layer of Unity Catalog’s three-level namespace.""" return self._catalogs - @property - def clean_rooms(self) -> CleanRoomsAPI: - """A clean room is a secure, privacy-protecting environment where two or more parties can share sensitive enterprise data, including customer data, for measurements, insights, activation and other use cases.""" - return self._clean_rooms - @property def cluster_policies(self) -> 
ClusterPoliciesAPI:
         """You can use cluster policies to control users' ability to configure clusters based on a set of rules."""
@@ -362,6 +349,11 @@ def consumer_providers(self) -> ConsumerProvidersAPI:
         """Providers are the entities that publish listings to the Marketplace."""
         return self._consumer_providers
 
+    @property
+    def credentials(self) -> CredentialsAPI:
+        """A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant."""
+        return self._credentials
+
     @property
     def credentials_manager(self) -> CredentialsManagerAPI:
         """Credentials manager interacts with Identity Providers to perform token exchanges using stored credentials and refresh tokens."""
diff --git a/databricks/sdk/service/apps.py b/databricks/sdk/service/apps.py
index 52796d0e..4123ea08 100755
--- a/databricks/sdk/service/apps.py
+++ b/databricks/sdk/service/apps.py
@@ -611,70 +611,6 @@ def from_dict(cls, d: Dict[str, any]) -> ComputeStatus:
         return cls(message=d.get('message', None), state=_enum(d, 'state', ComputeState))
 
 
-@dataclass
-class CreateAppDeploymentRequest:
-    app_name: Optional[str] = None
-    """The name of the app."""
-
-    deployment_id: Optional[str] = None
-    """The unique id of the deployment."""
-
-    mode: Optional[AppDeploymentMode] = None
-    """The mode of which the deployment will manage the source code."""
-
-    source_code_path: Optional[str] = None
-    """The workspace file system path of the source code used to create the app deployment. This is
-    different from `deployment_artifacts.source_code_path`, which is the path used by the deployed
-    app. The former refers to the original source code location of the app in the workspace during
-    deployment creation, whereas the latter provides a system generated stable snapshotted source
-    code path used by the deployment."""
-
-    def as_dict(self) -> dict:
-        """Serializes the CreateAppDeploymentRequest into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.app_name is not None: body['app_name'] = self.app_name
-        if self.deployment_id is not None: body['deployment_id'] = self.deployment_id
-        if self.mode is not None: body['mode'] = self.mode.value
-        if self.source_code_path is not None: body['source_code_path'] = self.source_code_path
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, any]) -> CreateAppDeploymentRequest:
-        """Deserializes the CreateAppDeploymentRequest from a dictionary."""
-        return cls(app_name=d.get('app_name', None),
-                   deployment_id=d.get('deployment_id', None),
-                   mode=_enum(d, 'mode', AppDeploymentMode),
-                   source_code_path=d.get('source_code_path', None))
-
-
-@dataclass
-class CreateAppRequest:
-    name: str
-    """The name of the app. The name must contain only lowercase alphanumeric characters and hyphens.
- It must be unique within the workspace.""" - - description: Optional[str] = None - """The description of the app.""" - - resources: Optional[List[AppResource]] = None - """Resources for the app.""" - - def as_dict(self) -> dict: - """Serializes the CreateAppRequest into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.description is not None: body['description'] = self.description - if self.name is not None: body['name'] = self.name - if self.resources: body['resources'] = [v.as_dict() for v in self.resources] - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> CreateAppRequest: - """Deserializes the CreateAppRequest from a dictionary.""" - return cls(description=d.get('description', None), - name=d.get('name', None), - resources=_repeated_dict(d, 'resources', AppResource)) - - @dataclass class GetAppPermissionLevelsResponse: permission_levels: Optional[List[AppPermissionsDescription]] = None @@ -746,34 +682,6 @@ class StopAppRequest: """The name of the app.""" -@dataclass -class UpdateAppRequest: - name: str - """The name of the app. The name must contain only lowercase alphanumeric characters and hyphens. - It must be unique within the workspace.""" - - description: Optional[str] = None - """The description of the app.""" - - resources: Optional[List[AppResource]] = None - """Resources for the app.""" - - def as_dict(self) -> dict: - """Serializes the UpdateAppRequest into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.description is not None: body['description'] = self.description - if self.name is not None: body['name'] = self.name - if self.resources: body['resources'] = [v.as_dict() for v in self.resources] - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> UpdateAppRequest: - """Deserializes the UpdateAppRequest from a dictionary.""" - return cls(description=d.get('description', None), - name=d.get('name', None), - resources=_repeated_dict(d, 'resources', AppResource)) - - class AppsAPI: """Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.""" @@ -813,29 +721,31 @@ def wait_get_app_active(self, attempt += 1 raise TimeoutError(f'timed out after {timeout}: {status_message}') - def wait_get_app_stopped(self, - name: str, - timeout=timedelta(minutes=20), - callback: Optional[Callable[[App], None]] = None) -> App: + def wait_get_deployment_app_succeeded( + self, + app_name: str, + deployment_id: str, + timeout=timedelta(minutes=20), + callback: Optional[Callable[[AppDeployment], None]] = None) -> AppDeployment: deadline = time.time() + timeout.total_seconds() - target_states = (ComputeState.STOPPED, ) - failure_states = (ComputeState.ERROR, ) + target_states = (AppDeploymentState.SUCCEEDED, ) + failure_states = (AppDeploymentState.FAILED, ) status_message = 'polling...' 
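+        # Poll with linear backoff: sleep `attempt` seconds per iteration,
+        # capped at 10 seconds, until the deployment reaches a target state,
+        # a failure state, or the deadline expires.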
attempt = 1 while time.time() < deadline: - poll = self.get(name=name) - status = poll.compute_status.state + poll = self.get_deployment(app_name=app_name, deployment_id=deployment_id) + status = poll.status.state status_message = f'current status: {status}' - if poll.compute_status: - status_message = poll.compute_status.message + if poll.status: + status_message = poll.status.message if status in target_states: return poll if callback: callback(poll) if status in failure_states: - msg = f'failed to reach STOPPED, got {status}: {status_message}' + msg = f'failed to reach SUCCEEDED, got {status}: {status_message}' raise OperationFailed(msg) - prefix = f"name={name}" + prefix = f"app_name={app_name}, deployment_id={deployment_id}" sleep = attempt if sleep > 10: # sleep 10s max per attempt @@ -845,31 +755,29 @@ def wait_get_app_stopped(self, attempt += 1 raise TimeoutError(f'timed out after {timeout}: {status_message}') - def wait_get_deployment_app_succeeded( - self, - app_name: str, - deployment_id: str, - timeout=timedelta(minutes=20), - callback: Optional[Callable[[AppDeployment], None]] = None) -> AppDeployment: + def wait_get_app_stopped(self, + name: str, + timeout=timedelta(minutes=20), + callback: Optional[Callable[[App], None]] = None) -> App: deadline = time.time() + timeout.total_seconds() - target_states = (AppDeploymentState.SUCCEEDED, ) - failure_states = (AppDeploymentState.FAILED, ) + target_states = (ComputeState.STOPPED, ) + failure_states = (ComputeState.ERROR, ) status_message = 'polling...' attempt = 1 while time.time() < deadline: - poll = self.get_deployment(app_name=app_name, deployment_id=deployment_id) - status = poll.status.state + poll = self.get(name=name) + status = poll.compute_status.state status_message = f'current status: {status}' - if poll.status: - status_message = poll.status.message + if poll.compute_status: + status_message = poll.compute_status.message if status in target_states: return poll if callback: callback(poll) if status in failure_states: - msg = f'failed to reach SUCCEEDED, got {status}: {status_message}' + msg = f'failed to reach STOPPED, got {status}: {status_message}' raise OperationFailed(msg) - prefix = f"app_name={app_name}, deployment_id={deployment_id}" + prefix = f"name={name}" sleep = attempt if sleep > 10: # sleep 10s max per attempt @@ -879,43 +787,25 @@ def wait_get_deployment_app_succeeded( attempt += 1 raise TimeoutError(f'timed out after {timeout}: {status_message}') - def create(self, - name: str, - *, - description: Optional[str] = None, - resources: Optional[List[AppResource]] = None) -> Wait[App]: + def create(self, *, app: Optional[App] = None) -> Wait[App]: """Create an app. Creates a new app. - :param name: str - The name of the app. The name must contain only lowercase alphanumeric characters and hyphens. It - must be unique within the workspace. - :param description: str (optional) - The description of the app. - :param resources: List[:class:`AppResource`] (optional) - Resources for the app. + :param app: :class:`App` (optional) :returns: Long-running operation waiter for :class:`App`. See :method:wait_get_app_active for more details. 
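+
+        Example (an illustrative sketch; assumes a configured :class:`WorkspaceClient`,
+        and the app name is hypothetical):
+
+        .. code-block:: python
+
+            from databricks.sdk import WorkspaceClient
+            from databricks.sdk.service.apps import App
+
+            w = WorkspaceClient()
+            waiter = w.apps.create(app=App(name="my-app"))
+            app = waiter.result()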
""" - body = {} - if description is not None: body['description'] = description - if name is not None: body['name'] = name - if resources is not None: body['resources'] = [v.as_dict() for v in resources] + body = app headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } op_response = self._api.do('POST', '/api/2.0/apps', body=body, headers=headers) return Wait(self.wait_get_app_active, response=App.from_dict(op_response), name=op_response['name']) - def create_and_wait(self, - name: str, - *, - description: Optional[str] = None, - resources: Optional[List[AppResource]] = None, - timeout=timedelta(minutes=20)) -> App: - return self.create(description=description, name=name, resources=resources).result(timeout=timeout) + def create_and_wait(self, *, app: Optional[App] = None, timeout=timedelta(minutes=20)) -> App: + return self.create(app=app).result(timeout=timeout) def delete(self, name: str) -> App: """Delete an app. @@ -933,37 +823,20 @@ def delete(self, name: str) -> App: res = self._api.do('DELETE', f'/api/2.0/apps/{name}', headers=headers) return App.from_dict(res) - def deploy(self, - app_name: str, - *, - deployment_id: Optional[str] = None, - mode: Optional[AppDeploymentMode] = None, - source_code_path: Optional[str] = None) -> Wait[AppDeployment]: + def deploy(self, app_name: str, *, app_deployment: Optional[AppDeployment] = None) -> Wait[AppDeployment]: """Create an app deployment. Creates an app deployment for the app with the supplied name. :param app_name: str The name of the app. - :param deployment_id: str (optional) - The unique id of the deployment. - :param mode: :class:`AppDeploymentMode` (optional) - The mode of which the deployment will manage the source code. - :param source_code_path: str (optional) - The workspace file system path of the source code used to create the app deployment. This is - different from `deployment_artifacts.source_code_path`, which is the path used by the deployed app. - The former refers to the original source code location of the app in the workspace during deployment - creation, whereas the latter provides a system generated stable snapshotted source code path used by - the deployment. + :param app_deployment: :class:`AppDeployment` (optional) :returns: Long-running operation waiter for :class:`AppDeployment`. See :method:wait_get_deployment_app_succeeded for more details. """ - body = {} - if deployment_id is not None: body['deployment_id'] = deployment_id - if mode is not None: body['mode'] = mode.value - if source_code_path is not None: body['source_code_path'] = source_code_path + body = app_deployment headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } op_response = self._api.do('POST', @@ -975,18 +848,12 @@ def deploy(self, app_name=app_name, deployment_id=op_response['deployment_id']) - def deploy_and_wait( - self, - app_name: str, - *, - deployment_id: Optional[str] = None, - mode: Optional[AppDeploymentMode] = None, - source_code_path: Optional[str] = None, - timeout=timedelta(minutes=20)) -> AppDeployment: - return self.deploy(app_name=app_name, - deployment_id=deployment_id, - mode=mode, - source_code_path=source_code_path).result(timeout=timeout) + def deploy_and_wait(self, + app_name: str, + *, + app_deployment: Optional[AppDeployment] = None, + timeout=timedelta(minutes=20)) -> AppDeployment: + return self.deploy(app_deployment=app_deployment, app_name=app_name).result(timeout=timeout) def get(self, name: str) -> App: """Get an app. 
@@ -1121,7 +988,8 @@ def set_permissions( access_control_list: Optional[List[AppAccessControlRequest]] = None) -> AppPermissions: """Set app permissions. - Sets permissions on an app. Apps can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param app_name: str The app for which to get or manage permissions. @@ -1179,28 +1047,18 @@ def stop(self, name: str) -> Wait[App]: def stop_and_wait(self, name: str, timeout=timedelta(minutes=20)) -> App: return self.stop(name=name).result(timeout=timeout) - def update(self, - name: str, - *, - description: Optional[str] = None, - resources: Optional[List[AppResource]] = None) -> App: + def update(self, name: str, *, app: Optional[App] = None) -> App: """Update an app. Updates the app with the supplied name. :param name: str - The name of the app. The name must contain only lowercase alphanumeric characters and hyphens. It - must be unique within the workspace. - :param description: str (optional) - The description of the app. - :param resources: List[:class:`AppResource`] (optional) - Resources for the app. + The name of the app. + :param app: :class:`App` (optional) :returns: :class:`App` """ - body = {} - if description is not None: body['description'] = description - if resources is not None: body['resources'] = [v.as_dict() for v in resources] + body = app headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } res = self._api.do('PATCH', f'/api/2.0/apps/{name}', body=body, headers=headers) diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py index b149dbba..3943608e 100755 --- a/databricks/sdk/service/catalog.py +++ b/databricks/sdk/service/catalog.py @@ -3,11 +3,15 @@ from __future__ import annotations import logging +import random +import time from dataclasses import dataclass +from datetime import timedelta from enum import Enum -from typing import Dict, Iterator, List, Optional +from typing import Callable, Dict, Iterator, List, Optional -from ._internal import _enum, _from_dict, _repeated_dict, _repeated_enum +from ..errors import OperationFailed +from ._internal import Wait, _enum, _from_dict, _repeated_dict, _repeated_enum _LOG = logging.getLogger('databricks.sdk') @@ -310,6 +314,36 @@ def from_dict(cls, d: Dict[str, any]) -> AwsCredentials: session_token=d.get('session_token', None)) +@dataclass +class AwsIamRole: + """The AWS IAM role configuration""" + + external_id: Optional[str] = None + """The external ID used in role assumption to prevent the confused deputy problem.""" + + role_arn: Optional[str] = None + """The Amazon Resource Name (ARN) of the AWS IAM role used to vend temporary credentials.""" + + unity_catalog_iam_arn: Optional[str] = None + """The Amazon Resource Name (ARN) of the AWS IAM user managed by Databricks. 
This is the identity
+    that is going to assume the AWS IAM role."""
+
+    def as_dict(self) -> dict:
+        """Serializes the AwsIamRole into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.external_id is not None: body['external_id'] = self.external_id
+        if self.role_arn is not None: body['role_arn'] = self.role_arn
+        if self.unity_catalog_iam_arn is not None: body['unity_catalog_iam_arn'] = self.unity_catalog_iam_arn
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> AwsIamRole:
+        """Deserializes the AwsIamRole from a dictionary."""
+        return cls(external_id=d.get('external_id', None),
+                   role_arn=d.get('role_arn', None),
+                   unity_catalog_iam_arn=d.get('unity_catalog_iam_arn', None))
+
+
 @dataclass
 class AwsIamRoleRequest:
     role_arn: str
@@ -355,6 +389,64 @@ def from_dict(cls, d: Dict[str, any]) -> AwsIamRoleResponse:
                    unity_catalog_iam_arn=d.get('unity_catalog_iam_arn', None))
 
 
+@dataclass
+class AzureActiveDirectoryToken:
+    """Azure Active Directory token, essentially the Oauth token for Azure Service Principal or Managed
+    Identity. Read more at
+    https://learn.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token"""
+
+    aad_token: Optional[str] = None
+    """Opaque token that contains claims that you can use in Azure Active Directory to access cloud
+    services."""
+
+    def as_dict(self) -> dict:
+        """Serializes the AzureActiveDirectoryToken into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.aad_token is not None: body['aad_token'] = self.aad_token
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> AzureActiveDirectoryToken:
+        """Deserializes the AzureActiveDirectoryToken from a dictionary."""
+        return cls(aad_token=d.get('aad_token', None))
+
+
+@dataclass
+class AzureManagedIdentity:
+    """The Azure managed identity configuration."""
+
+    access_connector_id: Optional[str] = None
+    """The Azure resource ID of the Azure Databricks Access Connector. Use the format
+    `/subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.Databricks/accessConnectors/{connector-name}`."""
+
+    credential_id: Optional[str] = None
+    """The Databricks internal ID that represents this managed identity. This field is only used to
+    persist the credential_id once it is fetched from the credentials manager - as we only use the
+    protobuf serializer to store credentials, this ID gets persisted to the database."""
+
+    managed_identity_id: Optional[str] = None
+    """The Azure resource ID of the managed identity. Use the format
+    `/subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identity-name}`.
+    This is only available for user-assigned identities. For system-assigned identities, the
+    access_connector_id is used to identify the identity.
If this field is not provided, then we + assume the AzureManagedIdentity is using the system-assigned identity.""" + + def as_dict(self) -> dict: + """Serializes the AzureManagedIdentity into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.access_connector_id is not None: body['access_connector_id'] = self.access_connector_id + if self.credential_id is not None: body['credential_id'] = self.credential_id + if self.managed_identity_id is not None: body['managed_identity_id'] = self.managed_identity_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> AzureManagedIdentity: + """Deserializes the AzureManagedIdentity from a dictionary.""" + return cls(access_connector_id=d.get('access_connector_id', None), + credential_id=d.get('credential_id', None), + managed_identity_id=d.get('managed_identity_id', None)) + + @dataclass class AzureManagedIdentityRequest: access_connector_id: str @@ -793,6 +885,7 @@ class ColumnTypeName(Enum): TIMESTAMP = 'TIMESTAMP' TIMESTAMP_NTZ = 'TIMESTAMP_NTZ' USER_DEFINED_TYPE = 'USER_DEFINED_TYPE' + VARIANT = 'VARIANT' @dataclass @@ -1066,6 +1159,49 @@ def from_dict(cls, d: Dict[str, any]) -> CreateConnection: read_only=d.get('read_only', None)) +@dataclass +class CreateCredentialRequest: + aws_iam_role: Optional[AwsIamRole] = None + """The AWS IAM role configuration""" + + azure_managed_identity: Optional[AzureManagedIdentity] = None + """The Azure managed identity configuration.""" + + comment: Optional[str] = None + """Comment associated with the credential.""" + + name: Optional[str] = None + """The credential name. The name must be unique among storage and service credentials within the + metastore.""" + + purpose: Optional[CredentialPurpose] = None + """Indicates the purpose of the credential.""" + + skip_validation: Optional[bool] = None + """Optional. 
Supplying true to this argument skips validation of the created set of credentials.""" + + def as_dict(self) -> dict: + """Serializes the CreateCredentialRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.aws_iam_role: body['aws_iam_role'] = self.aws_iam_role.as_dict() + if self.azure_managed_identity: body['azure_managed_identity'] = self.azure_managed_identity.as_dict() + if self.comment is not None: body['comment'] = self.comment + if self.name is not None: body['name'] = self.name + if self.purpose is not None: body['purpose'] = self.purpose.value + if self.skip_validation is not None: body['skip_validation'] = self.skip_validation + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> CreateCredentialRequest: + """Deserializes the CreateCredentialRequest from a dictionary.""" + return cls(aws_iam_role=_from_dict(d, 'aws_iam_role', AwsIamRole), + azure_managed_identity=_from_dict(d, 'azure_managed_identity', AzureManagedIdentity), + comment=d.get('comment', None), + name=d.get('name', None), + purpose=_enum(d, 'purpose', CredentialPurpose), + skip_validation=d.get('skip_validation', None)) + + @dataclass class CreateExternalLocation: name: str @@ -1278,7 +1414,7 @@ class CreateFunctionRoutineBody(Enum): class CreateFunctionSecurityType(Enum): - """Function security type.""" + """The security type of the function.""" DEFINER = 'DEFINER' @@ -1439,29 +1575,6 @@ def from_dict(cls, d: Dict[str, any]) -> CreateMonitor: warehouse_id=d.get('warehouse_id', None)) -@dataclass -class CreateOnlineTableRequest: - """Online Table information.""" - - name: Optional[str] = None - """Full three-part (catalog, schema, table) name of the table.""" - - spec: Optional[OnlineTableSpec] = None - """Specification of the online table.""" - - def as_dict(self) -> dict: - """Serializes the CreateOnlineTableRequest into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.name is not None: body['name'] = self.name - if self.spec: body['spec'] = self.spec.as_dict() - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> CreateOnlineTableRequest: - """Deserializes the CreateOnlineTableRequest from a dictionary.""" - return cls(name=d.get('name', None), spec=_from_dict(d, 'spec', OnlineTableSpec)) - - @dataclass class CreateRegisteredModelRequest: catalog_name: str @@ -1675,6 +1788,94 @@ def from_dict(cls, d: Dict[str, any]) -> CreateVolumeRequestContent: volume_type=_enum(d, 'volume_type', VolumeType)) +@dataclass +class CredentialInfo: + aws_iam_role: Optional[AwsIamRole] = None + """The AWS IAM role configuration""" + + azure_managed_identity: Optional[AzureManagedIdentity] = None + """The Azure managed identity configuration.""" + + comment: Optional[str] = None + """Comment associated with the credential.""" + + created_at: Optional[int] = None + """Time at which this credential was created, in epoch milliseconds.""" + + created_by: Optional[str] = None + """Username of credential creator.""" + + full_name: Optional[str] = None + """The full name of the credential.""" + + id: Optional[str] = None + """The unique identifier of the credential.""" + + isolation_mode: Optional[IsolationMode] = None + """Whether the current securable is accessible from all workspaces or a specific set of workspaces.""" + + metastore_id: Optional[str] = None + """Unique identifier of the parent metastore.""" + + name: Optional[str] = None + """The credential name. 
The name must be unique among storage and service credentials within the
+    metastore."""
+
+    owner: Optional[str] = None
+    """Username of current owner of credential."""
+
+    purpose: Optional[CredentialPurpose] = None
+    """Indicates the purpose of the credential."""
+
+    updated_at: Optional[int] = None
+    """Time at which this credential was last modified, in epoch milliseconds."""
+
+    updated_by: Optional[str] = None
+    """Username of user who last modified the credential."""
+
+    def as_dict(self) -> dict:
+        """Serializes the CredentialInfo into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.aws_iam_role: body['aws_iam_role'] = self.aws_iam_role.as_dict()
+        if self.azure_managed_identity: body['azure_managed_identity'] = self.azure_managed_identity.as_dict()
+        if self.comment is not None: body['comment'] = self.comment
+        if self.created_at is not None: body['created_at'] = self.created_at
+        if self.created_by is not None: body['created_by'] = self.created_by
+        if self.full_name is not None: body['full_name'] = self.full_name
+        if self.id is not None: body['id'] = self.id
+        if self.isolation_mode is not None: body['isolation_mode'] = self.isolation_mode.value
+        if self.metastore_id is not None: body['metastore_id'] = self.metastore_id
+        if self.name is not None: body['name'] = self.name
+        if self.owner is not None: body['owner'] = self.owner
+        if self.purpose is not None: body['purpose'] = self.purpose.value
+        if self.updated_at is not None: body['updated_at'] = self.updated_at
+        if self.updated_by is not None: body['updated_by'] = self.updated_by
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> CredentialInfo:
+        """Deserializes the CredentialInfo from a dictionary."""
+        return cls(aws_iam_role=_from_dict(d, 'aws_iam_role', AwsIamRole),
+                   azure_managed_identity=_from_dict(d, 'azure_managed_identity', AzureManagedIdentity),
+                   comment=d.get('comment', None),
+                   created_at=d.get('created_at', None),
+                   created_by=d.get('created_by', None),
+                   full_name=d.get('full_name', None),
+                   id=d.get('id', None),
+                   isolation_mode=_enum(d, 'isolation_mode', IsolationMode),
+                   metastore_id=d.get('metastore_id', None),
+                   name=d.get('name', None),
+                   owner=d.get('owner', None),
+                   purpose=_enum(d, 'purpose', CredentialPurpose),
+                   updated_at=d.get('updated_at', None),
+                   updated_by=d.get('updated_by', None))
+
+
+class CredentialPurpose(Enum):
+
+    SERVICE = 'SERVICE'
+
+
 class CredentialType(Enum):
     """The type of credential."""
 
@@ -1682,6 +1883,27 @@ class CredentialType(Enum):
     USERNAME_PASSWORD = 'USERNAME_PASSWORD'
 
 
+@dataclass
+class CredentialValidationResult:
+    message: Optional[str] = None
+    """Error message that is present when the result is not **PASS**."""
+
+    result: Optional[ValidateCredentialResult] = None
+    """The results of the tested operation."""
+
+    def as_dict(self) -> dict:
+        """Serializes the CredentialValidationResult into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.message is not None: body['message'] = self.message
+        if self.result is not None: body['result'] = self.result.value
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> CredentialValidationResult:
+        """Deserializes the CredentialValidationResult from a dictionary."""
+        return cls(message=d.get('message', None), result=_enum(d, 'result', ValidateCredentialResult))
+
+
 @dataclass
 class CurrentWorkspaceBindings:
     """Currently assigned workspaces"""
@@ -1778,6 +2000,20 @@ def from_dict(cls, d: Dict[str, any]) -> 
DeleteAliasResponse: return cls() +@dataclass +class DeleteCredentialResponse: + + def as_dict(self) -> dict: + """Serializes the DeleteCredentialResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> DeleteCredentialResponse: + """Deserializes the DeleteCredentialResponse from a dictionary.""" + return cls() + + @dataclass class DeleteResponse: @@ -2052,7 +2288,6 @@ class ExternalLocationInfo: sufficient.""" isolation_mode: Optional[IsolationMode] = None - """Whether the current securable is accessible from all workspaces or a specific set of workspaces.""" metastore_id: Optional[str] = None """Unique identifier of metastore hosting the external location.""" @@ -2382,7 +2617,7 @@ class FunctionInfoRoutineBody(Enum): class FunctionInfoSecurityType(Enum): - """Function security type.""" + """The security type of the function.""" DEFINER = 'DEFINER' @@ -2516,6 +2751,50 @@ def from_dict(cls, d: Dict[str, any]) -> GcpOauthToken: return cls(oauth_token=d.get('oauth_token', None)) +@dataclass +class GenerateTemporaryServiceCredentialAzureOptions: + """Options to customize the requested temporary credential""" + + resources: Optional[List[str]] = None + """The resources to which the temporary Azure credential should apply. These resources are the + scopes that are passed to the token provider (see + https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential?view=azure-python)""" + + def as_dict(self) -> dict: + """Serializes the GenerateTemporaryServiceCredentialAzureOptions into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.resources: body['resources'] = [v for v in self.resources] + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> GenerateTemporaryServiceCredentialAzureOptions: + """Deserializes the GenerateTemporaryServiceCredentialAzureOptions from a dictionary.""" + return cls(resources=d.get('resources', None)) + + +@dataclass +class GenerateTemporaryServiceCredentialRequest: + azure_options: Optional[GenerateTemporaryServiceCredentialAzureOptions] = None + """Options to customize the requested temporary credential""" + + credential_name: Optional[str] = None + """The name of the service credential used to generate a temporary credential""" + + def as_dict(self) -> dict: + """Serializes the GenerateTemporaryServiceCredentialRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.azure_options: body['azure_options'] = self.azure_options.as_dict() + if self.credential_name is not None: body['credential_name'] = self.credential_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> GenerateTemporaryServiceCredentialRequest: + """Deserializes the GenerateTemporaryServiceCredentialRequest from a dictionary.""" + return cls(azure_options=_from_dict(d, 'azure_options', + GenerateTemporaryServiceCredentialAzureOptions), + credential_name=d.get('credential_name', None)) + + @dataclass class GenerateTemporaryTableCredentialRequest: operation: Optional[TableOperation] = None @@ -2545,6 +2824,11 @@ class GenerateTemporaryTableCredentialResponse: """AWS temporary credentials for API authentication. Read more at https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html.""" + azure_aad: Optional[AzureActiveDirectoryToken] = None + """Azure Active Directory token, essentially the Oauth token for Azure Service Principal or Managed + Identity. 
Read more at + https://learn.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token""" + azure_user_delegation_sas: Optional[AzureUserDelegationSas] = None """Azure temporary credentials for API authentication. Read more at https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas""" @@ -2568,6 +2852,7 @@ def as_dict(self) -> dict: """Serializes the GenerateTemporaryTableCredentialResponse into a dictionary suitable for use as a JSON request body.""" body = {} if self.aws_temp_credentials: body['aws_temp_credentials'] = self.aws_temp_credentials.as_dict() + if self.azure_aad: body['azure_aad'] = self.azure_aad.as_dict() if self.azure_user_delegation_sas: body['azure_user_delegation_sas'] = self.azure_user_delegation_sas.as_dict() if self.expiration_time is not None: body['expiration_time'] = self.expiration_time @@ -2580,6 +2865,7 @@ def as_dict(self) -> dict: def from_dict(cls, d: Dict[str, any]) -> GenerateTemporaryTableCredentialResponse: """Deserializes the GenerateTemporaryTableCredentialResponse from a dictionary.""" return cls(aws_temp_credentials=_from_dict(d, 'aws_temp_credentials', AwsCredentials), + azure_aad=_from_dict(d, 'azure_aad', AzureActiveDirectoryToken), azure_user_delegation_sas=_from_dict(d, 'azure_user_delegation_sas', AzureUserDelegationSas), expiration_time=d.get('expiration_time', None), @@ -2592,6 +2878,7 @@ class GetBindingsSecurableType(Enum): CATALOG = 'catalog' EXTERNAL_LOCATION = 'external_location' + SERVICE_CREDENTIAL = 'service_credential' STORAGE_CREDENTIAL = 'storage_credential' @@ -2738,7 +3025,6 @@ def from_dict(cls, d: Dict[str, any]) -> GetQuotaResponse: class IsolationMode(Enum): - """Whether the current securable is accessible from all workspaces or a specific set of workspaces.""" ISOLATION_MODE_ISOLATED = 'ISOLATION_MODE_ISOLATED' ISOLATION_MODE_OPEN = 'ISOLATION_MODE_OPEN' @@ -2826,6 +3112,28 @@ def from_dict(cls, d: Dict[str, any]) -> ListConnectionsResponse: next_page_token=d.get('next_page_token', None)) +@dataclass +class ListCredentialsResponse: + credentials: Optional[List[CredentialInfo]] = None + + next_page_token: Optional[str] = None + """Opaque token to retrieve the next page of results. Absent if there are no more pages. 
+ __page_token__ should be set to this value for the next request (for the next page of results).""" + + def as_dict(self) -> dict: + """Serializes the ListCredentialsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.credentials: body['credentials'] = [v.as_dict() for v in self.credentials] + if self.next_page_token is not None: body['next_page_token'] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> ListCredentialsResponse: + """Deserializes the ListCredentialsResponse from a dictionary.""" + return cls(credentials=_repeated_dict(d, 'credentials', CredentialInfo), + next_page_token=d.get('next_page_token', None)) + + @dataclass class ListExternalLocationsResponse: external_locations: Optional[List[ExternalLocationInfo]] = None @@ -4619,6 +4927,7 @@ class SecurableType(Enum): CATALOG = 'catalog' CONNECTION = 'connection' + CREDENTIAL = 'credential' EXTERNAL_LOCATION = 'external_location' FUNCTION = 'function' METASTORE = 'metastore' @@ -4738,11 +5047,13 @@ class StorageCredentialInfo: databricks_gcp_service_account: Optional[DatabricksGcpServiceAccountResponse] = None """The Databricks managed GCP service account configuration.""" + full_name: Optional[str] = None + """The full name of the credential.""" + id: Optional[str] = None """The unique identifier of the credential.""" isolation_mode: Optional[IsolationMode] = None - """Whether the current securable is accessible from all workspaces or a specific set of workspaces.""" metastore_id: Optional[str] = None """Unique identifier of parent metastore.""" @@ -4778,6 +5089,7 @@ def as_dict(self) -> dict: if self.created_by is not None: body['created_by'] = self.created_by if self.databricks_gcp_service_account: body['databricks_gcp_service_account'] = self.databricks_gcp_service_account.as_dict() + if self.full_name is not None: body['full_name'] = self.full_name if self.id is not None: body['id'] = self.id if self.isolation_mode is not None: body['isolation_mode'] = self.isolation_mode.value if self.metastore_id is not None: body['metastore_id'] = self.metastore_id @@ -4803,6 +5115,7 @@ def from_dict(cls, d: Dict[str, any]) -> StorageCredentialInfo: created_by=d.get('created_by', None), databricks_gcp_service_account=_from_dict(d, 'databricks_gcp_service_account', DatabricksGcpServiceAccountResponse), + full_name=d.get('full_name', None), id=d.get('id', None), isolation_mode=_enum(d, 'isolation_mode', IsolationMode), metastore_id=d.get('metastore_id', None), @@ -5158,6 +5471,37 @@ class TableType(Enum): VIEW = 'VIEW' +@dataclass +class TemporaryCredentials: + aws_temp_credentials: Optional[AwsCredentials] = None + """AWS temporary credentials for API authentication. Read more at + https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html.""" + + azure_aad: Optional[AzureActiveDirectoryToken] = None + """Azure Active Directory token, essentially the Oauth token for Azure Service Principal or Managed + Identity. Read more at + https://learn.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token""" + + expiration_time: Optional[int] = None + """Server time when the credential will expire, in epoch milliseconds. 
The API client is advised to + cache the credential given this expiration time.""" + + def as_dict(self) -> dict: + """Serializes the TemporaryCredentials into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.aws_temp_credentials: body['aws_temp_credentials'] = self.aws_temp_credentials.as_dict() + if self.azure_aad: body['azure_aad'] = self.azure_aad.as_dict() + if self.expiration_time is not None: body['expiration_time'] = self.expiration_time + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> TemporaryCredentials: + """Deserializes the TemporaryCredentials from a dictionary.""" + return cls(aws_temp_credentials=_from_dict(d, 'aws_temp_credentials', AwsCredentials), + azure_aad=_from_dict(d, 'azure_aad', AzureActiveDirectoryToken), + expiration_time=d.get('expiration_time', None)) + + @dataclass class TriggeredUpdateStatus: """Detailed status of an online table. Shown if the online table is in the ONLINE_TRIGGERED_UPDATE @@ -5224,6 +5568,7 @@ class UpdateBindingsSecurableType(Enum): CATALOG = 'catalog' EXTERNAL_LOCATION = 'external_location' + SERVICE_CREDENTIAL = 'service_credential' STORAGE_CREDENTIAL = 'storage_credential' @@ -5308,6 +5653,63 @@ def from_dict(cls, d: Dict[str, any]) -> UpdateConnection: owner=d.get('owner', None)) +@dataclass +class UpdateCredentialRequest: + aws_iam_role: Optional[AwsIamRole] = None + """The AWS IAM role configuration""" + + azure_managed_identity: Optional[AzureManagedIdentity] = None + """The Azure managed identity configuration.""" + + comment: Optional[str] = None + """Comment associated with the credential.""" + + force: Optional[bool] = None + """Force update even if there are dependent services.""" + + isolation_mode: Optional[IsolationMode] = None + """Whether the current securable is accessible from all workspaces or a specific set of workspaces.""" + + name_arg: Optional[str] = None + """Name of the credential.""" + + new_name: Optional[str] = None + """New name of credential.""" + + owner: Optional[str] = None + """Username of current owner of credential.""" + + skip_validation: Optional[bool] = None + """Supply true to this argument to skip validation of the updated credential.""" + + def as_dict(self) -> dict: + """Serializes the UpdateCredentialRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.aws_iam_role: body['aws_iam_role'] = self.aws_iam_role.as_dict() + if self.azure_managed_identity: body['azure_managed_identity'] = self.azure_managed_identity.as_dict() + if self.comment is not None: body['comment'] = self.comment + if self.force is not None: body['force'] = self.force + if self.isolation_mode is not None: body['isolation_mode'] = self.isolation_mode.value + if self.name_arg is not None: body['name_arg'] = self.name_arg + if self.new_name is not None: body['new_name'] = self.new_name + if self.owner is not None: body['owner'] = self.owner + if self.skip_validation is not None: body['skip_validation'] = self.skip_validation + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> UpdateCredentialRequest: + """Deserializes the UpdateCredentialRequest from a dictionary.""" + return cls(aws_iam_role=_from_dict(d, 'aws_iam_role', AwsIamRole), + azure_managed_identity=_from_dict(d, 'azure_managed_identity', AzureManagedIdentity), + comment=d.get('comment', None), + force=d.get('force', None), + isolation_mode=_enum(d, 'isolation_mode', IsolationMode), + name_arg=d.get('name_arg', None), + new_name=d.get('new_name', None), + 
owner=d.get('owner', None),
+                   skip_validation=d.get('skip_validation', None))
+
+
 @dataclass
 class UpdateExternalLocation:
     access_point: Optional[str] = None
@@ -5331,7 +5733,6 @@ class UpdateExternalLocation:
     """Force update even if changing url invalidates dependent external tables or mounts."""
 
     isolation_mode: Optional[IsolationMode] = None
-    """Whether the current securable is accessible from all workspaces or a specific set of workspaces."""
 
     name: Optional[str] = None
     """Name of the external location."""
@@ -5751,7 +6152,6 @@ class UpdateStorageCredential:
     """Force update even if there are dependent external locations or external tables."""
 
     isolation_mode: Optional[IsolationMode] = None
-    """Whether the current securable is accessible from all workspaces or a specific set of workspaces."""
 
     name: Optional[str] = None
     """Name of the storage credential."""
@@ -5899,6 +6299,63 @@ def from_dict(cls, d: Dict[str, any]) -> UpdateWorkspaceBindingsParameters:
                    securable_type=_enum(d, 'securable_type', UpdateBindingsSecurableType))
 
 
+@dataclass
+class ValidateCredentialRequest:
+    aws_iam_role: Optional[AwsIamRole] = None
+    """The AWS IAM role configuration"""
+
+    azure_managed_identity: Optional[AzureManagedIdentity] = None
+    """The Azure managed identity configuration."""
+
+    credential_name: Optional[str] = None
+    """Required. The name of an existing credential or long-lived cloud credential to validate."""
+
+    purpose: Optional[CredentialPurpose] = None
+    """The purpose of the credential. This should only be used when the credential is specified."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ValidateCredentialRequest into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.aws_iam_role: body['aws_iam_role'] = self.aws_iam_role.as_dict()
+        if self.azure_managed_identity: body['azure_managed_identity'] = self.azure_managed_identity.as_dict()
+        if self.credential_name is not None: body['credential_name'] = self.credential_name
+        if self.purpose is not None: body['purpose'] = self.purpose.value
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> ValidateCredentialRequest:
+        """Deserializes the ValidateCredentialRequest from a dictionary."""
+        return cls(aws_iam_role=_from_dict(d, 'aws_iam_role', AwsIamRole),
+                   azure_managed_identity=_from_dict(d, 'azure_managed_identity', AzureManagedIdentity),
+                   credential_name=d.get('credential_name', None),
+                   purpose=_enum(d, 'purpose', CredentialPurpose))
+
+
+@dataclass
+class ValidateCredentialResponse:
+    results: Optional[List[CredentialValidationResult]] = None
+    """The results of the validation check."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ValidateCredentialResponse into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.results: body['results'] = [v.as_dict() for v in self.results]
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> ValidateCredentialResponse:
+        """Deserializes the ValidateCredentialResponse from a dictionary."""
+        return cls(results=_repeated_dict(d, 'results', CredentialValidationResult))
+
+
+class ValidateCredentialResult(Enum):
+    """An enum that represents the result of the file operation"""
+
+    FAIL = 'FAIL'
+    PASS = 'PASS'
+    SKIP = 'SKIP'
+
+
 @dataclass
 class ValidateStorageCredential:
     aws_iam_role: Optional[AwsIamRoleRequest] = None
@@ -6935,6 +7392,258 @@ def update(self,
         return ConnectionInfo.from_dict(res)
 
 
+class CredentialsAPI:
+    """A credential represents an authentication and authorization mechanism for
accessing services on your cloud
+    tenant. Each credential is subject to Unity Catalog access-control policies that control which users and
+    groups can access the credential.
+
+    To create credentials, you must be a Databricks account admin or have the `CREATE SERVICE CREDENTIAL`
+    privilege. The user who creates the credential can delegate ownership to another user or group to manage
+    permissions on it."""
+
+    def __init__(self, api_client):
+        self._api = api_client
+
+    def create_credential(self,
+                          *,
+                          aws_iam_role: Optional[AwsIamRole] = None,
+                          azure_managed_identity: Optional[AzureManagedIdentity] = None,
+                          comment: Optional[str] = None,
+                          name: Optional[str] = None,
+                          purpose: Optional[CredentialPurpose] = None,
+                          skip_validation: Optional[bool] = None) -> CredentialInfo:
+        """Create a credential.
+
+        Creates a new credential.
+
+        :param aws_iam_role: :class:`AwsIamRole` (optional)
+          The AWS IAM role configuration
+        :param azure_managed_identity: :class:`AzureManagedIdentity` (optional)
+          The Azure managed identity configuration.
+        :param comment: str (optional)
+          Comment associated with the credential.
+        :param name: str (optional)
+          The credential name. The name must be unique among storage and service credentials within the
+          metastore.
+        :param purpose: :class:`CredentialPurpose` (optional)
+          Indicates the purpose of the credential.
+        :param skip_validation: bool (optional)
+          Optional. Supplying true to this argument skips validation of the created set of credentials.
+
+        :returns: :class:`CredentialInfo`
+        """
+        body = {}
+        if aws_iam_role is not None: body['aws_iam_role'] = aws_iam_role.as_dict()
+        if azure_managed_identity is not None:
+            body['azure_managed_identity'] = azure_managed_identity.as_dict()
+        if comment is not None: body['comment'] = comment
+        if name is not None: body['name'] = name
+        if purpose is not None: body['purpose'] = purpose.value
+        if skip_validation is not None: body['skip_validation'] = skip_validation
+        headers = {'Accept': 'application/json', 'Content-Type': 'application/json', }
+
+        res = self._api.do('POST', '/api/2.1/unity-catalog/credentials', body=body, headers=headers)
+        return CredentialInfo.from_dict(res)
+
+    def delete_credential(self, name_arg: str, *, force: Optional[bool] = None):
+        """Delete a credential.
+
+        Deletes a credential from the metastore. The caller must be an owner of the credential.
+
+        :param name_arg: str
+          Name of the credential.
+        :param force: bool (optional)
+          Force deletion even if there are dependent services.
+
+
+        """
+
+        query = {}
+        if force is not None: query['force'] = force
+        headers = {'Accept': 'application/json', }
+
+        self._api.do('DELETE', f'/api/2.1/unity-catalog/credentials/{name_arg}', query=query, headers=headers)
+
+    def generate_temporary_service_credential(
+            self,
+            *,
+            azure_options: Optional[GenerateTemporaryServiceCredentialAzureOptions] = None,
+            credential_name: Optional[str] = None) -> TemporaryCredentials:
+        """Generate a temporary service credential.
+
+        Returns a set of temporary credentials generated using the specified service credential. The caller
+        must be a metastore admin or have the metastore privilege **ACCESS** on the service credential.
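+
+        Example (a minimal illustrative sketch, not a normative recipe: it assumes the service is
+        exposed as ``w.credentials`` on a :class:`WorkspaceClient` configured from the environment,
+        and the credential name is a placeholder)::
+
+            import time
+
+            from databricks.sdk import WorkspaceClient
+
+            w = WorkspaceClient()
+            creds = w.credentials.generate_temporary_service_credential(
+                credential_name='my-service-cred')
+            # expiration_time is epoch milliseconds; callers are advised to cache
+            # the credential and re-issue it only once this instant has passed.
+            expired = creds.expiration_time <= int(time.time() * 1000)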
+ + :param azure_options: :class:`GenerateTemporaryServiceCredentialAzureOptions` (optional) + Options to customize the requested temporary credential + :param credential_name: str (optional) + The name of the service credential used to generate a temporary credential + + :returns: :class:`TemporaryCredentials` + """ + body = {} + if azure_options is not None: body['azure_options'] = azure_options.as_dict() + if credential_name is not None: body['credential_name'] = credential_name + headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } + + res = self._api.do('POST', + '/api/2.1/unity-catalog/temporary-service-credentials', + body=body, + headers=headers) + return TemporaryCredentials.from_dict(res) + + def get_credential(self, name_arg: str) -> CredentialInfo: + """Get a credential. + + Gets a credential from the metastore. The caller must be a metastore admin, the owner of the + credential, or have any permission on the credential. + + :param name_arg: str + Name of the credential. + + :returns: :class:`CredentialInfo` + """ + + headers = {'Accept': 'application/json', } + + res = self._api.do('GET', f'/api/2.1/unity-catalog/credentials/{name_arg}', headers=headers) + return CredentialInfo.from_dict(res) + + def list_credentials(self, + *, + max_results: Optional[int] = None, + page_token: Optional[str] = None, + purpose: Optional[CredentialPurpose] = None) -> Iterator[CredentialInfo]: + """List credentials. + + Gets an array of credentials (as __CredentialInfo__ objects). + + The array is limited to only the credentials that the caller has permission to access. If the caller + is a metastore admin, retrieval of credentials is unrestricted. There is no guarantee of a specific + ordering of the elements in the array. + + :param max_results: int (optional) + Maximum number of credentials to return. - If not set, the default max page size is used. - When set + to a value greater than 0, the page length is the minimum of this value and a server-configured + value. - When set to 0, the page length is set to a server-configured value (recommended). - When + set to a value less than 0, an invalid parameter error is returned. + :param page_token: str (optional) + Opaque token to retrieve the next page of results. + :param purpose: :class:`CredentialPurpose` (optional) + Return only credentials for the specified purpose. + + :returns: Iterator over :class:`CredentialInfo` + """ + + query = {} + if max_results is not None: query['max_results'] = max_results + if page_token is not None: query['page_token'] = page_token + if purpose is not None: query['purpose'] = purpose.value + headers = {'Accept': 'application/json', } + + while True: + json = self._api.do('GET', '/api/2.1/unity-catalog/credentials', query=query, headers=headers) + if 'credentials' in json: + for v in json['credentials']: + yield CredentialInfo.from_dict(v) + if 'next_page_token' not in json or not json['next_page_token']: + return + query['page_token'] = json['next_page_token'] + + def update_credential(self, + name_arg: str, + *, + aws_iam_role: Optional[AwsIamRole] = None, + azure_managed_identity: Optional[AzureManagedIdentity] = None, + comment: Optional[str] = None, + force: Optional[bool] = None, + isolation_mode: Optional[IsolationMode] = None, + new_name: Optional[str] = None, + owner: Optional[str] = None, + skip_validation: Optional[bool] = None) -> CredentialInfo: + """Update a credential. + + Updates a credential on the metastore. 
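+
+        Example (a minimal illustrative sketch; the credential and owner names are placeholders and
+        the service is assumed to be exposed as ``w.credentials``)::
+
+            from databricks.sdk import WorkspaceClient
+
+            w = WorkspaceClient()
+            updated = w.credentials.update_credential(
+                name_arg='my-service-cred',
+                new_name='my-renamed-cred',
+                owner='someone@example.com')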
+ + The caller must be the owner of the credential or a metastore admin or have the `MANAGE` permission. + If the caller is a metastore admin, only the __owner__ field can be changed. + + :param name_arg: str + Name of the credential. + :param aws_iam_role: :class:`AwsIamRole` (optional) + The AWS IAM role configuration + :param azure_managed_identity: :class:`AzureManagedIdentity` (optional) + The Azure managed identity configuration. + :param comment: str (optional) + Comment associated with the credential. + :param force: bool (optional) + Force update even if there are dependent services. + :param isolation_mode: :class:`IsolationMode` (optional) + Whether the current securable is accessible from all workspaces or a specific set of workspaces. + :param new_name: str (optional) + New name of credential. + :param owner: str (optional) + Username of current owner of credential. + :param skip_validation: bool (optional) + Supply true to this argument to skip validation of the updated credential. + + :returns: :class:`CredentialInfo` + """ + body = {} + if aws_iam_role is not None: body['aws_iam_role'] = aws_iam_role.as_dict() + if azure_managed_identity is not None: + body['azure_managed_identity'] = azure_managed_identity.as_dict() + if comment is not None: body['comment'] = comment + if force is not None: body['force'] = force + if isolation_mode is not None: body['isolation_mode'] = isolation_mode.value + if new_name is not None: body['new_name'] = new_name + if owner is not None: body['owner'] = owner + if skip_validation is not None: body['skip_validation'] = skip_validation + headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } + + res = self._api.do('PATCH', + f'/api/2.1/unity-catalog/credentials/{name_arg}', + body=body, + headers=headers) + return CredentialInfo.from_dict(res) + + def validate_credential(self, + *, + aws_iam_role: Optional[AwsIamRole] = None, + azure_managed_identity: Optional[AzureManagedIdentity] = None, + credential_name: Optional[str] = None, + purpose: Optional[CredentialPurpose] = None) -> ValidateCredentialResponse: + """Validate a credential. + + Validates a credential. + + Either the __credential_name__ or the cloud-specific credential must be provided. + + The caller must be a metastore admin or the credential owner. + + :param aws_iam_role: :class:`AwsIamRole` (optional) + The AWS IAM role configuration + :param azure_managed_identity: :class:`AzureManagedIdentity` (optional) + The Azure managed identity configuration. + :param credential_name: str (optional) + Required. The name of an existing credential or long-lived cloud credential to validate. + :param purpose: :class:`CredentialPurpose` (optional) + The purpose of the credential. This should only be used when the credential is specified. 
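+
+        Example (a minimal illustrative sketch; the credential name is a placeholder and the service
+        is assumed to be exposed as ``w.credentials``)::
+
+            from databricks.sdk import WorkspaceClient
+
+            w = WorkspaceClient()
+            resp = w.credentials.validate_credential(credential_name='my-service-cred')
+            for result in resp.results or []:
+                print(result)  # each entry reports PASS, FAIL or SKIP for one check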
+ + :returns: :class:`ValidateCredentialResponse` + """ + body = {} + if aws_iam_role is not None: body['aws_iam_role'] = aws_iam_role.as_dict() + if azure_managed_identity is not None: + body['azure_managed_identity'] = azure_managed_identity.as_dict() + if credential_name is not None: body['credential_name'] = credential_name + if purpose is not None: body['purpose'] = purpose.value + headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } + + res = self._api.do('POST', '/api/2.1/unity-catalog/validate-credentials', body=body, headers=headers) + return ValidateCredentialResponse.from_dict(res) + + class ExternalLocationsAPI: """An external location is an object that combines a cloud storage path with a storage credential that authorizes access to the cloud storage path. Each external location is subject to Unity Catalog @@ -7134,7 +7843,6 @@ def update(self, :param force: bool (optional) Force update even if changing url invalidates dependent external tables or mounts. :param isolation_mode: :class:`IsolationMode` (optional) - Whether the current securable is accessible from all workspaces or a specific set of workspaces. :param new_name: str (optional) New name for the external location. :param owner: str (optional) @@ -7890,25 +8598,61 @@ class OnlineTablesAPI: def __init__(self, api_client): self._api = api_client - def create(self, *, name: Optional[str] = None, spec: Optional[OnlineTableSpec] = None) -> OnlineTable: + def wait_get_online_table_active(self, + name: str, + timeout=timedelta(minutes=20), + callback: Optional[Callable[[OnlineTable], None]] = None) -> OnlineTable: + deadline = time.time() + timeout.total_seconds() + target_states = (ProvisioningInfoState.ACTIVE, ) + failure_states = (ProvisioningInfoState.FAILED, ) + status_message = 'polling...' + attempt = 1 + while time.time() < deadline: + poll = self.get(name=name) + status = poll.unity_catalog_provisioning_state + status_message = f'current status: {status}' + if status in target_states: + return poll + if callback: + callback(poll) + if status in failure_states: + msg = f'failed to reach ACTIVE, got {status}: {status_message}' + raise OperationFailed(msg) + prefix = f"name={name}" + sleep = attempt + if sleep > 10: + # sleep 10s max per attempt + sleep = 10 + _LOG.debug(f'{prefix}: ({status}) {status_message} (sleeping ~{sleep}s)') + time.sleep(sleep + random.random()) + attempt += 1 + raise TimeoutError(f'timed out after {timeout}: {status_message}') + + def create(self, *, table: Optional[OnlineTable] = None) -> Wait[OnlineTable]: """Create an Online Table. Create a new Online Table. - :param name: str (optional) - Full three-part (catalog, schema, table) name of the table. - :param spec: :class:`OnlineTableSpec` (optional) - Specification of the online table. + :param table: :class:`OnlineTable` (optional) + Online Table information. - :returns: :class:`OnlineTable` + :returns: + Long-running operation waiter for :class:`OnlineTable`. + See :method:wait_get_online_table_active for more details. 
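+
+        Example (a minimal illustrative sketch; the three-part table name is a placeholder, and
+        ``create_and_wait`` below is the blocking variant of this call)::
+
+            from datetime import timedelta
+
+            from databricks.sdk import WorkspaceClient
+            from databricks.sdk.service.catalog import OnlineTable
+
+            w = WorkspaceClient()
+            online_table = w.online_tables.create_and_wait(
+                table=OnlineTable(name='main.default.my_online_table'),
+                timeout=timedelta(minutes=30))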
""" - body = {} - if name is not None: body['name'] = name - if spec is not None: body['spec'] = spec.as_dict() + body = table headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } - res = self._api.do('POST', '/api/2.0/online-tables', body=body, headers=headers) - return OnlineTable.from_dict(res) + op_response = self._api.do('POST', '/api/2.0/online-tables', body=body, headers=headers) + return Wait(self.wait_get_online_table_active, + response=OnlineTable.from_dict(op_response), + name=op_response['name']) + + def create_and_wait(self, + *, + table: Optional[OnlineTable] = None, + timeout=timedelta(minutes=20)) -> OnlineTable: + return self.create(table=table).result(timeout=timeout) def delete(self, name: str): """Delete an Online Table. @@ -9019,7 +9763,6 @@ def update(self, :param force: bool (optional) Force update even if there are dependent external locations or external tables. :param isolation_mode: :class:`IsolationMode` (optional) - Whether the current securable is accessible from all workspaces or a specific set of workspaces. :param new_name: str (optional) New name for the storage credential. :param owner: str (optional) diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py index 4a77496d..17567ab6 100755 --- a/databricks/sdk/service/compute.py +++ b/databricks/sdk/service/compute.py @@ -2661,7 +2661,7 @@ class EbsVolumeType(Enum): @dataclass class EditCluster: cluster_id: str - """ID of the cluser""" + """ID of the cluster""" spark_version: str """The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can @@ -6645,7 +6645,8 @@ def set_permissions( ) -> ClusterPolicyPermissions: """Set cluster policy permissions. - Sets permissions on a cluster policy. Cluster policies can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param cluster_policy_id: str The cluster policy for which to get or manage permissions. @@ -7145,7 +7146,7 @@ def edit(self, Clusters created by the Databricks Jobs service cannot be edited. :param cluster_id: str - ID of the cluser + ID of the cluster :param spark_version: str The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can be retrieved by using the :method:clusters/sparkVersions API call. @@ -7672,7 +7673,8 @@ def set_permissions( access_control_list: Optional[List[ClusterAccessControlRequest]] = None) -> ClusterPermissions: """Set cluster permissions. - Sets permissions on a cluster. Clusters can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param cluster_id: str The cluster for which to get or manage permissions. 
@@ -7865,20 +7867,19 @@ def wait_command_status_command_execution_cancelled( attempt += 1 raise TimeoutError(f'timed out after {timeout}: {status_message}') - def wait_command_status_command_execution_finished_or_error( + def wait_context_status_command_execution_running( self, cluster_id: str, - command_id: str, context_id: str, timeout=timedelta(minutes=20), - callback: Optional[Callable[[CommandStatusResponse], None]] = None) -> CommandStatusResponse: + callback: Optional[Callable[[ContextStatusResponse], None]] = None) -> ContextStatusResponse: deadline = time.time() + timeout.total_seconds() - target_states = (CommandStatus.FINISHED, CommandStatus.ERROR, ) - failure_states = (CommandStatus.CANCELLED, CommandStatus.CANCELLING, ) + target_states = (ContextStatus.RUNNING, ) + failure_states = (ContextStatus.ERROR, ) status_message = 'polling...' attempt = 1 while time.time() < deadline: - poll = self.command_status(cluster_id=cluster_id, command_id=command_id, context_id=context_id) + poll = self.context_status(cluster_id=cluster_id, context_id=context_id) status = poll.status status_message = f'current status: {status}' if status in target_states: @@ -7886,9 +7887,9 @@ def wait_command_status_command_execution_finished_or_error( if callback: callback(poll) if status in failure_states: - msg = f'failed to reach Finished or Error, got {status}: {status_message}' + msg = f'failed to reach Running, got {status}: {status_message}' raise OperationFailed(msg) - prefix = f"cluster_id={cluster_id}, command_id={command_id}, context_id={context_id}" + prefix = f"cluster_id={cluster_id}, context_id={context_id}" sleep = attempt if sleep > 10: # sleep 10s max per attempt @@ -7898,19 +7899,20 @@ def wait_command_status_command_execution_finished_or_error( attempt += 1 raise TimeoutError(f'timed out after {timeout}: {status_message}') - def wait_context_status_command_execution_running( + def wait_command_status_command_execution_finished_or_error( self, cluster_id: str, + command_id: str, context_id: str, timeout=timedelta(minutes=20), - callback: Optional[Callable[[ContextStatusResponse], None]] = None) -> ContextStatusResponse: + callback: Optional[Callable[[CommandStatusResponse], None]] = None) -> CommandStatusResponse: deadline = time.time() + timeout.total_seconds() - target_states = (ContextStatus.RUNNING, ) - failure_states = (ContextStatus.ERROR, ) + target_states = (CommandStatus.FINISHED, CommandStatus.ERROR, ) + failure_states = (CommandStatus.CANCELLED, CommandStatus.CANCELLING, ) status_message = 'polling...' attempt = 1 while time.time() < deadline: - poll = self.context_status(cluster_id=cluster_id, context_id=context_id) + poll = self.command_status(cluster_id=cluster_id, command_id=command_id, context_id=context_id) status = poll.status status_message = f'current status: {status}' if status in target_states: @@ -7918,9 +7920,9 @@ def wait_context_status_command_execution_running( if callback: callback(poll) if status in failure_states: - msg = f'failed to reach Running, got {status}: {status_message}' + msg = f'failed to reach Finished or Error, got {status}: {status_message}' raise OperationFailed(msg) - prefix = f"cluster_id={cluster_id}, context_id={context_id}" + prefix = f"cluster_id={cluster_id}, command_id={command_id}, context_id={context_id}" sleep = attempt if sleep > 10: # sleep 10s max per attempt @@ -8515,7 +8517,8 @@ def set_permissions( ) -> InstancePoolPermissions: """Set instance pool permissions. - Sets permissions on an instance pool. 
Instance pools can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param instance_pool_id: str The instance pool for which to get or manage permissions. diff --git a/databricks/sdk/service/dashboards.py b/databricks/sdk/service/dashboards.py index 4a4c640e..1b02d8c8 100755 --- a/databricks/sdk/service/dashboards.py +++ b/databricks/sdk/service/dashboards.py @@ -20,103 +20,6 @@ # all definitions in this file are in alphabetical order -@dataclass -class CreateDashboardRequest: - display_name: str - """The display name of the dashboard.""" - - parent_path: Optional[str] = None - """The workspace path of the folder containing the dashboard. Includes leading slash and no - trailing slash. This field is excluded in List Dashboards responses.""" - - serialized_dashboard: Optional[str] = None - """The contents of the dashboard in serialized string form. This field is excluded in List - Dashboards responses. Use the [get dashboard API] to retrieve an example response, which - includes the `serialized_dashboard` field. This field provides the structure of the JSON string - that represents the dashboard's layout and components. - - [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get""" - - warehouse_id: Optional[str] = None - """The warehouse ID used to run the dashboard.""" - - def as_dict(self) -> dict: - """Serializes the CreateDashboardRequest into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.display_name is not None: body['display_name'] = self.display_name - if self.parent_path is not None: body['parent_path'] = self.parent_path - if self.serialized_dashboard is not None: body['serialized_dashboard'] = self.serialized_dashboard - if self.warehouse_id is not None: body['warehouse_id'] = self.warehouse_id - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> CreateDashboardRequest: - """Deserializes the CreateDashboardRequest from a dictionary.""" - return cls(display_name=d.get('display_name', None), - parent_path=d.get('parent_path', None), - serialized_dashboard=d.get('serialized_dashboard', None), - warehouse_id=d.get('warehouse_id', None)) - - -@dataclass -class CreateScheduleRequest: - cron_schedule: CronSchedule - """The cron expression describing the frequency of the periodic refresh for this schedule.""" - - dashboard_id: Optional[str] = None - """UUID identifying the dashboard to which the schedule belongs.""" - - display_name: Optional[str] = None - """The display name for schedule.""" - - pause_status: Optional[SchedulePauseStatus] = None - """The status indicates whether this schedule is paused or not.""" - - def as_dict(self) -> dict: - """Serializes the CreateScheduleRequest into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.cron_schedule: body['cron_schedule'] = self.cron_schedule.as_dict() - if self.dashboard_id is not None: body['dashboard_id'] = self.dashboard_id - if self.display_name is not None: body['display_name'] = self.display_name - if self.pause_status is not None: body['pause_status'] = self.pause_status.value - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> CreateScheduleRequest: - """Deserializes the CreateScheduleRequest from a dictionary.""" - return cls(cron_schedule=_from_dict(d, 'cron_schedule', CronSchedule), - 
dashboard_id=d.get('dashboard_id', None), - display_name=d.get('display_name', None), - pause_status=_enum(d, 'pause_status', SchedulePauseStatus)) - - -@dataclass -class CreateSubscriptionRequest: - subscriber: Subscriber - """Subscriber details for users and destinations to be added as subscribers to the schedule.""" - - dashboard_id: Optional[str] = None - """UUID identifying the dashboard to which the subscription belongs.""" - - schedule_id: Optional[str] = None - """UUID identifying the schedule to which the subscription belongs.""" - - def as_dict(self) -> dict: - """Serializes the CreateSubscriptionRequest into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.dashboard_id is not None: body['dashboard_id'] = self.dashboard_id - if self.schedule_id is not None: body['schedule_id'] = self.schedule_id - if self.subscriber: body['subscriber'] = self.subscriber.as_dict() - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> CreateSubscriptionRequest: - """Deserializes the CreateSubscriptionRequest from a dictionary.""" - return cls(dashboard_id=d.get('dashboard_id', None), - schedule_id=d.get('schedule_id', None), - subscriber=_from_dict(d, 'subscriber', Subscriber)) - - @dataclass class CronSchedule: quartz_cron_expression: str @@ -607,6 +510,7 @@ class MessageErrorType(Enum): LOCAL_CONTEXT_EXCEEDED_EXCEPTION = 'LOCAL_CONTEXT_EXCEEDED_EXCEPTION' MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION = 'MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION' MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION = 'MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION' + NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE = 'NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE' NO_QUERY_TO_VISUALIZE_EXCEPTION = 'NO_QUERY_TO_VISUALIZE_EXCEPTION' NO_TABLES_TO_QUERY_EXCEPTION = 'NO_TABLES_TO_QUERY_EXCEPTION' RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION = 'RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION' @@ -839,6 +743,9 @@ class Schedule: update_time: Optional[str] = None """A timestamp indicating when the schedule was last updated.""" + warehouse_id: Optional[str] = None + """The warehouse id to run the dashboard with for the schedule.""" + def as_dict(self) -> dict: """Serializes the Schedule into a dictionary suitable for use as a JSON request body.""" body = {} @@ -850,6 +757,7 @@ def as_dict(self) -> dict: if self.pause_status is not None: body['pause_status'] = self.pause_status.value if self.schedule_id is not None: body['schedule_id'] = self.schedule_id if self.update_time is not None: body['update_time'] = self.update_time + if self.warehouse_id is not None: body['warehouse_id'] = self.warehouse_id return body @classmethod @@ -862,7 +770,8 @@ def from_dict(cls, d: Dict[str, any]) -> Schedule: etag=d.get('etag', None), pause_status=_enum(d, 'pause_status', SchedulePauseStatus), schedule_id=d.get('schedule_id', None), - update_time=d.get('update_time', None)) + update_time=d.get('update_time', None), + warehouse_id=d.get('warehouse_id', None)) class SchedulePauseStatus(Enum): @@ -1032,93 +941,6 @@ def from_dict(cls, d: Dict[str, any]) -> UnpublishDashboardResponse: return cls() -@dataclass -class UpdateDashboardRequest: - dashboard_id: Optional[str] = None - """UUID identifying the dashboard.""" - - display_name: Optional[str] = None - """The display name of the dashboard.""" - - etag: Optional[str] = None - """The etag for the dashboard. Can be optionally provided on updates to ensure that the dashboard - has not been modified since the last read. 
This field is excluded in List Dashboards responses.""" - - serialized_dashboard: Optional[str] = None - """The contents of the dashboard in serialized string form. This field is excluded in List - Dashboards responses. Use the [get dashboard API] to retrieve an example response, which - includes the `serialized_dashboard` field. This field provides the structure of the JSON string - that represents the dashboard's layout and components. - - [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get""" - - warehouse_id: Optional[str] = None - """The warehouse ID used to run the dashboard.""" - - def as_dict(self) -> dict: - """Serializes the UpdateDashboardRequest into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.dashboard_id is not None: body['dashboard_id'] = self.dashboard_id - if self.display_name is not None: body['display_name'] = self.display_name - if self.etag is not None: body['etag'] = self.etag - if self.serialized_dashboard is not None: body['serialized_dashboard'] = self.serialized_dashboard - if self.warehouse_id is not None: body['warehouse_id'] = self.warehouse_id - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> UpdateDashboardRequest: - """Deserializes the UpdateDashboardRequest from a dictionary.""" - return cls(dashboard_id=d.get('dashboard_id', None), - display_name=d.get('display_name', None), - etag=d.get('etag', None), - serialized_dashboard=d.get('serialized_dashboard', None), - warehouse_id=d.get('warehouse_id', None)) - - -@dataclass -class UpdateScheduleRequest: - cron_schedule: CronSchedule - """The cron expression describing the frequency of the periodic refresh for this schedule.""" - - dashboard_id: Optional[str] = None - """UUID identifying the dashboard to which the schedule belongs.""" - - display_name: Optional[str] = None - """The display name for schedule.""" - - etag: Optional[str] = None - """The etag for the schedule. Must be left empty on create, must be provided on updates to ensure - that the schedule has not been modified since the last read, and can be optionally provided on - delete.""" - - pause_status: Optional[SchedulePauseStatus] = None - """The status indicates whether this schedule is paused or not.""" - - schedule_id: Optional[str] = None - """UUID identifying the schedule.""" - - def as_dict(self) -> dict: - """Serializes the UpdateScheduleRequest into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.cron_schedule: body['cron_schedule'] = self.cron_schedule.as_dict() - if self.dashboard_id is not None: body['dashboard_id'] = self.dashboard_id - if self.display_name is not None: body['display_name'] = self.display_name - if self.etag is not None: body['etag'] = self.etag - if self.pause_status is not None: body['pause_status'] = self.pause_status.value - if self.schedule_id is not None: body['schedule_id'] = self.schedule_id - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> UpdateScheduleRequest: - """Deserializes the UpdateScheduleRequest from a dictionary.""" - return cls(cron_schedule=_from_dict(d, 'cron_schedule', CronSchedule), - dashboard_id=d.get('dashboard_id', None), - display_name=d.get('display_name', None), - etag=d.get('etag', None), - pause_status=_enum(d, 'pause_status', SchedulePauseStatus), - schedule_id=d.get('schedule_id', None)) - - class GenieAPI: """Genie provides a no-code experience for business users, powered by AI/BI. 
Analysts set up spaces that business users can use to ask questions using natural language. Genie uses data registered to Unity @@ -1313,66 +1135,31 @@ class LakeviewAPI: def __init__(self, api_client): self._api = api_client - def create(self, - display_name: str, - *, - parent_path: Optional[str] = None, - serialized_dashboard: Optional[str] = None, - warehouse_id: Optional[str] = None) -> Dashboard: + def create(self, *, dashboard: Optional[Dashboard] = None) -> Dashboard: """Create dashboard. Create a draft dashboard. - :param display_name: str - The display name of the dashboard. - :param parent_path: str (optional) - The workspace path of the folder containing the dashboard. Includes leading slash and no trailing - slash. This field is excluded in List Dashboards responses. - :param serialized_dashboard: str (optional) - The contents of the dashboard in serialized string form. This field is excluded in List Dashboards - responses. Use the [get dashboard API] to retrieve an example response, which includes the - `serialized_dashboard` field. This field provides the structure of the JSON string that represents - the dashboard's layout and components. - - [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get - :param warehouse_id: str (optional) - The warehouse ID used to run the dashboard. + :param dashboard: :class:`Dashboard` (optional) :returns: :class:`Dashboard` """ - body = {} - if display_name is not None: body['display_name'] = display_name - if parent_path is not None: body['parent_path'] = parent_path - if serialized_dashboard is not None: body['serialized_dashboard'] = serialized_dashboard - if warehouse_id is not None: body['warehouse_id'] = warehouse_id + body = dashboard headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } res = self._api.do('POST', '/api/2.0/lakeview/dashboards', body=body, headers=headers) return Dashboard.from_dict(res) - def create_schedule(self, - dashboard_id: str, - cron_schedule: CronSchedule, - *, - display_name: Optional[str] = None, - pause_status: Optional[SchedulePauseStatus] = None) -> Schedule: + def create_schedule(self, dashboard_id: str, *, schedule: Optional[Schedule] = None) -> Schedule: """Create dashboard schedule. :param dashboard_id: str UUID identifying the dashboard to which the schedule belongs. - :param cron_schedule: :class:`CronSchedule` - The cron expression describing the frequency of the periodic refresh for this schedule. - :param display_name: str (optional) - The display name for schedule. - :param pause_status: :class:`SchedulePauseStatus` (optional) - The status indicates whether this schedule is paused or not. + :param schedule: :class:`Schedule` (optional) :returns: :class:`Schedule` """ - body = {} - if cron_schedule is not None: body['cron_schedule'] = cron_schedule.as_dict() - if display_name is not None: body['display_name'] = display_name - if pause_status is not None: body['pause_status'] = pause_status.value + body = schedule headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } res = self._api.do('POST', @@ -1381,21 +1168,22 @@ def create_schedule(self, headers=headers) return Schedule.from_dict(res) - def create_subscription(self, dashboard_id: str, schedule_id: str, - subscriber: Subscriber) -> Subscription: + def create_subscription(self, + dashboard_id: str, + schedule_id: str, + *, + subscription: Optional[Subscription] = None) -> Subscription: """Create schedule subscription. 
:param dashboard_id: str UUID identifying the dashboard to which the subscription belongs. :param schedule_id: str UUID identifying the schedule to which the subscription belongs. - :param subscriber: :class:`Subscriber` - Subscriber details for users and destinations to be added as subscribers to the schedule. + :param subscription: :class:`Subscription` (optional) :returns: :class:`Subscription` """ - body = {} - if subscriber is not None: body['subscriber'] = subscriber.as_dict() + body = subscription headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } res = self._api.do( @@ -1729,41 +1517,18 @@ def unpublish(self, dashboard_id: str): self._api.do('DELETE', f'/api/2.0/lakeview/dashboards/{dashboard_id}/published', headers=headers) - def update(self, - dashboard_id: str, - *, - display_name: Optional[str] = None, - etag: Optional[str] = None, - serialized_dashboard: Optional[str] = None, - warehouse_id: Optional[str] = None) -> Dashboard: + def update(self, dashboard_id: str, *, dashboard: Optional[Dashboard] = None) -> Dashboard: """Update dashboard. Update a draft dashboard. :param dashboard_id: str UUID identifying the dashboard. - :param display_name: str (optional) - The display name of the dashboard. - :param etag: str (optional) - The etag for the dashboard. Can be optionally provided on updates to ensure that the dashboard has - not been modified since the last read. This field is excluded in List Dashboards responses. - :param serialized_dashboard: str (optional) - The contents of the dashboard in serialized string form. This field is excluded in List Dashboards - responses. Use the [get dashboard API] to retrieve an example response, which includes the - `serialized_dashboard` field. This field provides the structure of the JSON string that represents - the dashboard's layout and components. - - [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get - :param warehouse_id: str (optional) - The warehouse ID used to run the dashboard. + :param dashboard: :class:`Dashboard` (optional) :returns: :class:`Dashboard` """ - body = {} - if display_name is not None: body['display_name'] = display_name - if etag is not None: body['etag'] = etag - if serialized_dashboard is not None: body['serialized_dashboard'] = serialized_dashboard - if warehouse_id is not None: body['warehouse_id'] = warehouse_id + body = dashboard headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } res = self._api.do('PATCH', @@ -1775,34 +1540,19 @@ def update(self, def update_schedule(self, dashboard_id: str, schedule_id: str, - cron_schedule: CronSchedule, *, - display_name: Optional[str] = None, - etag: Optional[str] = None, - pause_status: Optional[SchedulePauseStatus] = None) -> Schedule: + schedule: Optional[Schedule] = None) -> Schedule: """Update dashboard schedule. :param dashboard_id: str UUID identifying the dashboard to which the schedule belongs. :param schedule_id: str UUID identifying the schedule. - :param cron_schedule: :class:`CronSchedule` - The cron expression describing the frequency of the periodic refresh for this schedule. - :param display_name: str (optional) - The display name for schedule. - :param etag: str (optional) - The etag for the schedule. Must be left empty on create, must be provided on updates to ensure that - the schedule has not been modified since the last read, and can be optionally provided on delete. 
- :param pause_status: :class:`SchedulePauseStatus` (optional) - The status indicates whether this schedule is paused or not. + :param schedule: :class:`Schedule` (optional) :returns: :class:`Schedule` """ - body = {} - if cron_schedule is not None: body['cron_schedule'] = cron_schedule.as_dict() - if display_name is not None: body['display_name'] = display_name - if etag is not None: body['etag'] = etag - if pause_status is not None: body['pause_status'] = pause_status.value + body = schedule headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } res = self._api.do('PUT', diff --git a/databricks/sdk/service/iam.py b/databricks/sdk/service/iam.py index f1c56a1a..05d1ccce 100755 --- a/databricks/sdk/service/iam.py +++ b/databricks/sdk/service/iam.py @@ -2643,7 +2643,8 @@ def set(self, access_control_list: Optional[List[AccessControlRequest]] = None) -> ObjectPermissions: """Set object permissions. - Sets permissions on an object. Objects can inherit permissions from their parent objects or root + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their parent objects or root object. :param request_object_type: str @@ -3205,7 +3206,8 @@ def set_permissions( access_control_list: Optional[List[PasswordAccessControlRequest]] = None) -> PasswordPermissions: """Set password permissions. - Sets permissions on all passwords. Passwords can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param access_control_list: List[:class:`PasswordAccessControlRequest`] (optional) diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index a4f138d6..82d3bac6 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -2482,8 +2482,9 @@ class RepairRun: be specified in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set parameters containing - information about job runs.""" + Use [Task parameter variables] to set parameters containing information about job runs. + + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables""" job_parameters: Optional[Dict[str, str]] = None """Job-level parameters used in the run. 
for example `"param": "overriding_val"`""" @@ -2916,9 +2917,6 @@ class Run: overriding_parameters: Optional[RunParameters] = None """The parameters used for this run.""" - prev_page_token: Optional[str] = None - """A token that can be used to list the previous page of sub-resources.""" - queue_duration: Optional[int] = None """The time in milliseconds that the run has spent in the queue.""" @@ -3005,7 +3003,6 @@ def as_dict(self) -> dict: if self.original_attempt_run_id is not None: body['original_attempt_run_id'] = self.original_attempt_run_id if self.overriding_parameters: body['overriding_parameters'] = self.overriding_parameters.as_dict() - if self.prev_page_token is not None: body['prev_page_token'] = self.prev_page_token if self.queue_duration is not None: body['queue_duration'] = self.queue_duration if self.repair_history: body['repair_history'] = [v.as_dict() for v in self.repair_history] if self.run_duration is not None: body['run_duration'] = self.run_duration @@ -3044,7 +3041,6 @@ def from_dict(cls, d: Dict[str, any]) -> Run: number_in_job=d.get('number_in_job', None), original_attempt_run_id=d.get('original_attempt_run_id', None), overriding_parameters=_from_dict(d, 'overriding_parameters', RunParameters), - prev_page_token=d.get('prev_page_token', None), queue_duration=d.get('queue_duration', None), repair_history=_repeated_dict(d, 'repair_history', RepairHistoryItem), run_duration=d.get('run_duration', None), @@ -3190,8 +3186,9 @@ class RunJobTask: be specified in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set parameters containing - information about job runs.""" + Use [Task parameter variables] to set parameters containing information about job runs. + + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables""" job_parameters: Optional[Dict[str, str]] = None """Job-level parameters used to trigger the job.""" @@ -3350,8 +3347,9 @@ class RunNow: be specified in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set parameters containing - information about job runs.""" + Use [Task parameter variables] to set parameters containing information about job runs. + + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables""" job_parameters: Optional[Dict[str, str]] = None """Job-level parameters used in the run. for example `"param": "overriding_val"`""" @@ -3563,8 +3561,9 @@ class RunParameters: be specified in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set parameters containing - information about job runs.""" + Use [Task parameter variables] to set parameters containing information about job runs. 
+ + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables""" notebook_params: Optional[Dict[str, str]] = None """A map from keys to values for jobs with notebook task, for example `"notebook_params": {"name": @@ -3774,13 +3773,13 @@ class RunTask: once the Jobs service has requested a cluster for the run.""" condition_task: Optional[RunConditionTask] = None - """If condition_task, specifies a condition with an outcome that can be used to control the - execution of other tasks. Does not require a cluster to execute and does not support retries or - notifications.""" + """The task evaluates a condition that can be used to control the execution of other tasks when the + `condition_task` field is present. The condition task does not require a cluster to execute and + does not support retries or notifications.""" dbt_task: Optional[DbtTask] = None - """If dbt_task, indicates that this must execute a dbt task. It requires both Databricks SQL and - the ability to use a serverless or a pro SQL warehouse.""" + """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task + requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.""" depends_on: Optional[List[TaskDependency]] = None """An optional array of objects specifying the dependency graph of the task. All tasks specified in @@ -3815,7 +3814,8 @@ class RunTask: responding. We suggest running jobs and tasks on new clusters for greater reliability""" for_each_task: Optional[RunForEachTask] = None - """If for_each_task, indicates that this task must execute the nested task within it.""" + """The task executes a nested task for every input provided when the `for_each_task` field is + present.""" git_source: Optional[GitSource] = None """An optional specification for a remote Git repository containing the source code used by tasks. @@ -3837,18 +3837,18 @@ class RunTask: """If new_cluster, a description of a new cluster that is created for each run.""" notebook_task: Optional[NotebookTask] = None - """If notebook_task, indicates that this task must run a notebook. This field may not be specified - in conjunction with spark_jar_task.""" + """The task runs a notebook when the `notebook_task` field is present.""" notification_settings: Optional[TaskNotificationSettings] = None """Optional notification settings that are used when sending notifications to each of the `email_notifications` and `webhook_notifications` for this task run.""" pipeline_task: Optional[PipelineTask] = None - """If pipeline_task, indicates that this task must execute a Pipeline.""" + """The task triggers a pipeline update when the `pipeline_task` field is present. 
Only pipelines
+    configured to use triggered mode are supported."""
 
     python_wheel_task: Optional[PythonWheelTask] = None
-    """If python_wheel_task, indicates that this job must execute a PythonWheel."""
+    """The task runs a Python wheel when the `python_wheel_task` field is present."""
 
     queue_duration: Optional[int] = None
     """The time in milliseconds that the run has spent in the queue."""
@@ -3868,7 +3868,7 @@ class RunTask:
     :method:jobs/create for a list of possible values."""
 
     run_job_task: Optional[RunJobTask] = None
-    """If run_job_task, indicates that this task must execute another job."""
+    """The task triggers another job when the `run_job_task` field is present."""
 
     run_page_url: Optional[str] = None
 
@@ -3880,14 +3880,14 @@ class RunTask:
     duration of a multitask job run is the value of the `run_duration` field."""
 
     spark_jar_task: Optional[SparkJarTask] = None
-    """If spark_jar_task, indicates that this task must run a JAR."""
+    """The task runs a JAR when the `spark_jar_task` field is present."""
 
     spark_python_task: Optional[SparkPythonTask] = None
-    """If spark_python_task, indicates that this task must run a Python file."""
+    """The task runs a Python file when the `spark_python_task` field is present."""
 
     spark_submit_task: Optional[SparkSubmitTask] = None
-    """If `spark_submit_task`, indicates that this task must be launched by the spark submit script.
-    This task can run only on new clusters.
+    """(Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present.
+    This task can run only on new clusters and is not compatible with serverless compute.
 
     In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use
     `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark
@@ -3903,7 +3903,8 @@ class RunTask:
     The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths."""
 
     sql_task: Optional[SqlTask] = None
-    """If sql_task, indicates that this job must execute a SQL task."""
+    """The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when
+    the `sql_task` field is present."""
 
     start_time: Optional[int] = None
     """The time at which this run was started in epoch milliseconds (milliseconds since 1/1/1970 UTC).
@@ -4664,13 +4665,13 @@ class SubmitTask:
     used to reference the tasks to be updated or reset."""
 
     condition_task: Optional[ConditionTask] = None
-    """If condition_task, specifies a condition with an outcome that can be used to control the
-    execution of other tasks. Does not require a cluster to execute and does not support retries or
-    notifications."""
+    """The task evaluates a condition that can be used to control the execution of other tasks when the
+    `condition_task` field is present. The condition task does not require a cluster to execute and
+    does not support retries or notifications."""
 
     dbt_task: Optional[DbtTask] = None
-    """If dbt_task, indicates that this must execute a dbt task. It requires both Databricks SQL and
-    the ability to use a serverless or a pro SQL warehouse."""
+    """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task
+    requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse."""
 
     depends_on: Optional[List[TaskDependency]] = None
     """An optional array of objects specifying the dependency graph of the task. All tasks specified in
@@ -4694,7 +4695,8 @@ class SubmitTask:
     responding.
We suggest running jobs and tasks on new clusters for greater reliability"""
 
     for_each_task: Optional[ForEachTask] = None
-    """If for_each_task, indicates that this task must execute the nested task within it."""
+    """The task executes a nested task for every input provided when the `for_each_task` field is
+    present."""
 
     health: Optional[JobsHealthRules] = None
     """An optional set of health rules that can be defined for this job."""
@@ -4707,18 +4709,18 @@ class SubmitTask:
     """If new_cluster, a description of a new cluster that is created for each run."""
 
     notebook_task: Optional[NotebookTask] = None
-    """If notebook_task, indicates that this task must run a notebook. This field may not be specified
-    in conjunction with spark_jar_task."""
+    """The task runs a notebook when the `notebook_task` field is present."""
 
     notification_settings: Optional[TaskNotificationSettings] = None
     """Optional notification settings that are used when sending notifications to each of the
     `email_notifications` and `webhook_notifications` for this task run."""
 
     pipeline_task: Optional[PipelineTask] = None
-    """If pipeline_task, indicates that this task must execute a Pipeline."""
+    """The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines
+    configured to use triggered mode are supported."""
 
     python_wheel_task: Optional[PythonWheelTask] = None
-    """If python_wheel_task, indicates that this job must execute a PythonWheel."""
+    """The task runs a Python wheel when the `python_wheel_task` field is present."""
 
     run_if: Optional[RunIf] = None
     """An optional value indicating the condition that determines whether the task should be run once
@@ -4726,17 +4728,17 @@ class SubmitTask:
     :method:jobs/create for a list of possible values."""
 
     run_job_task: Optional[RunJobTask] = None
-    """If run_job_task, indicates that this task must execute another job."""
+    """The task triggers another job when the `run_job_task` field is present."""
 
     spark_jar_task: Optional[SparkJarTask] = None
-    """If spark_jar_task, indicates that this task must run a JAR."""
+    """The task runs a JAR when the `spark_jar_task` field is present."""
 
     spark_python_task: Optional[SparkPythonTask] = None
-    """If spark_python_task, indicates that this task must run a Python file."""
+    """The task runs a Python file when the `spark_python_task` field is present."""
 
     spark_submit_task: Optional[SparkSubmitTask] = None
-    """If `spark_submit_task`, indicates that this task must be launched by the spark submit script.
-    This task can run only on new clusters.
+    """(Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present.
+    This task can run only on new clusters and is not compatible with serverless compute.
 
     In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use
     `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark
@@ -4752,7 +4754,8 @@ class SubmitTask:
     The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths."""
 
     sql_task: Optional[SqlTask] = None
-    """If sql_task, indicates that this job must execute a SQL task."""
+    """The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when
+    the `sql_task` field is present."""
 
     timeout_seconds: Optional[int] = None
     """An optional timeout applied to each run of this job task.
A value of `0` means no timeout."""
 
@@ -4866,13 +4869,13 @@ class Task:
         used to reference the tasks to be updated or reset."""
 
     condition_task: Optional[ConditionTask] = None
-    """If condition_task, specifies a condition with an outcome that can be used to control the
-    execution of other tasks. Does not require a cluster to execute and does not support retries or
-    notifications."""
+    """The task evaluates a condition that can be used to control the execution of other tasks when the
+    `condition_task` field is present. The condition task does not require a cluster to execute and
+    does not support retries or notifications."""
 
     dbt_task: Optional[DbtTask] = None
-    """If dbt_task, indicates that this must execute a dbt task. It requires both Databricks SQL and
-    the ability to use a serverless or a pro SQL warehouse."""
+    """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task
+    requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse."""
 
     depends_on: Optional[List[TaskDependency]] = None
     """An optional array of objects specifying the dependency graph of the task. All tasks specified in
@@ -4900,7 +4903,8 @@ class Task:
         responding. We suggest running jobs and tasks on new clusters for greater reliability"""
 
     for_each_task: Optional[ForEachTask] = None
-    """If for_each_task, indicates that this task must execute the nested task within it."""
+    """The task executes a nested task for every input provided when the `for_each_task` field is
+    present."""
 
     health: Optional[JobsHealthRules] = None
     """An optional set of health rules that can be defined for this job."""
@@ -4927,18 +4931,18 @@ class Task:
     """If new_cluster, a description of a new cluster that is created for each run."""
 
     notebook_task: Optional[NotebookTask] = None
-    """If notebook_task, indicates that this task must run a notebook. This field may not be specified
-    in conjunction with spark_jar_task."""
+    """The task runs a notebook when the `notebook_task` field is present."""
 
     notification_settings: Optional[TaskNotificationSettings] = None
     """Optional notification settings that are used when sending notifications to each of the
     `email_notifications` and `webhook_notifications` for this task."""
 
     pipeline_task: Optional[PipelineTask] = None
-    """If pipeline_task, indicates that this task must execute a Pipeline."""
+    """The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines
+    configured to use triggered mode are supported."""
 
     python_wheel_task: Optional[PythonWheelTask] = None
-    """If python_wheel_task, indicates that this job must execute a PythonWheel."""
+    """The task runs a Python wheel when the `python_wheel_task` field is present."""
 
     retry_on_timeout: Optional[bool] = None
     """An optional policy to specify whether to retry a job when it times out.
The default behavior is @@ -4954,17 +4958,17 @@ class Task: least one dependency failed * `ALL_FAILED`: ALl dependencies have failed""" run_job_task: Optional[RunJobTask] = None - """If run_job_task, indicates that this task must execute another job.""" + """The task triggers another job when the `run_job_task` field is present.""" spark_jar_task: Optional[SparkJarTask] = None - """If spark_jar_task, indicates that this task must run a JAR.""" + """The task runs a JAR when the `spark_jar_task` field is present.""" spark_python_task: Optional[SparkPythonTask] = None - """If spark_python_task, indicates that this task must run a Python file.""" + """The task runs a Python file when the `spark_python_task` field is present.""" spark_submit_task: Optional[SparkSubmitTask] = None - """If `spark_submit_task`, indicates that this task must be launched by the spark submit script. - This task can run only on new clusters. + """(Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. + This task can run only on new clusters and is not compatible with serverless compute. In the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark @@ -4980,7 +4984,8 @@ class Task: The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.""" sql_task: Optional[SqlTask] = None - """If sql_task, indicates that this job must execute a SQL task.""" + """The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when + the `sql_task` field is present.""" timeout_seconds: Optional[int] = None """An optional timeout applied to each run of this job task. A value of `0` means no timeout.""" @@ -5922,8 +5927,8 @@ def get_run(self, :param include_resolved_values: bool (optional) Whether to include resolved parameter values in the response. :param page_token: str (optional) - To list the next page or the previous page of job tasks, set this field to the value of the - `next_page_token` or `prev_page_token` returned in the GetJob response. + To list the next page of job tasks, set this field to the value of the `next_page_token` returned in + the GetJob response. :returns: :class:`Run` """ @@ -6111,8 +6116,9 @@ def repair_run(self, in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set parameters containing - information about job runs. + Use [Task parameter variables] to set parameters containing information about job runs. + + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables :param job_parameters: Dict[str,str] (optional) Job-level parameters used in the run. for example `"param": "overriding_val"` :param latest_repair_id: int (optional) @@ -6304,8 +6310,9 @@ def run_now(self, in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set parameters containing - information about job runs. + Use [Task parameter variables] to set parameters containing information about job runs. + + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables :param job_parameters: Dict[str,str] (optional) Job-level parameters used in the run. 
for example `"param": "overriding_val"` :param notebook_params: Dict[str,str] (optional) @@ -6423,7 +6430,8 @@ def set_permissions( access_control_list: Optional[List[JobAccessControlRequest]] = None) -> JobPermissions: """Set job permissions. - Sets permissions on a job. Jobs can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param job_id: str The job for which to get or manage permissions. diff --git a/databricks/sdk/service/marketplace.py b/databricks/sdk/service/marketplace.py index 1a2dedf3..242e3bf0 100755 --- a/databricks/sdk/service/marketplace.py +++ b/databricks/sdk/service/marketplace.py @@ -56,6 +56,7 @@ class AssetType(Enum): ASSET_TYPE_MEDIA = 'ASSET_TYPE_MEDIA' ASSET_TYPE_MODEL = 'ASSET_TYPE_MODEL' ASSET_TYPE_NOTEBOOK = 'ASSET_TYPE_NOTEBOOK' + ASSET_TYPE_PARTNER_INTEGRATION = 'ASSET_TYPE_PARTNER_INTEGRATION' @dataclass diff --git a/databricks/sdk/service/ml.py b/databricks/sdk/service/ml.py index b2cec812..c44edbe4 100755 --- a/databricks/sdk/service/ml.py +++ b/databricks/sdk/service/ml.py @@ -4596,7 +4596,8 @@ def set_permissions( ) -> ExperimentPermissions: """Set experiment permissions. - Sets permissions on an experiment. Experiments can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param experiment_id: str The experiment for which to get or manage permissions. @@ -5571,8 +5572,8 @@ def set_permissions( ) -> RegisteredModelPermissions: """Set registered model permissions. - Sets permissions on a registered model. Registered models can inherit permissions from their root - object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param registered_model_id: str The registered model for which to get or manage permissions. 
diff --git a/databricks/sdk/service/oauth2.py b/databricks/sdk/service/oauth2.py index 0c439ae7..01edcdf5 100755 --- a/databricks/sdk/service/oauth2.py +++ b/databricks/sdk/service/oauth2.py @@ -389,19 +389,24 @@ def from_dict(cls, d: Dict[str, any]) -> GetPublishedAppsOutput: @dataclass class ListServicePrincipalSecretsResponse: + next_page_token: Optional[str] = None + """A token, which can be sent as `page_token` to retrieve the next page.""" + secrets: Optional[List[SecretInfo]] = None """List of the secrets""" def as_dict(self) -> dict: """Serializes the ListServicePrincipalSecretsResponse into a dictionary suitable for use as a JSON request body.""" body = {} + if self.next_page_token is not None: body['next_page_token'] = self.next_page_token if self.secrets: body['secrets'] = [v.as_dict() for v in self.secrets] return body @classmethod def from_dict(cls, d: Dict[str, any]) -> ListServicePrincipalSecretsResponse: """Deserializes the ListServicePrincipalSecretsResponse from a dictionary.""" - return cls(secrets=_repeated_dict(d, 'secrets', SecretInfo)) + return cls(next_page_token=d.get('next_page_token', None), + secrets=_repeated_dict(d, 'secrets', SecretInfo)) @dataclass @@ -960,7 +965,7 @@ def delete(self, service_principal_id: int, secret_id: str): f'/api/2.0/accounts/{self._api.account_id}/servicePrincipals/{service_principal_id}/credentials/secrets/{secret_id}', headers=headers) - def list(self, service_principal_id: int) -> Iterator[SecretInfo]: + def list(self, service_principal_id: int, *, page_token: Optional[str] = None) -> Iterator[SecretInfo]: """List service principal secrets. List all secrets associated with the given service principal. This operation only returns information @@ -968,15 +973,30 @@ def list(self, service_principal_id: int) -> Iterator[SecretInfo]: :param service_principal_id: int The service principal ID. + :param page_token: str (optional) + An opaque page token which was the `next_page_token` in the response of the previous request to list + the secrets for this service principal. Provide this token to retrieve the next page of secret + entries. When providing a `page_token`, all other parameters provided to the request must match the + previous request. To list all of the secrets for a service principal, it is necessary to continue + requesting pages of entries until the response contains no `next_page_token`. Note that the number + of entries returned must not be used to determine when the listing is complete. 
:returns: Iterator over :class:`SecretInfo` """ + query = {} + if page_token is not None: query['page_token'] = page_token headers = {'Accept': 'application/json', } - json = self._api.do( - 'GET', - f'/api/2.0/accounts/{self._api.account_id}/servicePrincipals/{service_principal_id}/credentials/secrets', - headers=headers) - parsed = ListServicePrincipalSecretsResponse.from_dict(json).secrets - return parsed if parsed is not None else [] + while True: + json = self._api.do( + 'GET', + f'/api/2.0/accounts/{self._api.account_id}/servicePrincipals/{service_principal_id}/credentials/secrets', + query=query, + headers=headers) + if 'secrets' in json: + for v in json['secrets']: + yield SecretInfo.from_dict(v) + if 'next_page_token' not in json or not json['next_page_token']: + return + query['page_token'] = json['next_page_token'] diff --git a/databricks/sdk/service/pipelines.py b/databricks/sdk/service/pipelines.py index 9c12f878..26461d08 100755 --- a/databricks/sdk/service/pipelines.py +++ b/databricks/sdk/service/pipelines.py @@ -615,6 +615,10 @@ def from_dict(cls, d: Dict[str, any]) -> IngestionConfig: @dataclass class IngestionGatewayPipelineDefinition: connection_id: Optional[str] = None + """[Deprecated, use connection_name instead] Immutable. The Unity Catalog connection this gateway + pipeline uses to communicate with the source.""" + + connection_name: Optional[str] = None """Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.""" @@ -633,6 +637,7 @@ def as_dict(self) -> dict: """Serializes the IngestionGatewayPipelineDefinition into a dictionary suitable for use as a JSON request body.""" body = {} if self.connection_id is not None: body['connection_id'] = self.connection_id + if self.connection_name is not None: body['connection_name'] = self.connection_name if self.gateway_storage_catalog is not None: body['gateway_storage_catalog'] = self.gateway_storage_catalog if self.gateway_storage_name is not None: body['gateway_storage_name'] = self.gateway_storage_name @@ -644,6 +649,7 @@ def as_dict(self) -> dict: def from_dict(cls, d: Dict[str, any]) -> IngestionGatewayPipelineDefinition: """Deserializes the IngestionGatewayPipelineDefinition from a dictionary.""" return cls(connection_id=d.get('connection_id', None), + connection_name=d.get('connection_name', None), gateway_storage_catalog=d.get('gateway_storage_catalog', None), gateway_storage_name=d.get('gateway_storage_name', None), gateway_storage_schema=d.get('gateway_storage_schema', None)) @@ -2122,13 +2128,13 @@ class PipelinesAPI: def __init__(self, api_client): self._api = api_client - def wait_get_pipeline_idle( + def wait_get_pipeline_running( self, pipeline_id: str, timeout=timedelta(minutes=20), callback: Optional[Callable[[GetPipelineResponse], None]] = None) -> GetPipelineResponse: deadline = time.time() + timeout.total_seconds() - target_states = (PipelineState.IDLE, ) + target_states = (PipelineState.RUNNING, ) failure_states = (PipelineState.FAILED, ) status_message = 'polling...' 
attempt = 1 @@ -2141,7 +2147,7 @@ def wait_get_pipeline_idle( if callback: callback(poll) if status in failure_states: - msg = f'failed to reach IDLE, got {status}: {status_message}' + msg = f'failed to reach RUNNING, got {status}: {status_message}' raise OperationFailed(msg) prefix = f"pipeline_id={pipeline_id}" sleep = attempt @@ -2153,13 +2159,13 @@ def wait_get_pipeline_idle( attempt += 1 raise TimeoutError(f'timed out after {timeout}: {status_message}') - def wait_get_pipeline_running( + def wait_get_pipeline_idle( self, pipeline_id: str, timeout=timedelta(minutes=20), callback: Optional[Callable[[GetPipelineResponse], None]] = None) -> GetPipelineResponse: deadline = time.time() + timeout.total_seconds() - target_states = (PipelineState.RUNNING, ) + target_states = (PipelineState.IDLE, ) failure_states = (PipelineState.FAILED, ) status_message = 'polling...' attempt = 1 @@ -2172,7 +2178,7 @@ def wait_get_pipeline_running( if callback: callback(poll) if status in failure_states: - msg = f'failed to reach RUNNING, got {status}: {status_message}' + msg = f'failed to reach IDLE, got {status}: {status_message}' raise OperationFailed(msg) prefix = f"pipeline_id={pipeline_id}" sleep = attempt @@ -2518,7 +2524,8 @@ def set_permissions( access_control_list: Optional[List[PipelineAccessControlRequest]] = None) -> PipelinePermissions: """Set pipeline permissions. - Sets permissions on a pipeline. Pipelines can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param pipeline_id: str The pipeline for which to get or manage permissions. diff --git a/databricks/sdk/service/provisioning.py b/databricks/sdk/service/provisioning.py index 1dc6f3b8..b1d825d1 100755 --- a/databricks/sdk/service/provisioning.py +++ b/databricks/sdk/service/provisioning.py @@ -412,6 +412,9 @@ class CreateWorkspaceRequest: gke_config: Optional[GkeConfig] = None """The configurations for the GKE cluster of a Databricks workspace.""" + is_no_public_ip_enabled: Optional[bool] = None + """Whether no public IP is enabled for the workspace.""" + location: Optional[str] = None """The Google Cloud region of the workspace data plane in your Google account. 
For example, `us-east4`."""
 
@@ -460,6 +463,8 @@ def as_dict(self) -> dict:
         if self.gcp_managed_network_config:
             body['gcp_managed_network_config'] = self.gcp_managed_network_config.as_dict()
         if self.gke_config: body['gke_config'] = self.gke_config.as_dict()
+        if self.is_no_public_ip_enabled is not None:
+            body['is_no_public_ip_enabled'] = self.is_no_public_ip_enabled
         if self.location is not None: body['location'] = self.location
         if self.managed_services_customer_managed_key_id is not None:
             body['managed_services_customer_managed_key_id'] = self.managed_services_customer_managed_key_id
@@ -486,6 +491,7 @@ def from_dict(cls, d: Dict[str, any]) -> CreateWorkspaceRequest:
                    gcp_managed_network_config=_from_dict(d, 'gcp_managed_network_config',
                                                          GcpManagedNetworkConfig),
                    gke_config=_from_dict(d, 'gke_config', GkeConfig),
+                   is_no_public_ip_enabled=d.get('is_no_public_ip_enabled', None),
                    location=d.get('location', None),
                    managed_services_customer_managed_key_id=d.get('managed_services_customer_managed_key_id',
                                                                   None),
@@ -632,6 +638,35 @@ class ErrorType(Enum):
     VPC = 'vpc'
 
 
+@dataclass
+class ExternalCustomerInfo:
+    authoritative_user_email: Optional[str] = None
+    """Email of the authoritative user."""
+
+    authoritative_user_full_name: Optional[str] = None
+    """The authoritative user full name."""
+
+    customer_name: Optional[str] = None
+    """The legal entity name for the external workspace."""
+
+    def as_dict(self) -> dict:
+        """Serializes the ExternalCustomerInfo into a dictionary suitable for use as a JSON request body."""
+        body = {}
+        if self.authoritative_user_email is not None:
+            body['authoritative_user_email'] = self.authoritative_user_email
+        if self.authoritative_user_full_name is not None:
+            body['authoritative_user_full_name'] = self.authoritative_user_full_name
+        if self.customer_name is not None: body['customer_name'] = self.customer_name
+        return body
+
+    @classmethod
+    def from_dict(cls, d: Dict[str, any]) -> ExternalCustomerInfo:
+        """Deserializes the ExternalCustomerInfo from a dictionary."""
+        return cls(authoritative_user_email=d.get('authoritative_user_email', None),
+                   authoritative_user_full_name=d.get('authoritative_user_full_name', None),
+                   customer_name=d.get('customer_name', None))
+
+
 @dataclass
 class GcpKeyInfo:
     kms_key_id: str
@@ -1443,6 +1478,10 @@ class Workspace:
         This value must be unique across all non-deleted deployments across all AWS regions."""
 
+    external_customer_info: Optional[ExternalCustomerInfo] = None
+    """If this workspace is for an external customer, then external_customer_info is populated. If this
+    workspace is not for an external customer, then external_customer_info is empty."""
+
     gcp_managed_network_config: Optional[GcpManagedNetworkConfig] = None
     """The network settings for the workspace. The configurations are only for Databricks-managed VPCs.
It is ignored if you specify a customer-managed VPC in the `network_id` field.", All the IP @@ -1466,6 +1505,9 @@ class Workspace: gke_config: Optional[GkeConfig] = None """The configurations for the GKE cluster of a Databricks workspace.""" + is_no_public_ip_enabled: Optional[bool] = None + """Whether no public IP is enabled for the workspace.""" + location: Optional[str] = None """The Google Cloud region of the workspace data plane in your Google account (for example, `us-east4`).""" @@ -1524,9 +1566,12 @@ def as_dict(self) -> dict: if self.credentials_id is not None: body['credentials_id'] = self.credentials_id if self.custom_tags: body['custom_tags'] = self.custom_tags if self.deployment_name is not None: body['deployment_name'] = self.deployment_name + if self.external_customer_info: body['external_customer_info'] = self.external_customer_info.as_dict() if self.gcp_managed_network_config: body['gcp_managed_network_config'] = self.gcp_managed_network_config.as_dict() if self.gke_config: body['gke_config'] = self.gke_config.as_dict() + if self.is_no_public_ip_enabled is not None: + body['is_no_public_ip_enabled'] = self.is_no_public_ip_enabled if self.location is not None: body['location'] = self.location if self.managed_services_customer_managed_key_id is not None: body['managed_services_customer_managed_key_id'] = self.managed_services_customer_managed_key_id @@ -1557,9 +1602,11 @@ def from_dict(cls, d: Dict[str, any]) -> Workspace: credentials_id=d.get('credentials_id', None), custom_tags=d.get('custom_tags', None), deployment_name=d.get('deployment_name', None), + external_customer_info=_from_dict(d, 'external_customer_info', ExternalCustomerInfo), gcp_managed_network_config=_from_dict(d, 'gcp_managed_network_config', GcpManagedNetworkConfig), gke_config=_from_dict(d, 'gke_config', GkeConfig), + is_no_public_ip_enabled=d.get('is_no_public_ip_enabled', None), location=d.get('location', None), managed_services_customer_managed_key_id=d.get('managed_services_customer_managed_key_id', None), @@ -2399,6 +2446,7 @@ def create(self, deployment_name: Optional[str] = None, gcp_managed_network_config: Optional[GcpManagedNetworkConfig] = None, gke_config: Optional[GkeConfig] = None, + is_no_public_ip_enabled: Optional[bool] = None, location: Optional[str] = None, managed_services_customer_managed_key_id: Optional[str] = None, network_id: Optional[str] = None, @@ -2477,6 +2525,8 @@ def create(self, [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html :param gke_config: :class:`GkeConfig` (optional) The configurations for the GKE cluster of a Databricks workspace. + :param is_no_public_ip_enabled: bool (optional) + Whether no public IP is enabled for the workspace. :param location: str (optional) The Google Cloud region of the workspace data plane in your Google account. For example, `us-east4`. 
:param managed_services_customer_managed_key_id: str (optional) @@ -2519,6 +2569,7 @@ def create(self, if gcp_managed_network_config is not None: body['gcp_managed_network_config'] = gcp_managed_network_config.as_dict() if gke_config is not None: body['gke_config'] = gke_config.as_dict() + if is_no_public_ip_enabled is not None: body['is_no_public_ip_enabled'] = is_no_public_ip_enabled if location is not None: body['location'] = location if managed_services_customer_managed_key_id is not None: body['managed_services_customer_managed_key_id'] = managed_services_customer_managed_key_id @@ -2552,6 +2603,7 @@ def create_and_wait( deployment_name: Optional[str] = None, gcp_managed_network_config: Optional[GcpManagedNetworkConfig] = None, gke_config: Optional[GkeConfig] = None, + is_no_public_ip_enabled: Optional[bool] = None, location: Optional[str] = None, managed_services_customer_managed_key_id: Optional[str] = None, network_id: Optional[str] = None, @@ -2568,6 +2620,7 @@ def create_and_wait( deployment_name=deployment_name, gcp_managed_network_config=gcp_managed_network_config, gke_config=gke_config, + is_no_public_ip_enabled=is_no_public_ip_enabled, location=location, managed_services_customer_managed_key_id=managed_services_customer_managed_key_id, network_id=network_id, diff --git a/databricks/sdk/service/serving.py b/databricks/sdk/service/serving.py index 7639d96f..b00420a0 100755 --- a/databricks/sdk/service/serving.py +++ b/databricks/sdk/service/serving.py @@ -2994,8 +2994,8 @@ def set_permissions( ) -> ServingEndpointPermissions: """Set serving endpoint permissions. - Sets permissions on a serving endpoint. Serving endpoints can inherit permissions from their root - object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param serving_endpoint_id: str The serving endpoint for which to get or manage permissions. diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py index a6a23515..607cc308 100755 --- a/databricks/sdk/service/settings.py +++ b/databricks/sdk/service/settings.py @@ -14,6 +14,122 @@ # all definitions in this file are in alphabetical order +@dataclass +class AibiDashboardEmbeddingAccessPolicy: + access_policy_type: AibiDashboardEmbeddingAccessPolicyAccessPolicyType + + def as_dict(self) -> dict: + """Serializes the AibiDashboardEmbeddingAccessPolicy into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.access_policy_type is not None: body['access_policy_type'] = self.access_policy_type.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> AibiDashboardEmbeddingAccessPolicy: + """Deserializes the AibiDashboardEmbeddingAccessPolicy from a dictionary.""" + return cls(access_policy_type=_enum(d, 'access_policy_type', + AibiDashboardEmbeddingAccessPolicyAccessPolicyType)) + + +class AibiDashboardEmbeddingAccessPolicyAccessPolicyType(Enum): + + ALLOW_ALL_DOMAINS = 'ALLOW_ALL_DOMAINS' + ALLOW_APPROVED_DOMAINS = 'ALLOW_APPROVED_DOMAINS' + DENY_ALL_DOMAINS = 'DENY_ALL_DOMAINS' + + +@dataclass +class AibiDashboardEmbeddingAccessPolicySetting: + aibi_dashboard_embedding_access_policy: AibiDashboardEmbeddingAccessPolicy + + etag: Optional[str] = None + """etag used for versioning. The response is at least as fresh as the eTag provided. 
This is used + for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + overwriting each other. It is strongly suggested that systems make use of the etag in the read + -> update pattern to perform setting updates in order to avoid race conditions. That is, get an + etag from a GET request, and pass it with the PATCH request to identify the setting version you + are updating.""" + + setting_name: Optional[str] = None + """Name of the corresponding setting. This field is populated in the response, but it will not be + respected even if it's set in the request body. The setting name in the path parameter will be + respected instead. Setting name is required to be 'default' if the setting only has one instance + per workspace.""" + + def as_dict(self) -> dict: + """Serializes the AibiDashboardEmbeddingAccessPolicySetting into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.aibi_dashboard_embedding_access_policy: + body[ + 'aibi_dashboard_embedding_access_policy'] = self.aibi_dashboard_embedding_access_policy.as_dict( + ) + if self.etag is not None: body['etag'] = self.etag + if self.setting_name is not None: body['setting_name'] = self.setting_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> AibiDashboardEmbeddingAccessPolicySetting: + """Deserializes the AibiDashboardEmbeddingAccessPolicySetting from a dictionary.""" + return cls(aibi_dashboard_embedding_access_policy=_from_dict( + d, 'aibi_dashboard_embedding_access_policy', AibiDashboardEmbeddingAccessPolicy), + etag=d.get('etag', None), + setting_name=d.get('setting_name', None)) + + +@dataclass +class AibiDashboardEmbeddingApprovedDomains: + approved_domains: Optional[List[str]] = None + + def as_dict(self) -> dict: + """Serializes the AibiDashboardEmbeddingApprovedDomains into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.approved_domains: body['approved_domains'] = [v for v in self.approved_domains] + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> AibiDashboardEmbeddingApprovedDomains: + """Deserializes the AibiDashboardEmbeddingApprovedDomains from a dictionary.""" + return cls(approved_domains=d.get('approved_domains', None)) + + +@dataclass +class AibiDashboardEmbeddingApprovedDomainsSetting: + aibi_dashboard_embedding_approved_domains: AibiDashboardEmbeddingApprovedDomains + + etag: Optional[str] = None + """etag used for versioning. The response is at least as fresh as the eTag provided. This is used + for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + overwriting each other. It is strongly suggested that systems make use of the etag in the read + -> update pattern to perform setting updates in order to avoid race conditions. That is, get an + etag from a GET request, and pass it with the PATCH request to identify the setting version you + are updating.""" + + setting_name: Optional[str] = None + """Name of the corresponding setting. This field is populated in the response, but it will not be + respected even if it's set in the request body. The setting name in the path parameter will be + respected instead. 
Setting name is required to be 'default' if the setting only has one instance + per workspace.""" + + def as_dict(self) -> dict: + """Serializes the AibiDashboardEmbeddingApprovedDomainsSetting into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.aibi_dashboard_embedding_approved_domains: + body[ + 'aibi_dashboard_embedding_approved_domains'] = self.aibi_dashboard_embedding_approved_domains.as_dict( + ) + if self.etag is not None: body['etag'] = self.etag + if self.setting_name is not None: body['setting_name'] = self.setting_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> AibiDashboardEmbeddingApprovedDomainsSetting: + """Deserializes the AibiDashboardEmbeddingApprovedDomainsSetting from a dictionary.""" + return cls(aibi_dashboard_embedding_approved_domains=_from_dict( + d, 'aibi_dashboard_embedding_approved_domains', AibiDashboardEmbeddingApprovedDomains), + etag=d.get('etag', None), + setting_name=d.get('setting_name', None)) + + @dataclass class AutomaticClusterUpdateSetting: automatic_cluster_update_workspace: ClusterAutoRestartMessage @@ -2299,6 +2415,9 @@ class TokenInfo: expiry_time: Optional[int] = None """Timestamp when the token expires.""" + last_used_day: Optional[int] = None + """Approximate timestamp for the day the token was last used. Accurate up to 1 day.""" + owner_id: Optional[int] = None """User ID of the user that owns the token.""" @@ -2316,6 +2435,7 @@ def as_dict(self) -> dict: if self.created_by_username is not None: body['created_by_username'] = self.created_by_username if self.creation_time is not None: body['creation_time'] = self.creation_time if self.expiry_time is not None: body['expiry_time'] = self.expiry_time + if self.last_used_day is not None: body['last_used_day'] = self.last_used_day if self.owner_id is not None: body['owner_id'] = self.owner_id if self.token_id is not None: body['token_id'] = self.token_id if self.workspace_id is not None: body['workspace_id'] = self.workspace_id @@ -2329,6 +2449,7 @@ def from_dict(cls, d: Dict[str, any]) -> TokenInfo: created_by_username=d.get('created_by_username', None), creation_time=d.get('creation_time', None), expiry_time=d.get('expiry_time', None), + last_used_day=d.get('last_used_day', None), owner_id=d.get('owner_id', None), token_id=d.get('token_id', None), workspace_id=d.get('workspace_id', None)) @@ -2435,6 +2556,66 @@ class TokenType(Enum): AZURE_ACTIVE_DIRECTORY_TOKEN = 'AZURE_ACTIVE_DIRECTORY_TOKEN' +@dataclass +class UpdateAibiDashboardEmbeddingAccessPolicySettingRequest: + """Details required to update a setting.""" + + allow_missing: bool + """This should always be set to true for Settings API. Added for AIP compliance.""" + + setting: AibiDashboardEmbeddingAccessPolicySetting + + field_mask: str + """Field mask is required to be passed into the PATCH request. Field mask specifies which fields of + the setting payload will be updated. The field mask needs to be supplied as single string. 
To + specify multiple fields in the field mask, use comma as the separator (no space).""" + + def as_dict(self) -> dict: + """Serializes the UpdateAibiDashboardEmbeddingAccessPolicySettingRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.allow_missing is not None: body['allow_missing'] = self.allow_missing + if self.field_mask is not None: body['field_mask'] = self.field_mask + if self.setting: body['setting'] = self.setting.as_dict() + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> UpdateAibiDashboardEmbeddingAccessPolicySettingRequest: + """Deserializes the UpdateAibiDashboardEmbeddingAccessPolicySettingRequest from a dictionary.""" + return cls(allow_missing=d.get('allow_missing', None), + field_mask=d.get('field_mask', None), + setting=_from_dict(d, 'setting', AibiDashboardEmbeddingAccessPolicySetting)) + + +@dataclass +class UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest: + """Details required to update a setting.""" + + allow_missing: bool + """This should always be set to true for Settings API. Added for AIP compliance.""" + + setting: AibiDashboardEmbeddingApprovedDomainsSetting + + field_mask: str + """Field mask is required to be passed into the PATCH request. Field mask specifies which fields of + the setting payload will be updated. The field mask needs to be supplied as single string. To + specify multiple fields in the field mask, use comma as the separator (no space).""" + + def as_dict(self) -> dict: + """Serializes the UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.allow_missing is not None: body['allow_missing'] = self.allow_missing + if self.field_mask is not None: body['field_mask'] = self.field_mask + if self.setting: body['setting'] = self.setting.as_dict() + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest: + """Deserializes the UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest from a dictionary.""" + return cls(allow_missing=d.get('allow_missing', None), + field_mask=d.get('field_mask', None), + setting=_from_dict(d, 'setting', AibiDashboardEmbeddingApprovedDomainsSetting)) + + @dataclass class UpdateAutomaticClusterUpdateSettingRequest: """Details required to update a setting.""" @@ -3103,6 +3284,130 @@ def personal_compute(self) -> PersonalComputeAPI: return self._personal_compute +class AibiDashboardEmbeddingAccessPolicyAPI: + """Controls whether AI/BI published dashboard embedding is enabled, conditionally enabled, or disabled at the + workspace level. By default, this setting is conditionally enabled (ALLOW_APPROVED_DOMAINS).""" + + def __init__(self, api_client): + self._api = api_client + + def get(self, *, etag: Optional[str] = None) -> AibiDashboardEmbeddingAccessPolicySetting: + """Retrieve the AI/BI dashboard embedding access policy. + + Retrieves the AI/BI dashboard embedding access policy. The default setting is ALLOW_APPROVED_DOMAINS, + permitting AI/BI dashboards to be embedded on approved domains. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. 
It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`AibiDashboardEmbeddingAccessPolicySetting` + """ + + query = {} + if etag is not None: query['etag'] = etag + headers = {'Accept': 'application/json', } + + res = self._api.do('GET', + '/api/2.0/settings/types/aibi_dash_embed_ws_acc_policy/names/default', + query=query, + headers=headers) + return AibiDashboardEmbeddingAccessPolicySetting.from_dict(res) + + def update(self, allow_missing: bool, setting: AibiDashboardEmbeddingAccessPolicySetting, + field_mask: str) -> AibiDashboardEmbeddingAccessPolicySetting: + """Update the AI/BI dashboard embedding access policy. + + Updates the AI/BI dashboard embedding access policy at the workspace level. + + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`AibiDashboardEmbeddingAccessPolicySetting` + :param field_mask: str + Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the + setting payload will be updated. The field mask needs to be supplied as single string. To specify + multiple fields in the field mask, use comma as the separator (no space). + + :returns: :class:`AibiDashboardEmbeddingAccessPolicySetting` + """ + body = {} + if allow_missing is not None: body['allow_missing'] = allow_missing + if field_mask is not None: body['field_mask'] = field_mask + if setting is not None: body['setting'] = setting.as_dict() + headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } + + res = self._api.do('PATCH', + '/api/2.0/settings/types/aibi_dash_embed_ws_acc_policy/names/default', + body=body, + headers=headers) + return AibiDashboardEmbeddingAccessPolicySetting.from_dict(res) + + +class AibiDashboardEmbeddingApprovedDomainsAPI: + """Controls the list of domains approved to host the embedded AI/BI dashboards. The approved domains list + can't be mutated when the current access policy is not set to ALLOW_APPROVED_DOMAINS.""" + + def __init__(self, api_client): + self._api = api_client + + def get(self, *, etag: Optional[str] = None) -> AibiDashboardEmbeddingApprovedDomainsSetting: + """Retrieve the list of domains approved to host embedded AI/BI dashboards. + + Retrieves the list of domains approved to host embedded AI/BI dashboards. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. 
+ + :returns: :class:`AibiDashboardEmbeddingApprovedDomainsSetting` + """ + + query = {} + if etag is not None: query['etag'] = etag + headers = {'Accept': 'application/json', } + + res = self._api.do('GET', + '/api/2.0/settings/types/aibi_dash_embed_ws_apprvd_domains/names/default', + query=query, + headers=headers) + return AibiDashboardEmbeddingApprovedDomainsSetting.from_dict(res) + + def update(self, allow_missing: bool, setting: AibiDashboardEmbeddingApprovedDomainsSetting, + field_mask: str) -> AibiDashboardEmbeddingApprovedDomainsSetting: + """Update the list of domains approved to host embedded AI/BI dashboards. + + Updates the list of domains approved to host embedded AI/BI dashboards. This update will fail if the + current workspace access policy is not ALLOW_APPROVED_DOMAINS. + + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`AibiDashboardEmbeddingApprovedDomainsSetting` + :param field_mask: str + Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the + setting payload will be updated. The field mask needs to be supplied as single string. To specify + multiple fields in the field mask, use comma as the separator (no space). + + :returns: :class:`AibiDashboardEmbeddingApprovedDomainsSetting` + """ + body = {} + if allow_missing is not None: body['allow_missing'] = allow_missing + if field_mask is not None: body['field_mask'] = field_mask + if setting is not None: body['setting'] = setting.as_dict() + headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } + + res = self._api.do('PATCH', + '/api/2.0/settings/types/aibi_dash_embed_ws_apprvd_domains/names/default', + body=body, + headers=headers) + return AibiDashboardEmbeddingApprovedDomainsSetting.from_dict(res) + + class AutomaticClusterUpdateAPI: """Controls whether automatic cluster update is enabled for the current workspace. 
By default, it is turned off.""" @@ -4580,6 +4885,8 @@ class SettingsAPI: def __init__(self, api_client): self._api = api_client + self._aibi_dashboard_embedding_access_policy = AibiDashboardEmbeddingAccessPolicyAPI(self._api) + self._aibi_dashboard_embedding_approved_domains = AibiDashboardEmbeddingApprovedDomainsAPI(self._api) self._automatic_cluster_update = AutomaticClusterUpdateAPI(self._api) self._compliance_security_profile = ComplianceSecurityProfileAPI(self._api) self._default_namespace = DefaultNamespaceAPI(self._api) @@ -4588,6 +4895,16 @@ def __init__(self, api_client): self._enhanced_security_monitoring = EnhancedSecurityMonitoringAPI(self._api) self._restrict_workspace_admins = RestrictWorkspaceAdminsAPI(self._api) + @property + def aibi_dashboard_embedding_access_policy(self) -> AibiDashboardEmbeddingAccessPolicyAPI: + """Controls whether AI/BI published dashboard embedding is enabled, conditionally enabled, or disabled at the workspace level.""" + return self._aibi_dashboard_embedding_access_policy + + @property + def aibi_dashboard_embedding_approved_domains(self) -> AibiDashboardEmbeddingApprovedDomainsAPI: + """Controls the list of domains approved to host the embedded AI/BI dashboards.""" + return self._aibi_dashboard_embedding_approved_domains + @property def automatic_cluster_update(self) -> AutomaticClusterUpdateAPI: """Controls whether automatic cluster update is enabled for the current workspace.""" @@ -4751,7 +5068,8 @@ def set_permissions( access_control_list: Optional[List[TokenAccessControlRequest]] = None) -> TokenPermissions: """Set token permissions. - Sets permissions on all tokens. Tokens can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param access_control_list: List[:class:`TokenAccessControlRequest`] (optional) diff --git a/databricks/sdk/service/sharing.py b/databricks/sdk/service/sharing.py index 772bc7ae..091fa9e8 100755 --- a/databricks/sdk/service/sharing.py +++ b/databricks/sdk/service/sharing.py @@ -23,418 +23,6 @@ class AuthenticationType(Enum): TOKEN = 'TOKEN' -@dataclass -class CentralCleanRoomInfo: - clean_room_assets: Optional[List[CleanRoomAssetInfo]] = None - """All assets from all collaborators that are available in the clean room. 
Only one of table_info - or notebook_info will be filled in.""" - - collaborators: Optional[List[CleanRoomCollaboratorInfo]] = None - """All collaborators who are in the clean room.""" - - creator: Optional[CleanRoomCollaboratorInfo] = None - """The collaborator who created the clean room.""" - - station_cloud: Optional[str] = None - """The cloud where clean room tasks will be run.""" - - station_region: Optional[str] = None - """The region where clean room tasks will be run.""" - - def as_dict(self) -> dict: - """Serializes the CentralCleanRoomInfo into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.clean_room_assets: body['clean_room_assets'] = [v.as_dict() for v in self.clean_room_assets] - if self.collaborators: body['collaborators'] = [v.as_dict() for v in self.collaborators] - if self.creator: body['creator'] = self.creator.as_dict() - if self.station_cloud is not None: body['station_cloud'] = self.station_cloud - if self.station_region is not None: body['station_region'] = self.station_region - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> CentralCleanRoomInfo: - """Deserializes the CentralCleanRoomInfo from a dictionary.""" - return cls(clean_room_assets=_repeated_dict(d, 'clean_room_assets', CleanRoomAssetInfo), - collaborators=_repeated_dict(d, 'collaborators', CleanRoomCollaboratorInfo), - creator=_from_dict(d, 'creator', CleanRoomCollaboratorInfo), - station_cloud=d.get('station_cloud', None), - station_region=d.get('station_region', None)) - - -@dataclass -class CleanRoomAssetInfo: - added_at: Optional[int] = None - """Time at which this asset was added, in epoch milliseconds.""" - - notebook_info: Optional[CleanRoomNotebookInfo] = None - """Details about the notebook asset.""" - - owner: Optional[CleanRoomCollaboratorInfo] = None - """The collaborator who owns the asset.""" - - table_info: Optional[CleanRoomTableInfo] = None - """Details about the table asset.""" - - updated_at: Optional[int] = None - """Time at which this asset was updated, in epoch milliseconds.""" - - def as_dict(self) -> dict: - """Serializes the CleanRoomAssetInfo into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.added_at is not None: body['added_at'] = self.added_at - if self.notebook_info: body['notebook_info'] = self.notebook_info.as_dict() - if self.owner: body['owner'] = self.owner.as_dict() - if self.table_info: body['table_info'] = self.table_info.as_dict() - if self.updated_at is not None: body['updated_at'] = self.updated_at - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> CleanRoomAssetInfo: - """Deserializes the CleanRoomAssetInfo from a dictionary.""" - return cls(added_at=d.get('added_at', None), - notebook_info=_from_dict(d, 'notebook_info', CleanRoomNotebookInfo), - owner=_from_dict(d, 'owner', CleanRoomCollaboratorInfo), - table_info=_from_dict(d, 'table_info', CleanRoomTableInfo), - updated_at=d.get('updated_at', None)) - - -@dataclass -class CleanRoomCatalog: - catalog_name: Optional[str] = None - """Name of the catalog in the clean room station. 
Empty for notebooks.""" - - notebook_files: Optional[List[SharedDataObject]] = None - """The details of the shared notebook files.""" - - tables: Optional[List[SharedDataObject]] = None - """The details of the shared tables.""" - - def as_dict(self) -> dict: - """Serializes the CleanRoomCatalog into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.catalog_name is not None: body['catalog_name'] = self.catalog_name - if self.notebook_files: body['notebook_files'] = [v.as_dict() for v in self.notebook_files] - if self.tables: body['tables'] = [v.as_dict() for v in self.tables] - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> CleanRoomCatalog: - """Deserializes the CleanRoomCatalog from a dictionary.""" - return cls(catalog_name=d.get('catalog_name', None), - notebook_files=_repeated_dict(d, 'notebook_files', SharedDataObject), - tables=_repeated_dict(d, 'tables', SharedDataObject)) - - -@dataclass -class CleanRoomCatalogUpdate: - catalog_name: Optional[str] = None - """The name of the catalog to update assets.""" - - updates: Optional[SharedDataObjectUpdate] = None - """The updates to the assets in the catalog.""" - - def as_dict(self) -> dict: - """Serializes the CleanRoomCatalogUpdate into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.catalog_name is not None: body['catalog_name'] = self.catalog_name - if self.updates: body['updates'] = self.updates.as_dict() - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> CleanRoomCatalogUpdate: - """Deserializes the CleanRoomCatalogUpdate from a dictionary.""" - return cls(catalog_name=d.get('catalog_name', None), - updates=_from_dict(d, 'updates', SharedDataObjectUpdate)) - - -@dataclass -class CleanRoomCollaboratorInfo: - global_metastore_id: Optional[str] = None - """The global Unity Catalog metastore id of the collaborator. Also known as the sharing identifier. - The identifier is of format __cloud__:__region__:__metastore-uuid__.""" - - organization_name: Optional[str] = None - """The organization name of the collaborator. 
This is configured in the metastore for Delta Sharing - and is used to identify the organization to other collaborators.""" - - def as_dict(self) -> dict: - """Serializes the CleanRoomCollaboratorInfo into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.global_metastore_id is not None: body['global_metastore_id'] = self.global_metastore_id - if self.organization_name is not None: body['organization_name'] = self.organization_name - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> CleanRoomCollaboratorInfo: - """Deserializes the CleanRoomCollaboratorInfo from a dictionary.""" - return cls(global_metastore_id=d.get('global_metastore_id', None), - organization_name=d.get('organization_name', None)) - - -@dataclass -class CleanRoomInfo: - comment: Optional[str] = None - """User-provided free-form text description.""" - - created_at: Optional[int] = None - """Time at which this clean room was created, in epoch milliseconds.""" - - created_by: Optional[str] = None - """Username of clean room creator.""" - - local_catalogs: Optional[List[CleanRoomCatalog]] = None - """Catalog aliases shared by the current collaborator with asset details.""" - - name: Optional[str] = None - """Name of the clean room.""" - - owner: Optional[str] = None - """Username of current owner of clean room.""" - - remote_detailed_info: Optional[CentralCleanRoomInfo] = None - """Central clean room details.""" - - updated_at: Optional[int] = None - """Time at which this clean room was updated, in epoch milliseconds.""" - - updated_by: Optional[str] = None - """Username of clean room updater.""" - - def as_dict(self) -> dict: - """Serializes the CleanRoomInfo into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.comment is not None: body['comment'] = self.comment - if self.created_at is not None: body['created_at'] = self.created_at - if self.created_by is not None: body['created_by'] = self.created_by - if self.local_catalogs: body['local_catalogs'] = [v.as_dict() for v in self.local_catalogs] - if self.name is not None: body['name'] = self.name - if self.owner is not None: body['owner'] = self.owner - if self.remote_detailed_info: body['remote_detailed_info'] = self.remote_detailed_info.as_dict() - if self.updated_at is not None: body['updated_at'] = self.updated_at - if self.updated_by is not None: body['updated_by'] = self.updated_by - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> CleanRoomInfo: - """Deserializes the CleanRoomInfo from a dictionary.""" - return cls(comment=d.get('comment', None), - created_at=d.get('created_at', None), - created_by=d.get('created_by', None), - local_catalogs=_repeated_dict(d, 'local_catalogs', CleanRoomCatalog), - name=d.get('name', None), - owner=d.get('owner', None), - remote_detailed_info=_from_dict(d, 'remote_detailed_info', CentralCleanRoomInfo), - updated_at=d.get('updated_at', None), - updated_by=d.get('updated_by', None)) - - -@dataclass -class CleanRoomNotebookInfo: - notebook_content: Optional[str] = None - """The base64 representation of the notebook content in HTML.""" - - notebook_name: Optional[str] = None - """The name of the notebook.""" - - def as_dict(self) -> dict: - """Serializes the CleanRoomNotebookInfo into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.notebook_content is not None: body['notebook_content'] = self.notebook_content - if self.notebook_name is not None: body['notebook_name'] = self.notebook_name - return body - - 
@classmethod - def from_dict(cls, d: Dict[str, any]) -> CleanRoomNotebookInfo: - """Deserializes the CleanRoomNotebookInfo from a dictionary.""" - return cls(notebook_content=d.get('notebook_content', None), - notebook_name=d.get('notebook_name', None)) - - -@dataclass -class CleanRoomTableInfo: - catalog_name: Optional[str] = None - """Name of parent catalog.""" - - columns: Optional[List[ColumnInfo]] = None - """The array of __ColumnInfo__ definitions of the table's columns.""" - - full_name: Optional[str] = None - """Full name of table, in form of __catalog_name__.__schema_name__.__table_name__""" - - name: Optional[str] = None - """Name of table, relative to parent schema.""" - - schema_name: Optional[str] = None - """Name of parent schema relative to its parent catalog.""" - - def as_dict(self) -> dict: - """Serializes the CleanRoomTableInfo into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.catalog_name is not None: body['catalog_name'] = self.catalog_name - if self.columns: body['columns'] = [v.as_dict() for v in self.columns] - if self.full_name is not None: body['full_name'] = self.full_name - if self.name is not None: body['name'] = self.name - if self.schema_name is not None: body['schema_name'] = self.schema_name - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> CleanRoomTableInfo: - """Deserializes the CleanRoomTableInfo from a dictionary.""" - return cls(catalog_name=d.get('catalog_name', None), - columns=_repeated_dict(d, 'columns', ColumnInfo), - full_name=d.get('full_name', None), - name=d.get('name', None), - schema_name=d.get('schema_name', None)) - - -@dataclass -class ColumnInfo: - comment: Optional[str] = None - """User-provided free-form text description.""" - - mask: Optional[ColumnMask] = None - - name: Optional[str] = None - """Name of Column.""" - - nullable: Optional[bool] = None - """Whether field may be Null (default: true).""" - - partition_index: Optional[int] = None - """Partition index for column.""" - - position: Optional[int] = None - """Ordinal position of column (starting at position 0).""" - - type_interval_type: Optional[str] = None - """Format of IntervalType.""" - - type_json: Optional[str] = None - """Full data type specification, JSON-serialized.""" - - type_name: Optional[ColumnTypeName] = None - """Name of type (INT, STRUCT, MAP, etc.).""" - - type_precision: Optional[int] = None - """Digits of precision; required for DecimalTypes.""" - - type_scale: Optional[int] = None - """Digits to right of decimal; Required for DecimalTypes.""" - - type_text: Optional[str] = None - """Full data type specification as SQL/catalogString text.""" - - def as_dict(self) -> dict: - """Serializes the ColumnInfo into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.comment is not None: body['comment'] = self.comment - if self.mask: body['mask'] = self.mask.as_dict() - if self.name is not None: body['name'] = self.name - if self.nullable is not None: body['nullable'] = self.nullable - if self.partition_index is not None: body['partition_index'] = self.partition_index - if self.position is not None: body['position'] = self.position - if self.type_interval_type is not None: body['type_interval_type'] = self.type_interval_type - if self.type_json is not None: body['type_json'] = self.type_json - if self.type_name is not None: body['type_name'] = self.type_name.value - if self.type_precision is not None: body['type_precision'] = self.type_precision - if self.type_scale is not None: 
body['type_scale'] = self.type_scale - if self.type_text is not None: body['type_text'] = self.type_text - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> ColumnInfo: - """Deserializes the ColumnInfo from a dictionary.""" - return cls(comment=d.get('comment', None), - mask=_from_dict(d, 'mask', ColumnMask), - name=d.get('name', None), - nullable=d.get('nullable', None), - partition_index=d.get('partition_index', None), - position=d.get('position', None), - type_interval_type=d.get('type_interval_type', None), - type_json=d.get('type_json', None), - type_name=_enum(d, 'type_name', ColumnTypeName), - type_precision=d.get('type_precision', None), - type_scale=d.get('type_scale', None), - type_text=d.get('type_text', None)) - - -@dataclass -class ColumnMask: - function_name: Optional[str] = None - """The full name of the column mask SQL UDF.""" - - using_column_names: Optional[List[str]] = None - """The list of additional table columns to be passed as input to the column mask function. The - first arg of the mask function should be of the type of the column being masked and the types of - the rest of the args should match the types of columns in 'using_column_names'.""" - - def as_dict(self) -> dict: - """Serializes the ColumnMask into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.function_name is not None: body['function_name'] = self.function_name - if self.using_column_names: body['using_column_names'] = [v for v in self.using_column_names] - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> ColumnMask: - """Deserializes the ColumnMask from a dictionary.""" - return cls(function_name=d.get('function_name', None), - using_column_names=d.get('using_column_names', None)) - - -class ColumnTypeName(Enum): - """Name of type (INT, STRUCT, MAP, etc.).""" - - ARRAY = 'ARRAY' - BINARY = 'BINARY' - BOOLEAN = 'BOOLEAN' - BYTE = 'BYTE' - CHAR = 'CHAR' - DATE = 'DATE' - DECIMAL = 'DECIMAL' - DOUBLE = 'DOUBLE' - FLOAT = 'FLOAT' - INT = 'INT' - INTERVAL = 'INTERVAL' - LONG = 'LONG' - MAP = 'MAP' - NULL = 'NULL' - SHORT = 'SHORT' - STRING = 'STRING' - STRUCT = 'STRUCT' - TABLE_TYPE = 'TABLE_TYPE' - TIMESTAMP = 'TIMESTAMP' - TIMESTAMP_NTZ = 'TIMESTAMP_NTZ' - USER_DEFINED_TYPE = 'USER_DEFINED_TYPE' - - -@dataclass -class CreateCleanRoom: - name: str - """Name of the clean room.""" - - remote_detailed_info: CentralCleanRoomInfo - """Central clean room details.""" - - comment: Optional[str] = None - """User-provided free-form text description.""" - - def as_dict(self) -> dict: - """Serializes the CreateCleanRoom into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.comment is not None: body['comment'] = self.comment - if self.name is not None: body['name'] = self.name - if self.remote_detailed_info: body['remote_detailed_info'] = self.remote_detailed_info.as_dict() - return body - - @classmethod - def from_dict(cls, d: Dict[str, any]) -> CreateCleanRoom: - """Deserializes the CreateCleanRoom from a dictionary.""" - return cls(comment=d.get('comment', None), - name=d.get('name', None), - remote_detailed_info=_from_dict(d, 'remote_detailed_info', CentralCleanRoomInfo)) - - @dataclass class CreateProvider: name: str @@ -623,29 +211,6 @@ def from_dict(cls, d: Dict[str, any]) -> IpAccessList: return cls(allowed_ip_addresses=d.get('allowed_ip_addresses', None)) -@dataclass -class ListCleanRoomsResponse: - clean_rooms: Optional[List[CleanRoomInfo]] = None - """An array of clean rooms. 
Remote details (central) are not included."""
-
-    next_page_token: Optional[str] = None
-    """Opaque token to retrieve the next page of results. Absent if there are no more pages.
-    __page_token__ should be set to this value for the next request (for the next page of results)."""
-
-    def as_dict(self) -> dict:
-        """Serializes the ListCleanRoomsResponse into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.clean_rooms: body['clean_rooms'] = [v.as_dict() for v in self.clean_rooms]
-        if self.next_page_token is not None: body['next_page_token'] = self.next_page_token
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, any]) -> ListCleanRoomsResponse:
-        """Deserializes the ListCleanRoomsResponse from a dictionary."""
-        return cls(clean_rooms=_repeated_dict(d, 'clean_rooms', CleanRoomInfo),
-                   next_page_token=d.get('next_page_token', None))
-
-
 @dataclass
 class ListProviderSharesResponse:
     next_page_token: Optional[str] = None
@@ -1473,38 +1038,6 @@ class SharedDataObjectUpdateAction(Enum):
     UPDATE = 'UPDATE'
 
 
-@dataclass
-class UpdateCleanRoom:
-    catalog_updates: Optional[List[CleanRoomCatalogUpdate]] = None
-    """Array of shared data object updates."""
-
-    comment: Optional[str] = None
-    """User-provided free-form text description."""
-
-    name: Optional[str] = None
-    """The name of the clean room."""
-
-    owner: Optional[str] = None
-    """Username of current owner of clean room."""
-
-    def as_dict(self) -> dict:
-        """Serializes the UpdateCleanRoom into a dictionary suitable for use as a JSON request body."""
-        body = {}
-        if self.catalog_updates: body['catalog_updates'] = [v.as_dict() for v in self.catalog_updates]
-        if self.comment is not None: body['comment'] = self.comment
-        if self.name is not None: body['name'] = self.name
-        if self.owner is not None: body['owner'] = self.owner
-        return body
-
-    @classmethod
-    def from_dict(cls, d: Dict[str, any]) -> UpdateCleanRoom:
-        """Deserializes the UpdateCleanRoom from a dictionary."""
-        return cls(catalog_updates=_repeated_dict(d, 'catalog_updates', CleanRoomCatalogUpdate),
-                   comment=d.get('comment', None),
-                   name=d.get('name', None),
-                   owner=d.get('owner', None))
-
-
 @dataclass
 class UpdatePermissionsResponse:
 
@@ -1699,157 +1232,6 @@ def from_dict(cls, d: Dict[str, any]) -> UpdateSharePermissions:
                    page_token=d.get('page_token', None))
 
 
-class CleanRoomsAPI:
-    """A clean room is a secure, privacy-protecting environment where two or more parties can share sensitive
-    enterprise data, including customer data, for measurements, insights, activation and other use cases.
-
-    To create clean rooms, you must be a metastore admin or a user with the **CREATE_CLEAN_ROOM** privilege."""
-
-    def __init__(self, api_client):
-        self._api = api_client
-
-    def create(self,
-               name: str,
-               remote_detailed_info: CentralCleanRoomInfo,
-               *,
-               comment: Optional[str] = None) -> CleanRoomInfo:
-        """Create a clean room.
-
-        Creates a new clean room with specified collaborators. The caller must be a metastore admin or have the
-        **CREATE_CLEAN_ROOM** privilege on the metastore.
-
-        :param name: str
-          Name of the clean room.
-        :param remote_detailed_info: :class:`CentralCleanRoomInfo`
-          Central clean room details.
-        :param comment: str (optional)
-          User-provided free-form text description. 
- - :returns: :class:`CleanRoomInfo` - """ - body = {} - if comment is not None: body['comment'] = comment - if name is not None: body['name'] = name - if remote_detailed_info is not None: body['remote_detailed_info'] = remote_detailed_info.as_dict() - headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } - - res = self._api.do('POST', '/api/2.1/unity-catalog/clean-rooms', body=body, headers=headers) - return CleanRoomInfo.from_dict(res) - - def delete(self, name: str): - """Delete a clean room. - - Deletes a data object clean room from the metastore. The caller must be an owner of the clean room. - - :param name: str - The name of the clean room. - - - """ - - headers = {'Accept': 'application/json', } - - self._api.do('DELETE', f'/api/2.1/unity-catalog/clean-rooms/{name}', headers=headers) - - def get(self, name: str, *, include_remote_details: Optional[bool] = None) -> CleanRoomInfo: - """Get a clean room. - - Gets a data object clean room from the metastore. The caller must be a metastore admin or the owner of - the clean room. - - :param name: str - The name of the clean room. - :param include_remote_details: bool (optional) - Whether to include remote details (central) on the clean room. - - :returns: :class:`CleanRoomInfo` - """ - - query = {} - if include_remote_details is not None: query['include_remote_details'] = include_remote_details - headers = {'Accept': 'application/json', } - - res = self._api.do('GET', f'/api/2.1/unity-catalog/clean-rooms/{name}', query=query, headers=headers) - return CleanRoomInfo.from_dict(res) - - def list(self, - *, - max_results: Optional[int] = None, - page_token: Optional[str] = None) -> Iterator[CleanRoomInfo]: - """List clean rooms. - - Gets an array of data object clean rooms from the metastore. The caller must be a metastore admin or - the owner of the clean room. There is no guarantee of a specific ordering of the elements in the - array. - - :param max_results: int (optional) - Maximum number of clean rooms to return. If not set, all the clean rooms are returned (not - recommended). - when set to a value greater than 0, the page length is the minimum of this value and - a server configured value; - when set to 0, the page length is set to a server configured value - (recommended); - when set to a value less than 0, an invalid parameter error is returned; - :param page_token: str (optional) - Opaque pagination token to go to next page based on previous query. - - :returns: Iterator over :class:`CleanRoomInfo` - """ - - query = {} - if max_results is not None: query['max_results'] = max_results - if page_token is not None: query['page_token'] = page_token - headers = {'Accept': 'application/json', } - - while True: - json = self._api.do('GET', '/api/2.1/unity-catalog/clean-rooms', query=query, headers=headers) - if 'clean_rooms' in json: - for v in json['clean_rooms']: - yield CleanRoomInfo.from_dict(v) - if 'next_page_token' not in json or not json['next_page_token']: - return - query['page_token'] = json['next_page_token'] - - def update(self, - name: str, - *, - catalog_updates: Optional[List[CleanRoomCatalogUpdate]] = None, - comment: Optional[str] = None, - owner: Optional[str] = None) -> CleanRoomInfo: - """Update a clean room. - - Updates the clean room with the changes and data objects in the request. The caller must be the owner - of the clean room or a metastore admin. - - When the caller is a metastore admin, only the __owner__ field can be updated. 
- - In the case that the clean room name is changed **updateCleanRoom** requires that the caller is both - the clean room owner and a metastore admin. - - For each table that is added through this method, the clean room owner must also have **SELECT** - privilege on the table. The privilege must be maintained indefinitely for recipients to be able to - access the table. Typically, you should use a group as the clean room owner. - - Table removals through **update** do not require additional privileges. - - :param name: str - The name of the clean room. - :param catalog_updates: List[:class:`CleanRoomCatalogUpdate`] (optional) - Array of shared data object updates. - :param comment: str (optional) - User-provided free-form text description. - :param owner: str (optional) - Username of current owner of clean room. - - :returns: :class:`CleanRoomInfo` - """ - body = {} - if catalog_updates is not None: body['catalog_updates'] = [v.as_dict() for v in catalog_updates] - if comment is not None: body['comment'] = comment - if owner is not None: body['owner'] = owner - headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } - - res = self._api.do('PATCH', f'/api/2.1/unity-catalog/clean-rooms/{name}', body=body, headers=headers) - return CleanRoomInfo.from_dict(res) - - class ProvidersAPI: """A data provider is an object representing the organization in the real world who shares the data. A provider contains shares which further contain the shared data.""" diff --git a/databricks/sdk/service/sql.py b/databricks/sdk/service/sql.py index 7a224fee..390aee5e 100755 --- a/databricks/sdk/service/sql.py +++ b/databricks/sdk/service/sql.py @@ -507,7 +507,7 @@ class ChannelName(Enum): CHANNEL_NAME_CURRENT = 'CHANNEL_NAME_CURRENT' CHANNEL_NAME_CUSTOM = 'CHANNEL_NAME_CUSTOM' CHANNEL_NAME_PREVIEW = 'CHANNEL_NAME_PREVIEW' - CHANNEL_NAME_UNSPECIFIED = 'CHANNEL_NAME_UNSPECIFIED' + CHANNEL_NAME_PREVIOUS = 'CHANNEL_NAME_PREVIOUS' @dataclass @@ -6579,11 +6579,10 @@ class StatementExecutionAPI: outstanding statement might have already completed execution when the cancel request arrives. Polling for status until a terminal state is reached is a reliable way to determine the final state. - Wait timeouts are approximate, occur server-side, and cannot account for things such as caller delays and network - latency from caller to service. - The system will auto-close a statement after one hour if the client - stops polling and thus you must poll at least once an hour. - The results are only available for one hour - after success; polling does not extend this. - The SQL Execution API must be used for the entire lifecycle - of the statement. For example, you cannot use the Jobs API to execute the command, and then the SQL - Execution API to cancel it. + latency from caller to service. - To guarantee that the statement is kept alive, you must poll at least + once every 15 minutes. - The results are only available for one hour after success; polling does not + extend this. - The SQL Execution API must be used for the entire lifecycle of the statement. For example, + you cannot use the Jobs API to execute the command, and then the SQL Execution API to cancel it. [Apache Arrow Columnar]: https://arrow.apache.org/overview/ [Databricks SQL Statement Execution API tutorial]: https://docs.databricks.com/sql/api/sql-execution-tutorial.html""" @@ -7243,7 +7242,8 @@ def set_permissions(self, ) -> WarehousePermissions: """Set SQL warehouse permissions. - Sets permissions on a SQL warehouse. 
SQL warehouses can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param warehouse_id: str The SQL warehouse for which to get or manage permissions. diff --git a/databricks/sdk/service/workspace.py b/databricks/sdk/service/workspace.py index 7c8bfbd5..01c463a0 100755 --- a/databricks/sdk/service/workspace.py +++ b/databricks/sdk/service/workspace.py @@ -1897,7 +1897,8 @@ def set_permissions( access_control_list: Optional[List[RepoAccessControlRequest]] = None) -> RepoPermissions: """Set repo permissions. - Sets permissions on a repo. Repos can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param repo_id: str The repo for which to get or manage permissions. @@ -2527,8 +2528,9 @@ def set_permissions( ) -> WorkspaceObjectPermissions: """Set workspace object permissions. - Sets permissions on a workspace object. Workspace objects can inherit permissions from their parent - objects or root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their parent objects or root + object. :param workspace_object_type: str The workspace object type for which to get or manage permissions. diff --git a/databricks/sdk/version.py b/databricks/sdk/version.py index aae5aca6..8935b5b5 100644 --- a/databricks/sdk/version.py +++ b/databricks/sdk/version.py @@ -1 +1 @@ -__version__ = '0.36.0' +__version__ = '0.37.0' diff --git a/docs/account/oauth2/service_principal_secrets.rst b/docs/account/oauth2/service_principal_secrets.rst index 4249b9de..955d6da5 100644 --- a/docs/account/oauth2/service_principal_secrets.rst +++ b/docs/account/oauth2/service_principal_secrets.rst @@ -42,7 +42,7 @@ - .. py:method:: list(service_principal_id: int) -> Iterator[SecretInfo] + .. py:method:: list(service_principal_id: int [, page_token: Optional[str]]) -> Iterator[SecretInfo] List service principal secrets. @@ -51,6 +51,13 @@ :param service_principal_id: int The service principal ID. + :param page_token: str (optional) + An opaque page token which was the `next_page_token` in the response of the previous request to list + the secrets for this service principal. Provide this token to retrieve the next page of secret + entries. When providing a `page_token`, all other parameters provided to the request must match the + previous request. To list all of the secrets for a service principal, it is necessary to continue + requesting pages of entries until the response contains no `next_page_token`. Note that the number + of entries returned must not be used to determine when the listing is complete. :returns: Iterator over :class:`SecretInfo` \ No newline at end of file diff --git a/docs/account/provisioning/workspaces.rst b/docs/account/provisioning/workspaces.rst index 98c47cc9..fa1d130b 100644 --- a/docs/account/provisioning/workspaces.rst +++ b/docs/account/provisioning/workspaces.rst @@ -11,7 +11,7 @@ These endpoints are available if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - .. 
py:method:: create(workspace_name: str [, aws_region: Optional[str], cloud: Optional[str], cloud_resource_container: Optional[CloudResourceContainer], credentials_id: Optional[str], custom_tags: Optional[Dict[str, str]], deployment_name: Optional[str], gcp_managed_network_config: Optional[GcpManagedNetworkConfig], gke_config: Optional[GkeConfig], location: Optional[str], managed_services_customer_managed_key_id: Optional[str], network_id: Optional[str], pricing_tier: Optional[PricingTier], private_access_settings_id: Optional[str], storage_configuration_id: Optional[str], storage_customer_managed_key_id: Optional[str]]) -> Wait[Workspace] + .. py:method:: create(workspace_name: str [, aws_region: Optional[str], cloud: Optional[str], cloud_resource_container: Optional[CloudResourceContainer], credentials_id: Optional[str], custom_tags: Optional[Dict[str, str]], deployment_name: Optional[str], gcp_managed_network_config: Optional[GcpManagedNetworkConfig], gke_config: Optional[GkeConfig], is_no_public_ip_enabled: Optional[bool], location: Optional[str], managed_services_customer_managed_key_id: Optional[str], network_id: Optional[str], pricing_tier: Optional[PricingTier], private_access_settings_id: Optional[str], storage_configuration_id: Optional[str], storage_customer_managed_key_id: Optional[str]]) -> Wait[Workspace] Usage: @@ -116,6 +116,8 @@ [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html :param gke_config: :class:`GkeConfig` (optional) The configurations for the GKE cluster of a Databricks workspace. + :param is_no_public_ip_enabled: bool (optional) + Whether no public IP is enabled for the workspace. :param location: str (optional) The Google Cloud region of the workspace data plane in your Google account. For example, `us-east4`. :param managed_services_customer_managed_key_id: str (optional) @@ -148,7 +150,7 @@ See :method:wait_get_workspace_running for more details. - .. py:method:: create_and_wait(workspace_name: str [, aws_region: Optional[str], cloud: Optional[str], cloud_resource_container: Optional[CloudResourceContainer], credentials_id: Optional[str], custom_tags: Optional[Dict[str, str]], deployment_name: Optional[str], gcp_managed_network_config: Optional[GcpManagedNetworkConfig], gke_config: Optional[GkeConfig], location: Optional[str], managed_services_customer_managed_key_id: Optional[str], network_id: Optional[str], pricing_tier: Optional[PricingTier], private_access_settings_id: Optional[str], storage_configuration_id: Optional[str], storage_customer_managed_key_id: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> Workspace + .. py:method:: create_and_wait(workspace_name: str [, aws_region: Optional[str], cloud: Optional[str], cloud_resource_container: Optional[CloudResourceContainer], credentials_id: Optional[str], custom_tags: Optional[Dict[str, str]], deployment_name: Optional[str], gcp_managed_network_config: Optional[GcpManagedNetworkConfig], gke_config: Optional[GkeConfig], is_no_public_ip_enabled: Optional[bool], location: Optional[str], managed_services_customer_managed_key_id: Optional[str], network_id: Optional[str], pricing_tier: Optional[PricingTier], private_access_settings_id: Optional[str], storage_configuration_id: Optional[str], storage_customer_managed_key_id: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> Workspace .. 
py:method:: delete(workspace_id: int) diff --git a/docs/dbdataclasses/apps.rst b/docs/dbdataclasses/apps.rst index 2d522c62..2214e2ac 100644 --- a/docs/dbdataclasses/apps.rst +++ b/docs/dbdataclasses/apps.rst @@ -190,14 +190,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CreateAppDeploymentRequest - :members: - :undoc-members: - -.. autoclass:: CreateAppRequest - :members: - :undoc-members: - .. autoclass:: GetAppPermissionLevelsResponse :members: :undoc-members: @@ -217,7 +209,3 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. autoclass:: StopAppRequest :members: :undoc-members: - -.. autoclass:: UpdateAppRequest - :members: - :undoc-members: diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index cb639934..9f5fef3b 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -69,6 +69,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: AwsIamRole + :members: + :undoc-members: + .. autoclass:: AwsIamRoleRequest :members: :undoc-members: @@ -77,6 +81,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: AzureActiveDirectoryToken + :members: + :undoc-members: + +.. autoclass:: AzureManagedIdentity + :members: + :undoc-members: + .. autoclass:: AzureManagedIdentityRequest :members: :undoc-members: @@ -246,6 +258,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: USER_DEFINED_TYPE :value: "USER_DEFINED_TYPE" + .. py:attribute:: VARIANT + :value: "VARIANT" + .. autoclass:: ConnectionInfo :members: :undoc-members: @@ -342,6 +357,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: CreateCredentialRequest + :members: + :undoc-members: + .. autoclass:: CreateExternalLocation :members: :undoc-members: @@ -373,7 +392,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: CreateFunctionSecurityType - Function security type. + The security type of the function. .. py:attribute:: DEFINER :value: "DEFINER" @@ -403,10 +422,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CreateOnlineTableRequest - :members: - :undoc-members: - .. autoclass:: CreateRegisteredModelRequest :members: :undoc-members: @@ -431,6 +446,15 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: CredentialInfo + :members: + :undoc-members: + +.. py:class:: CredentialPurpose + + .. py:attribute:: SERVICE + :value: "SERVICE" + .. py:class:: CredentialType The type of credential. @@ -441,6 +465,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: USERNAME_PASSWORD :value: "USERNAME_PASSWORD" +.. autoclass:: CredentialValidationResult + :members: + :undoc-members: + .. autoclass:: CurrentWorkspaceBindings :members: :undoc-members: @@ -530,6 +558,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DeleteCredentialResponse + :members: + :undoc-members: + .. 
autoclass:: DeleteResponse
    :members:
    :undoc-members:
@@ -636,7 +668,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo
 
 .. py:class:: FunctionInfoSecurityType
 
-   Function security type.
+   The security type of the function.
 
    .. py:attribute:: DEFINER
       :value: "DEFINER"
@@ -683,6 +715,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo
    :members:
    :undoc-members:
 
+.. autoclass:: GenerateTemporaryServiceCredentialAzureOptions
+   :members:
+   :undoc-members:
+
+.. autoclass:: GenerateTemporaryServiceCredentialRequest
+   :members:
+   :undoc-members:
+
 .. autoclass:: GenerateTemporaryTableCredentialRequest
    :members:
    :undoc-members:
@@ -699,6 +739,9 @@
    .. py:attribute:: EXTERNAL_LOCATION
       :value: "EXTERNAL_LOCATION"
 
+   .. py:attribute:: SERVICE_CREDENTIAL
+      :value: "SERVICE_CREDENTIAL"
+
    .. py:attribute:: STORAGE_CREDENTIAL
       :value: "STORAGE_CREDENTIAL"
 
@@ -722,8 +765,6 @@
 .. py:class:: IsolationMode
 
-   Whether the current securable is accessible from all workspaces or a specific set of workspaces.
-
    .. py:attribute:: ISOLATION_MODE_ISOLATED
       :value: "ISOLATION_MODE_ISOLATED"
 
@@ -746,6 +787,10 @@
    :members:
    :undoc-members:
 
+.. autoclass:: ListCredentialsResponse
+   :members:
+   :undoc-members:
+
 .. autoclass:: ListExternalLocationsResponse
    :members:
    :undoc-members:
@@ -1239,6 +1284,9 @@
    .. py:attribute:: CONNECTION
      :value: "CONNECTION"
 
+   .. py:attribute:: CREDENTIAL
+      :value: "CREDENTIAL"
+
    .. py:attribute:: EXTERNAL_LOCATION
      :value: "EXTERNAL_LOCATION"
 
@@ -1379,6 +1427,10 @@
    .. py:attribute:: VIEW
      :value: "VIEW"
 
+.. autoclass:: TemporaryCredentials
+   :members:
+   :undoc-members:
+
 .. autoclass:: TriggeredUpdateStatus
    :members:
    :undoc-members:
@@ -1399,6 +1451,9 @@
    .. py:attribute:: EXTERNAL_LOCATION
      :value: "EXTERNAL_LOCATION"
 
+   .. py:attribute:: SERVICE_CREDENTIAL
+      :value: "SERVICE_CREDENTIAL"
+
    .. py:attribute:: STORAGE_CREDENTIAL
      :value: "STORAGE_CREDENTIAL"
 
@@ -1410,6 +1465,10 @@
    :members:
    :undoc-members:
 
+.. autoclass:: UpdateCredentialRequest
+   :members:
+   :undoc-members:
+
 .. autoclass:: UpdateExternalLocation
    :members:
    :undoc-members:
@@ -1476,6 +1535,27 @@
    :members:
    :undoc-members:
 
+.. autoclass:: ValidateCredentialRequest
+   :members:
+   :undoc-members:
+
+.. autoclass:: ValidateCredentialResponse
+   :members:
+   :undoc-members:
+
+.. py:class:: ValidateCredentialResult
+
+   An enum representing the result of the file operation
+
+   .. py:attribute:: FAIL
+      :value: "FAIL"
+
+   .. py:attribute:: PASS
+      :value: "PASS"
+
+   .. py:attribute:: SKIP
+      :value: "SKIP"
+
 .. 
autoclass:: ValidateStorageCredential :members: :undoc-members: diff --git a/docs/dbdataclasses/dashboards.rst b/docs/dbdataclasses/dashboards.rst index 91de6ccb..3d07ed34 100644 --- a/docs/dbdataclasses/dashboards.rst +++ b/docs/dbdataclasses/dashboards.rst @@ -4,18 +4,6 @@ Dashboards These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.dashboards`` module. .. py:currentmodule:: databricks.sdk.service.dashboards -.. autoclass:: CreateDashboardRequest - :members: - :undoc-members: - -.. autoclass:: CreateScheduleRequest - :members: - :undoc-members: - -.. autoclass:: CreateSubscriptionRequest - :members: - :undoc-members: - .. autoclass:: CronSchedule :members: :undoc-members: @@ -166,6 +154,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION :value: "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION" + .. py:attribute:: NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE + :value: "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE" + .. py:attribute:: NO_QUERY_TO_VISUALIZE_EXCEPTION :value: "NO_QUERY_TO_VISUALIZE_EXCEPTION" @@ -298,11 +289,3 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. autoclass:: UnpublishDashboardResponse :members: :undoc-members: - -.. autoclass:: UpdateDashboardRequest - :members: - :undoc-members: - -.. autoclass:: UpdateScheduleRequest - :members: - :undoc-members: diff --git a/docs/dbdataclasses/marketplace.rst b/docs/dbdataclasses/marketplace.rst index bb48967d..c1029d84 100644 --- a/docs/dbdataclasses/marketplace.rst +++ b/docs/dbdataclasses/marketplace.rst @@ -29,6 +29,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: ASSET_TYPE_NOTEBOOK :value: "ASSET_TYPE_NOTEBOOK" + .. py:attribute:: ASSET_TYPE_PARTNER_INTEGRATION + :value: "ASSET_TYPE_PARTNER_INTEGRATION" + .. autoclass:: BatchGetListingsResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/provisioning.rst b/docs/dbdataclasses/provisioning.rst index 7990eae9..4c909d48 100644 --- a/docs/dbdataclasses/provisioning.rst +++ b/docs/dbdataclasses/provisioning.rst @@ -106,6 +106,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: VPC :value: "VPC" +.. autoclass:: ExternalCustomerInfo + :members: + :undoc-members: + .. autoclass:: GcpKeyInfo :members: :undoc-members: diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index 12043e3c..7d556f8a 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -4,6 +4,33 @@ Settings These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.settings`` module. .. py:currentmodule:: databricks.sdk.service.settings +.. autoclass:: AibiDashboardEmbeddingAccessPolicy + :members: + :undoc-members: + +.. py:class:: AibiDashboardEmbeddingAccessPolicyAccessPolicyType + + .. py:attribute:: ALLOW_ALL_DOMAINS + :value: "ALLOW_ALL_DOMAINS" + + .. py:attribute:: ALLOW_APPROVED_DOMAINS + :value: "ALLOW_APPROVED_DOMAINS" + + .. py:attribute:: DENY_ALL_DOMAINS + :value: "DENY_ALL_DOMAINS" + +.. autoclass:: AibiDashboardEmbeddingAccessPolicySetting + :members: + :undoc-members: + +.. autoclass:: AibiDashboardEmbeddingApprovedDomains + :members: + :undoc-members: + +.. autoclass:: AibiDashboardEmbeddingApprovedDomainsSetting + :members: + :undoc-members: + .. 
autoclass:: AutomaticClusterUpdateSetting :members: :undoc-members: @@ -543,6 +570,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: AZURE_ACTIVE_DIRECTORY_TOKEN :value: "AZURE_ACTIVE_DIRECTORY_TOKEN" +.. autoclass:: UpdateAibiDashboardEmbeddingAccessPolicySettingRequest + :members: + :undoc-members: + +.. autoclass:: UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest + :members: + :undoc-members: + .. autoclass:: UpdateAutomaticClusterUpdateSettingRequest :members: :undoc-members: diff --git a/docs/dbdataclasses/sharing.rst b/docs/dbdataclasses/sharing.rst index ded587fe..cd4c2dce 100644 --- a/docs/dbdataclasses/sharing.rst +++ b/docs/dbdataclasses/sharing.rst @@ -14,117 +14,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: TOKEN :value: "TOKEN" -.. autoclass:: CentralCleanRoomInfo - :members: - :undoc-members: - -.. autoclass:: CleanRoomAssetInfo - :members: - :undoc-members: - -.. autoclass:: CleanRoomCatalog - :members: - :undoc-members: - -.. autoclass:: CleanRoomCatalogUpdate - :members: - :undoc-members: - -.. autoclass:: CleanRoomCollaboratorInfo - :members: - :undoc-members: - -.. autoclass:: CleanRoomInfo - :members: - :undoc-members: - -.. autoclass:: CleanRoomNotebookInfo - :members: - :undoc-members: - -.. autoclass:: CleanRoomTableInfo - :members: - :undoc-members: - -.. autoclass:: ColumnInfo - :members: - :undoc-members: - -.. autoclass:: ColumnMask - :members: - :undoc-members: - -.. py:class:: ColumnTypeName - - Name of type (INT, STRUCT, MAP, etc.). - - .. py:attribute:: ARRAY - :value: "ARRAY" - - .. py:attribute:: BINARY - :value: "BINARY" - - .. py:attribute:: BOOLEAN - :value: "BOOLEAN" - - .. py:attribute:: BYTE - :value: "BYTE" - - .. py:attribute:: CHAR - :value: "CHAR" - - .. py:attribute:: DATE - :value: "DATE" - - .. py:attribute:: DECIMAL - :value: "DECIMAL" - - .. py:attribute:: DOUBLE - :value: "DOUBLE" - - .. py:attribute:: FLOAT - :value: "FLOAT" - - .. py:attribute:: INT - :value: "INT" - - .. py:attribute:: INTERVAL - :value: "INTERVAL" - - .. py:attribute:: LONG - :value: "LONG" - - .. py:attribute:: MAP - :value: "MAP" - - .. py:attribute:: NULL - :value: "NULL" - - .. py:attribute:: SHORT - :value: "SHORT" - - .. py:attribute:: STRING - :value: "STRING" - - .. py:attribute:: STRUCT - :value: "STRUCT" - - .. py:attribute:: TABLE_TYPE - :value: "TABLE_TYPE" - - .. py:attribute:: TIMESTAMP - :value: "TIMESTAMP" - - .. py:attribute:: TIMESTAMP_NTZ - :value: "TIMESTAMP_NTZ" - - .. py:attribute:: USER_DEFINED_TYPE - :value: "USER_DEFINED_TYPE" - -.. autoclass:: CreateCleanRoom - :members: - :undoc-members: - .. autoclass:: CreateProvider :members: :undoc-members: @@ -153,10 +42,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ListCleanRoomsResponse - :members: - :undoc-members: - .. autoclass:: ListProviderSharesResponse :members: :undoc-members: @@ -435,10 +320,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: UPDATE :value: "UPDATE" -.. autoclass:: UpdateCleanRoom - :members: - :undoc-members: - .. 
autoclass:: UpdatePermissionsResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/sql.rst b/docs/dbdataclasses/sql.rst index 1657146c..1a252f7c 100644 --- a/docs/dbdataclasses/sql.rst +++ b/docs/dbdataclasses/sql.rst @@ -114,8 +114,8 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CHANNEL_NAME_PREVIEW :value: "CHANNEL_NAME_PREVIEW" - .. py:attribute:: CHANNEL_NAME_UNSPECIFIED - :value: "CHANNEL_NAME_UNSPECIFIED" + .. py:attribute:: CHANNEL_NAME_PREVIOUS + :value: "CHANNEL_NAME_PREVIOUS" .. autoclass:: ColumnInfo :members: diff --git a/docs/workspace/apps/apps.rst b/docs/workspace/apps/apps.rst index 774e75b8..a2494124 100644 --- a/docs/workspace/apps/apps.rst +++ b/docs/workspace/apps/apps.rst @@ -7,26 +7,20 @@ Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. - .. py:method:: create(name: str [, description: Optional[str], resources: Optional[List[AppResource]]]) -> Wait[App] + .. py:method:: create( [, app: Optional[App]]) -> Wait[App] Create an app. Creates a new app. - :param name: str - The name of the app. The name must contain only lowercase alphanumeric characters and hyphens. It - must be unique within the workspace. - :param description: str (optional) - The description of the app. - :param resources: List[:class:`AppResource`] (optional) - Resources for the app. + :param app: :class:`App` (optional) :returns: Long-running operation waiter for :class:`App`. See :method:wait_get_app_active for more details. - .. py:method:: create_and_wait(name: str [, description: Optional[str], resources: Optional[List[AppResource]], timeout: datetime.timedelta = 0:20:00]) -> App + .. py:method:: create_and_wait( [, app: Optional[App], timeout: datetime.timedelta = 0:20:00]) -> App .. py:method:: delete(name: str) -> App @@ -41,7 +35,7 @@ :returns: :class:`App` - .. py:method:: deploy(app_name: str [, deployment_id: Optional[str], mode: Optional[AppDeploymentMode], source_code_path: Optional[str]]) -> Wait[AppDeployment] + .. py:method:: deploy(app_name: str [, app_deployment: Optional[AppDeployment]]) -> Wait[AppDeployment] Create an app deployment. @@ -49,23 +43,14 @@ :param app_name: str The name of the app. - :param deployment_id: str (optional) - The unique id of the deployment. - :param mode: :class:`AppDeploymentMode` (optional) - The mode of which the deployment will manage the source code. - :param source_code_path: str (optional) - The workspace file system path of the source code used to create the app deployment. This is - different from `deployment_artifacts.source_code_path`, which is the path used by the deployed app. - The former refers to the original source code location of the app in the workspace during deployment - creation, whereas the latter provides a system generated stable snapshotted source code path used by - the deployment. + :param app_deployment: :class:`AppDeployment` (optional) :returns: Long-running operation waiter for :class:`AppDeployment`. See :method:wait_get_deployment_app_succeeded for more details. - .. py:method:: deploy_and_wait(app_name: str [, deployment_id: Optional[str], mode: Optional[AppDeploymentMode], source_code_path: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> AppDeployment + .. py:method:: deploy_and_wait(app_name: str [, app_deployment: Optional[AppDeployment], timeout: datetime.timedelta = 0:20:00]) -> AppDeployment .. 
py:method:: get(name: str) -> App @@ -152,7 +137,8 @@ Set app permissions. - Sets permissions on an app. Apps can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param app_name: str The app for which to get or manage permissions. @@ -195,19 +181,15 @@ .. py:method:: stop_and_wait(name: str, timeout: datetime.timedelta = 0:20:00) -> App - .. py:method:: update(name: str [, description: Optional[str], resources: Optional[List[AppResource]]]) -> App + .. py:method:: update(name: str [, app: Optional[App]]) -> App Update an app. Updates the app with the supplied name. :param name: str - The name of the app. The name must contain only lowercase alphanumeric characters and hyphens. It - must be unique within the workspace. - :param description: str (optional) - The description of the app. - :param resources: List[:class:`AppResource`] (optional) - Resources for the app. + The name of the app. + :param app: :class:`App` (optional) :returns: :class:`App` diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index 365007b0..fc60b18f 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -221,7 +221,6 @@ :param force: bool (optional) Force update even if changing url invalidates dependent external tables or mounts. :param isolation_mode: :class:`IsolationMode` (optional) - Whether the current securable is accessible from all workspaces or a specific set of workspaces. :param new_name: str (optional) New name for the external location. :param owner: str (optional) diff --git a/docs/workspace/catalog/online_tables.rst b/docs/workspace/catalog/online_tables.rst index 164832b0..d0119657 100644 --- a/docs/workspace/catalog/online_tables.rst +++ b/docs/workspace/catalog/online_tables.rst @@ -6,20 +6,23 @@ Online tables provide lower latency and higher QPS access to data from Delta tables. - .. py:method:: create( [, name: Optional[str], spec: Optional[OnlineTableSpec]]) -> OnlineTable + .. py:method:: create( [, table: Optional[OnlineTable]]) -> Wait[OnlineTable] Create an Online Table. Create a new Online Table. - :param name: str (optional) - Full three-part (catalog, schema, table) name of the table. - :param spec: :class:`OnlineTableSpec` (optional) - Specification of the online table. + :param table: :class:`OnlineTable` (optional) + Online Table information. - :returns: :class:`OnlineTable` + :returns: + Long-running operation waiter for :class:`OnlineTable`. + See :method:wait_get_online_table_active for more details. + .. py:method:: create_and_wait( [, table: Optional[OnlineTable], timeout: datetime.timedelta = 0:20:00]) -> OnlineTable + + .. py:method:: delete(name: str) Delete an Online Table. @@ -44,4 +47,6 @@ Full three-part (catalog, schema, table) name of the table. :returns: :class:`OnlineTable` - \ No newline at end of file + + + .. 
py:method:: wait_get_online_table_active(name: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[OnlineTable], None]]) -> OnlineTable diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index 30b04654..cac70a94 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -193,7 +193,6 @@ :param force: bool (optional) Force update even if there are dependent external locations or external tables. :param isolation_mode: :class:`IsolationMode` (optional) - Whether the current securable is accessible from all workspaces or a specific set of workspaces. :param new_name: str (optional) New name for the storage credential. :param owner: str (optional) diff --git a/docs/workspace/compute/cluster_policies.rst b/docs/workspace/compute/cluster_policies.rst index 1cefc8ca..65066964 100644 --- a/docs/workspace/compute/cluster_policies.rst +++ b/docs/workspace/compute/cluster_policies.rst @@ -267,7 +267,8 @@ Set cluster policy permissions. - Sets permissions on a cluster policy. Cluster policies can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param cluster_policy_id: str The cluster policy for which to get or manage permissions. diff --git a/docs/workspace/compute/clusters.rst b/docs/workspace/compute/clusters.rst index ac52edec..24fe2d25 100644 --- a/docs/workspace/compute/clusters.rst +++ b/docs/workspace/compute/clusters.rst @@ -341,7 +341,7 @@ Clusters created by the Databricks Jobs service cannot be edited. :param cluster_id: str - ID of the cluser + ID of the cluster :param spark_version: str The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can be retrieved by using the :method:clusters/sparkVersions API call. @@ -906,7 +906,8 @@ Set cluster permissions. - Sets permissions on a cluster. Clusters can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param cluster_id: str The cluster for which to get or manage permissions. diff --git a/docs/workspace/compute/instance_pools.rst b/docs/workspace/compute/instance_pools.rst index 27784417..333c4493 100644 --- a/docs/workspace/compute/instance_pools.rst +++ b/docs/workspace/compute/instance_pools.rst @@ -245,7 +245,8 @@ Set instance pool permissions. - Sets permissions on an instance pool. Instance pools can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param instance_pool_id: str The instance pool for which to get or manage permissions. diff --git a/docs/workspace/dashboards/lakeview.rst b/docs/workspace/dashboards/lakeview.rst index fe358063..0fe55542 100644 --- a/docs/workspace/dashboards/lakeview.rst +++ b/docs/workspace/dashboards/lakeview.rst @@ -7,47 +7,29 @@ These APIs provide specific management operations for Lakeview dashboards. Generic resource management can be done with Workspace API (import, export, get-status, list, delete). - .. 
py:method:: create(display_name: str [, parent_path: Optional[str], serialized_dashboard: Optional[str], warehouse_id: Optional[str]]) -> Dashboard + .. py:method:: create( [, dashboard: Optional[Dashboard]]) -> Dashboard Create dashboard. Create a draft dashboard. - :param display_name: str - The display name of the dashboard. - :param parent_path: str (optional) - The workspace path of the folder containing the dashboard. Includes leading slash and no trailing - slash. This field is excluded in List Dashboards responses. - :param serialized_dashboard: str (optional) - The contents of the dashboard in serialized string form. This field is excluded in List Dashboards - responses. Use the [get dashboard API] to retrieve an example response, which includes the - `serialized_dashboard` field. This field provides the structure of the JSON string that represents - the dashboard's layout and components. - - [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get - :param warehouse_id: str (optional) - The warehouse ID used to run the dashboard. + :param dashboard: :class:`Dashboard` (optional) :returns: :class:`Dashboard` - .. py:method:: create_schedule(dashboard_id: str, cron_schedule: CronSchedule [, display_name: Optional[str], pause_status: Optional[SchedulePauseStatus]]) -> Schedule + .. py:method:: create_schedule(dashboard_id: str [, schedule: Optional[Schedule]]) -> Schedule Create dashboard schedule. :param dashboard_id: str UUID identifying the dashboard to which the schedule belongs. - :param cron_schedule: :class:`CronSchedule` - The cron expression describing the frequency of the periodic refresh for this schedule. - :param display_name: str (optional) - The display name for schedule. - :param pause_status: :class:`SchedulePauseStatus` (optional) - The status indicates whether this schedule is paused or not. + :param schedule: :class:`Schedule` (optional) :returns: :class:`Schedule` - .. py:method:: create_subscription(dashboard_id: str, schedule_id: str, subscriber: Subscriber) -> Subscription + .. py:method:: create_subscription(dashboard_id: str, schedule_id: str [, subscription: Optional[Subscription]]) -> Subscription Create schedule subscription. @@ -55,8 +37,7 @@ UUID identifying the dashboard to which the subscription belongs. :param schedule_id: str UUID identifying the schedule to which the subscription belongs. - :param subscriber: :class:`Subscriber` - Subscriber details for users and destinations to be added as subscribers to the schedule. + :param subscription: :class:`Subscription` (optional) :returns: :class:`Subscription` @@ -250,7 +231,7 @@ - .. py:method:: update(dashboard_id: str [, display_name: Optional[str], etag: Optional[str], serialized_dashboard: Optional[str], warehouse_id: Optional[str]]) -> Dashboard + .. py:method:: update(dashboard_id: str [, dashboard: Optional[Dashboard]]) -> Dashboard Update dashboard. @@ -258,25 +239,12 @@ :param dashboard_id: str UUID identifying the dashboard. - :param display_name: str (optional) - The display name of the dashboard. - :param etag: str (optional) - The etag for the dashboard. Can be optionally provided on updates to ensure that the dashboard has - not been modified since the last read. This field is excluded in List Dashboards responses. - :param serialized_dashboard: str (optional) - The contents of the dashboard in serialized string form. This field is excluded in List Dashboards - responses. 
Use the [get dashboard API] to retrieve an example response, which includes the - `serialized_dashboard` field. This field provides the structure of the JSON string that represents - the dashboard's layout and components. - - [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get - :param warehouse_id: str (optional) - The warehouse ID used to run the dashboard. + :param dashboard: :class:`Dashboard` (optional) :returns: :class:`Dashboard` - .. py:method:: update_schedule(dashboard_id: str, schedule_id: str, cron_schedule: CronSchedule [, display_name: Optional[str], etag: Optional[str], pause_status: Optional[SchedulePauseStatus]]) -> Schedule + .. py:method:: update_schedule(dashboard_id: str, schedule_id: str [, schedule: Optional[Schedule]]) -> Schedule Update dashboard schedule. @@ -284,15 +252,7 @@ UUID identifying the dashboard to which the schedule belongs. :param schedule_id: str UUID identifying the schedule. - :param cron_schedule: :class:`CronSchedule` - The cron expression describing the frequency of the periodic refresh for this schedule. - :param display_name: str (optional) - The display name for schedule. - :param etag: str (optional) - The etag for the schedule. Must be left empty on create, must be provided on updates to ensure that - the schedule has not been modified since the last read, and can be optionally provided on delete. - :param pause_status: :class:`SchedulePauseStatus` (optional) - The status indicates whether this schedule is paused or not. + :param schedule: :class:`Schedule` (optional) :returns: :class:`Schedule` \ No newline at end of file diff --git a/docs/workspace/iam/permissions.rst b/docs/workspace/iam/permissions.rst index 1f2fd285..bf8f8e77 100644 --- a/docs/workspace/iam/permissions.rst +++ b/docs/workspace/iam/permissions.rst @@ -153,7 +153,8 @@ Set object permissions. - Sets permissions on an object. Objects can inherit permissions from their parent objects or root + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their parent objects or root object. :param request_object_type: str diff --git a/docs/workspace/iam/users.rst b/docs/workspace/iam/users.rst index 2eae834a..616ef7b8 100644 --- a/docs/workspace/iam/users.rst +++ b/docs/workspace/iam/users.rst @@ -239,7 +239,8 @@ Set password permissions. - Sets permissions on all passwords. Passwords can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param access_control_list: List[:class:`PasswordAccessControlRequest`] (optional) diff --git a/docs/workspace/index.rst b/docs/workspace/index.rst index 1b6c5708..d9ca8419 100644 --- a/docs/workspace/index.rst +++ b/docs/workspace/index.rst @@ -17,6 +17,7 @@ These APIs are available from WorkspaceClient marketplace/index ml/index pipelines/index + provisioning/index serving/index settings/index sharing/index diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index b097c94c..0c6d5143 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -425,8 +425,8 @@ :param include_resolved_values: bool (optional) Whether to include resolved parameter values in the response. 
:param page_token: str (optional) - To list the next page or the previous page of job tasks, set this field to the value of the - `next_page_token` or `prev_page_token` returned in the GetJob response. + To list the next page of job tasks, set this field to the value of the `next_page_token` returned in + the GetJob response. :returns: :class:`Run` @@ -661,8 +661,9 @@ in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables](/jobs.html"#parameter-variables") to set parameters containing - information about job runs. + Use [Task parameter variables] to set parameters containing information about job runs. + + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables :param job_parameters: Dict[str,str] (optional) Job-level parameters used in the run. for example `"param": "overriding_val"` :param latest_repair_id: int (optional) @@ -854,8 +855,9 @@ in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables](/jobs.html"#parameter-variables") to set parameters containing - information about job runs. + Use [Task parameter variables] to set parameters containing information about job runs. + + [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables :param job_parameters: Dict[str,str] (optional) Job-level parameters used in the run. for example `"param": "overriding_val"` :param notebook_params: Dict[str,str] (optional) @@ -926,7 +928,8 @@ Set job permissions. - Sets permissions on a job. Jobs can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param job_id: str The job for which to get or manage permissions. diff --git a/docs/workspace/ml/experiments.rst b/docs/workspace/ml/experiments.rst index c09cfe35..44ceeef8 100644 --- a/docs/workspace/ml/experiments.rst +++ b/docs/workspace/ml/experiments.rst @@ -578,7 +578,8 @@ Set experiment permissions. - Sets permissions on an experiment. Experiments can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param experiment_id: str The experiment for which to get or manage permissions. diff --git a/docs/workspace/ml/model_registry.rst b/docs/workspace/ml/model_registry.rst index 8ac52916..d08a8541 100644 --- a/docs/workspace/ml/model_registry.rst +++ b/docs/workspace/ml/model_registry.rst @@ -658,8 +658,8 @@ Set registered model permissions. - Sets permissions on a registered model. Registered models can inherit permissions from their root - object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param registered_model_id: str The registered model for which to get or manage permissions. 
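The reworded `set_permissions` docstrings above all describe the same full-replacement semantics: `set_permissions` overwrites the object's direct access control list, while `update_permissions` merges new grants into it. A minimal sketch of the difference, using the Jobs service as exposed by the SDK; the job ID and user name are placeholders, not values from this patch:

.. code-block::

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()

    job_id = "1234"  # placeholder: an existing job ID in your workspace

    # set_permissions REPLACES the job's direct ACL: after this call, this
    # grant is effectively the only direct entry (the service may preserve
    # owner grants).
    w.jobs.set_permissions(
        job_id=job_id,
        access_control_list=[
            jobs.JobAccessControlRequest(
                user_name="user@example.com",  # placeholder principal
                permission_level=jobs.JobPermissionLevel.CAN_VIEW)
        ])

    # update_permissions merges a grant into the existing ACL instead.
    w.jobs.update_permissions(
        job_id=job_id,
        access_control_list=[
            jobs.JobAccessControlRequest(
                user_name="user@example.com",
                permission_level=jobs.JobPermissionLevel.CAN_MANAGE_RUN)
        ])

Because `set_permissions` deletes any direct permission that is not restated in the request, incremental changes should go through `update_permissions`.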
diff --git a/docs/workspace/pipelines/pipelines.rst b/docs/workspace/pipelines/pipelines.rst index 9801a200..39b5c9d7 100644 --- a/docs/workspace/pipelines/pipelines.rst +++ b/docs/workspace/pipelines/pipelines.rst @@ -324,7 +324,8 @@ Set pipeline permissions. - Sets permissions on a pipeline. Pipelines can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param pipeline_id: str The pipeline for which to get or manage permissions. diff --git a/docs/workspace/provisioning/credentials.rst b/docs/workspace/provisioning/credentials.rst new file mode 100644 index 00000000..8f38d13c --- /dev/null +++ b/docs/workspace/provisioning/credentials.rst @@ -0,0 +1,123 @@ +``w.credentials``: Credential configurations +============================================ +.. currentmodule:: databricks.sdk.service.provisioning + +.. py:class:: CredentialsAPI + + These APIs manage credential configurations for this workspace. Databricks needs access to a cross-account + service IAM role in your AWS account so that Databricks can deploy clusters in the appropriate VPC for the + new workspace. A credential configuration encapsulates this role information, and its ID is used when + creating a new workspace. + + .. py:method:: create(credentials_name: str, aws_credentials: CreateCredentialAwsCredentials) -> Credential + + + Usage: + + .. code-block:: + + import os + import time + + from databricks.sdk import AccountClient + from databricks.sdk.service import provisioning + + a = AccountClient() + + role = a.credentials.create( + credentials_name=f'sdk-{time.time_ns()}', + aws_credentials=provisioning.CreateCredentialAwsCredentials(sts_role=provisioning.CreateCredentialStsRole( + role_arn=os.environ["TEST_CROSSACCOUNT_ARN"]))) + + # cleanup + a.credentials.delete(credentials_id=role.credentials_id) + + Create credential configuration. + + Creates a Databricks credential configuration that represents cloud cross-account credentials for a + specified account. Databricks uses this to set up network infrastructure properly to host Databricks + clusters. For your AWS IAM role, you need to trust the External ID (the Databricks Account API account + ID) in the returned credential object, and configure the required access policy. + + Save the response's `credentials_id` field, which is the ID for your new credential configuration + object. + + For information about how to create a new workspace with this API, see [Create a new workspace using + the Account API] + + [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + + :param credentials_name: str + The human-readable name of the credential configuration object. + :param aws_credentials: :class:`CreateCredentialAwsCredentials` + + :returns: :class:`Credential` + + + .. py:method:: delete(credentials_id: str) + + Delete credential configuration. + + Deletes a Databricks credential configuration object for an account, both specified by ID. You cannot + delete a credential that is associated with any workspace. + + :param credentials_id: str + Databricks Account API credential configuration ID + + + + + .. py:method:: get(credentials_id: str) -> Credential + + + Usage: + + .. 
code-block:: + + import os + import time + + from databricks.sdk import AccountClient + from databricks.sdk.service import provisioning + + a = AccountClient() + + role = a.credentials.create( + credentials_name=f'sdk-{time.time_ns()}', + aws_credentials=provisioning.CreateCredentialAwsCredentials(sts_role=provisioning.CreateCredentialStsRole( + role_arn=os.environ["TEST_CROSSACCOUNT_ARN"]))) + + by_id = a.credentials.get(credentials_id=role.credentials_id) + + # cleanup + a.credentials.delete(credentials_id=role.credentials_id) + + Get credential configuration. + + Gets a Databricks credential configuration object for an account, both specified by ID. + + :param credentials_id: str + Databricks Account API credential configuration ID + + :returns: :class:`Credential` + + + .. py:method:: list() -> Iterator[Credential] + + + Usage: + + .. code-block:: + + from databricks.sdk import AccountClient + + a = AccountClient() + + configs = a.credentials.list() + + Get all credential configurations. + + Gets all Databricks credential configurations associated with an account specified by ID. + + :returns: Iterator over :class:`Credential` + \ No newline at end of file diff --git a/docs/workspace/provisioning/index.rst b/docs/workspace/provisioning/index.rst new file mode 100644 index 00000000..efe54142 --- /dev/null +++ b/docs/workspace/provisioning/index.rst @@ -0,0 +1,10 @@ + +Provisioning +============ + +Resource management for secure Databricks Workspace deployment, cross-account IAM roles, storage, encryption, networking and private access. + +.. toctree:: + :maxdepth: 1 + + credentials \ No newline at end of file diff --git a/docs/workspace/serving/serving_endpoints.rst b/docs/workspace/serving/serving_endpoints.rst index cbcbca96..430a1318 100644 --- a/docs/workspace/serving/serving_endpoints.rst +++ b/docs/workspace/serving/serving_endpoints.rst @@ -266,8 +266,8 @@ Set serving endpoint permissions. - Sets permissions on a serving endpoint. Serving endpoints can inherit permissions from their root - object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param serving_endpoint_id: str The serving endpoint for which to get or manage permissions. diff --git a/docs/workspace/settings/aibi_dashboard_embedding_access_policy.rst b/docs/workspace/settings/aibi_dashboard_embedding_access_policy.rst new file mode 100644 index 00000000..1480fc97 --- /dev/null +++ b/docs/workspace/settings/aibi_dashboard_embedding_access_policy.rst @@ -0,0 +1,42 @@ +``w.settings.aibi_dashboard_embedding_access_policy``: AI/BI Dashboard Embedding Access Policy +============================================================================================== +.. currentmodule:: databricks.sdk.service.settings + +.. py:class:: AibiDashboardEmbeddingAccessPolicyAPI + + Controls whether AI/BI published dashboard embedding is enabled, conditionally enabled, or disabled at the + workspace level. By default, this setting is conditionally enabled (ALLOW_APPROVED_DOMAINS). + + .. py:method:: get( [, etag: Optional[str]]) -> AibiDashboardEmbeddingAccessPolicySetting + + Retrieve the AI/BI dashboard embedding access policy. + + Retrieves the AI/BI dashboard embedding access policy. The default setting is ALLOW_APPROVED_DOMAINS, + permitting AI/BI dashboards to be embedded on approved domains. + + :param etag: str (optional) + etag used for versioning. 
+
+
+    .. py:method:: update(allow_missing: bool, setting: AibiDashboardEmbeddingAccessPolicySetting, field_mask: str) -> AibiDashboardEmbeddingAccessPolicySetting
+
+        Update the AI/BI dashboard embedding access policy.
+
+        Updates the AI/BI dashboard embedding access policy at the workspace level.
+
+        :param allow_missing: bool
+          This should always be set to true for the Settings API. Added for AIP compliance.
+        :param setting: :class:`AibiDashboardEmbeddingAccessPolicySetting`
+        :param field_mask: str
+          Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the
+          setting payload will be updated. The field mask needs to be supplied as a single string. To specify
+          multiple fields in the field mask, use a comma as the separator (no space).
+
+        :returns: :class:`AibiDashboardEmbeddingAccessPolicySetting`
+
\ No newline at end of file
diff --git a/docs/workspace/settings/aibi_dashboard_embedding_approved_domains.rst b/docs/workspace/settings/aibi_dashboard_embedding_approved_domains.rst
new file mode 100644
index 00000000..09b12056
--- /dev/null
+++ b/docs/workspace/settings/aibi_dashboard_embedding_approved_domains.rst
@@ -0,0 +1,42 @@
+``w.settings.aibi_dashboard_embedding_approved_domains``: AI/BI Dashboard Embedding Approved Domains
+====================================================================================================
+.. currentmodule:: databricks.sdk.service.settings
+
+.. py:class:: AibiDashboardEmbeddingApprovedDomainsAPI
+
+    Controls the list of domains approved to host the embedded AI/BI dashboards. The approved domains list
+    can't be mutated when the current access policy is not set to ALLOW_APPROVED_DOMAINS.
+
+    .. py:method:: get( [, etag: Optional[str]]) -> AibiDashboardEmbeddingApprovedDomainsSetting
+
+        Retrieve the list of domains approved to host embedded AI/BI dashboards.
+
+        Retrieves the list of domains approved to host embedded AI/BI dashboards.
+
+        :param etag: str (optional)
+          etag used for versioning. The response is at least as fresh as the eTag provided. This is used for
+          optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting
+          each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern
+          to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET
+          request, and pass it with the DELETE request to identify the rule set version you are deleting.
+
+        :returns: :class:`AibiDashboardEmbeddingApprovedDomainsSetting`
+
+
+    .. py:method:: update(allow_missing: bool, setting: AibiDashboardEmbeddingApprovedDomainsSetting, field_mask: str) -> AibiDashboardEmbeddingApprovedDomainsSetting
+
+        Update the list of domains approved to host embedded AI/BI dashboards.
+
+        Updates the list of domains approved to host embedded AI/BI dashboards. This update will fail if the
+        current workspace access policy is not ALLOW_APPROVED_DOMAINS.
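+
+        For example, a read-modify-write sketch; the attribute path and ``field_mask`` value below are
+        assumptions mirroring :class:`AibiDashboardEmbeddingApprovedDomainsSetting`, not canonical values:
+
+        .. code-block::
+
+            from databricks.sdk import WorkspaceClient
+
+            w = WorkspaceClient()
+
+            # Read the current setting and adjust its approved-domains list in place.
+            current = w.settings.aibi_dashboard_embedding_approved_domains.get()
+            current.aibi_dashboard_embedding_approved_domains.approved_domains.append("dashboards.example.com")
+
+            # Write the modified setting back; field_mask names the field being changed.
+            updated = w.settings.aibi_dashboard_embedding_approved_domains.update(
+                allow_missing=True,
+                setting=current,
+                field_mask="aibi_dashboard_embedding_approved_domains.approved_domains",
+            )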
+
+        :param allow_missing: bool
+          This should always be set to true for the Settings API. Added for AIP compliance.
+        :param setting: :class:`AibiDashboardEmbeddingApprovedDomainsSetting`
+        :param field_mask: str
+          Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the
+          setting payload will be updated. The field mask needs to be supplied as a single string. To specify
+          multiple fields in the field mask, use a comma as the separator (no space).
+
+        :returns: :class:`AibiDashboardEmbeddingApprovedDomainsSetting`
+
\ No newline at end of file
diff --git a/docs/workspace/settings/index.rst b/docs/workspace/settings/index.rst
index 22655853..c9e4f335 100644
--- a/docs/workspace/settings/index.rst
+++ b/docs/workspace/settings/index.rst
@@ -11,6 +11,8 @@ Manage security settings for Accounts and Workspaces
     ip_access_lists
     notification_destinations
     settings
+    aibi_dashboard_embedding_access_policy
+    aibi_dashboard_embedding_approved_domains
     automatic_cluster_update
     compliance_security_profile
     default_namespace
diff --git a/docs/workspace/settings/settings.rst b/docs/workspace/settings/settings.rst
index 58803192..aa806280 100644
--- a/docs/workspace/settings/settings.rst
+++ b/docs/workspace/settings/settings.rst
@@ -6,6 +6,18 @@
 
     Workspace Settings API allows users to manage settings at the workspace level.
 
+    .. py:property:: aibi_dashboard_embedding_access_policy
+        :type: AibiDashboardEmbeddingAccessPolicyAPI
+
+        Controls whether AI/BI published dashboard embedding is enabled, conditionally enabled, or disabled at the
+        workspace level. By default, this setting is conditionally enabled (ALLOW_APPROVED_DOMAINS).
+
+    .. py:property:: aibi_dashboard_embedding_approved_domains
+        :type: AibiDashboardEmbeddingApprovedDomainsAPI
+
+        Controls the list of domains approved to host the embedded AI/BI dashboards. The approved domains list
+        can't be mutated when the current access policy is not set to ALLOW_APPROVED_DOMAINS.
+
     .. py:property:: automatic_cluster_update
         :type: AutomaticClusterUpdateAPI
diff --git a/docs/workspace/settings/token_management.rst b/docs/workspace/settings/token_management.rst
index d030a432..9c938ce3 100644
--- a/docs/workspace/settings/token_management.rst
+++ b/docs/workspace/settings/token_management.rst
@@ -143,7 +143,8 @@
 
         Set token permissions.
 
-        Sets permissions on all tokens. Tokens can inherit permissions from their root object.
+        Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct
+        permissions if none are specified. Objects can inherit permissions from their root object.
 
        :param access_control_list: List[:class:`TokenAccessControlRequest`] (optional)
diff --git a/docs/workspace/sharing/index.rst b/docs/workspace/sharing/index.rst
index e012eb54..09452b49 100644
--- a/docs/workspace/sharing/index.rst
+++ b/docs/workspace/sharing/index.rst
@@ -7,7 +7,6 @@ Configure data sharing with Unity Catalog for providers, recipients, and shares
 .. toctree::
    :maxdepth: 1
 
-   clean_rooms
    providers
    recipient_activation
    recipients
diff --git a/docs/workspace/sql/statement_execution.rst b/docs/workspace/sql/statement_execution.rst
index 716fa4fd..44f64b51 100644
--- a/docs/workspace/sql/statement_execution.rst
+++ b/docs/workspace/sql/statement_execution.rst
@@ -80,11 +80,10 @@ outstanding statement might have already completed execution when the cancel
     request arrives. Polling for status until a terminal state is reached is a reliable way to determine the
     final state.
- Wait timeouts are approximate, occur server-side, and cannot account for things such as caller delays and network - latency from caller to service. - The system will auto-close a statement after one hour if the client - stops polling and thus you must poll at least once an hour. - The results are only available for one hour - after success; polling does not extend this. - The SQL Execution API must be used for the entire lifecycle - of the statement. For example, you cannot use the Jobs API to execute the command, and then the SQL - Execution API to cancel it. + latency from caller to service. - To guarantee that the statement is kept alive, you must poll at least + once every 15 minutes. - The results are only available for one hour after success; polling does not + extend this. - The SQL Execution API must be used for the entire lifecycle of the statement. For example, + you cannot use the Jobs API to execute the command, and then the SQL Execution API to cancel it. [Apache Arrow Columnar]: https://arrow.apache.org/overview/ [Databricks SQL Statement Execution API tutorial]: https://docs.databricks.com/sql/api/sql-execution-tutorial.html diff --git a/docs/workspace/sql/warehouses.rst b/docs/workspace/sql/warehouses.rst index 58b8a3fc..fd55d5b0 100644 --- a/docs/workspace/sql/warehouses.rst +++ b/docs/workspace/sql/warehouses.rst @@ -315,7 +315,8 @@ Set SQL warehouse permissions. - Sets permissions on a SQL warehouse. SQL warehouses can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param warehouse_id: str The SQL warehouse for which to get or manage permissions. diff --git a/docs/workspace/workspace/repos.rst b/docs/workspace/workspace/repos.rst index 01b1c875..3e826a06 100644 --- a/docs/workspace/workspace/repos.rst +++ b/docs/workspace/workspace/repos.rst @@ -157,7 +157,8 @@ Set repo permissions. - Sets permissions on a repo. Repos can inherit permissions from their root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their root object. :param repo_id: str The repo for which to get or manage permissions. diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst index 4aee0a2b..595872de 100644 --- a/docs/workspace/workspace/workspace.rst +++ b/docs/workspace/workspace/workspace.rst @@ -272,8 +272,9 @@ Set workspace object permissions. - Sets permissions on a workspace object. Workspace objects can inherit permissions from their parent - objects or root object. + Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct + permissions if none are specified. Objects can inherit permissions from their parent objects or root + object. :param workspace_object_type: str The workspace object type for which to get or manage permissions. From f7f9a685c0f11d3bac1ebfe9ef829b3d061f8501 Mon Sep 17 00:00:00 2001 From: Renaud Hartert Date: Tue, 5 Nov 2024 18:23:17 +0100 Subject: [PATCH 3/3] [Internal] Update PR template (#814) ## What changes are proposed in this pull request? This PR updates the PR template to remove outdated check boxes and emphasize testing. ## How is this tested? 
N/A
---
 .github/PULL_REQUEST_TEMPLATE.md | 34 +++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index e2d7ab0d..91e519ed 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,12 +1,28 @@
-## Changes
-
-## Tests
-
-- [ ] `make test` run locally
-- [ ] `make fmt` applied
-- [ ] relevant integration tests applied
+## What changes are proposed in this pull request?
+
+Provide the readers and reviewers with the information they need to understand
+this PR in a comprehensive manner.
+
+Specifically, try to answer the following two questions:
+- **WHAT** changes are being made in the PR? This should be a summary of the
+  major changes to allow the reader to quickly understand the PR without having
+  to look at the code.
+- **WHY** are these changes needed? This should provide the context that the
+  reader might be missing. For example, were there any decisions behind the
+  change that are not reflected in the code itself?
+
+The “why” part is the more important of the two as it usually cannot be
+inferred from the code itself. A well-written PR description will help future
+developers (including your future self) know how to interact with and update
+your code.
+
+## How is this tested?
+
+Describe any tests you have done, especially if the tests are not part of
+the unit tests (e.g. local tests).
+
+**ALWAYS ANSWER THIS QUESTION:** Answer with "N/A" if tests are not applicable
+to your PR (e.g. if the PR only modifies comments). Do not be afraid of
+answering "Not tested" if the PR has not been tested. Being clear about what
+has been done and not done provides important context to the reviewers.
\ No newline at end of file