diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index f09edb728..20842dced 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -c3a3e3055fe11cb9683f398a665c225a03563ff1 \ No newline at end of file +c4784cea599325a13472b1455e7434d639362d8b \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 38a03117f..97a0fa447 100755 --- a/.gitattributes +++ b/.gitattributes @@ -65,6 +65,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppPermissions databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResource.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceDatabase.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceDatabaseDatabasePermission.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpace.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpaceGenieSpacePermission.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceJob.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceJobJobPermission.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceSecret.java linguist-generated=true @@ -76,6 +78,9 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceSql databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceUcSecurable.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceUcSecurableUcSecurablePermission.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceUcSecurableUcSecurableType.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdate.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdateUpdateStatus.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdateUpdateStatusUpdateState.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ApplicationState.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ApplicationStatus.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsAPI.java linguist-generated=true @@ -84,6 +89,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsService.ja databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsSettingsAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsSettingsImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsSettingsService.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AsyncUpdateAppRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ComputeSize.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ComputeState.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ComputeStatus.java linguist-generated=true 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/CreateAppDeploymentRequest.java linguist-generated=true @@ -97,6 +104,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppPermissi databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppPermissionLevelsResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppPermissionsRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppUpdateRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetCustomTemplateRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ListAppDeploymentsRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ListAppDeploymentsResponse.java linguist-generated=true @@ -192,13 +200,23 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStor databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastore.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignment.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignmentResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredential.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredentialInfo.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreAssignmentResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteStorageCredentialResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsGetMetastoreResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsListMetastoresResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsMetastoreAssignment.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsMetastoreInfo.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsStorageCredentialInfo.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastore.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreAssignment.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreAssignmentResponse.java linguist-generated=true 
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateStorageCredential.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateStorageCredentialResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ArtifactAllowlistInfo.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ArtifactAllowlistsAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ArtifactAllowlistsImpl.java linguist-generated=true @@ -241,6 +259,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/Connections databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ContinuousUpdateStatus.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccessRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccessRequestResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsMetastore.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsStorageCredential.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateCatalog.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateConnection.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateCredentialRequest.java linguist-generated=true @@ -581,6 +601,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TemporaryTa databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TriggeredUpdateStatus.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UnassignRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccessRequestDestinationsRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsMetastore.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsStorageCredential.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateCatalog.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateCatalogWorkspaceBindingsResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateConnection.java linguist-generated=true @@ -979,6 +1001,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/MessageS databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/MigrateDashboardRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/PublishRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/PublishedDashboard.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/QueryAttachmentParameter.java linguist-generated=true 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/Result.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/Schedule.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/SchedulePauseStatus.java linguist-generated=true @@ -996,6 +1019,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CreateData databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CreateDatabaseInstanceRoleRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CreateDatabaseTableRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CreateSyncedDatabaseTableRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CustomTag.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/DatabaseAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/DatabaseCatalog.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/DatabaseCredential.java linguist-generated=true @@ -1050,6 +1074,41 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/SyncedTabl databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/UpdateDatabaseCatalogRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/UpdateDatabaseInstanceRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/UpdateSyncedDatabaseTableRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/AggregationGranularity.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/AnomalyDetectionConfig.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CancelRefreshRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CancelRefreshResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CreateMonitorRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CreateRefreshRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CronSchedule.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CronSchedulePauseStatus.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingConfig.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingCustomMetric.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingCustomMetricType.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingStatus.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityAPI.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityImpl.java linguist-generated=true 
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityService.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteMonitorRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteRefreshRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetMonitorRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetRefreshRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/InferenceLogConfig.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/InferenceProblemType.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListMonitorRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListMonitorResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListRefreshRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListRefreshResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/Monitor.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/NotificationDestination.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/NotificationSettings.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/Refresh.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/RefreshState.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/RefreshTrigger.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/SnapshotConfig.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/TimeSeriesConfig.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/UpdateMonitorRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/UpdateRefreshRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/AddBlock.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/Close.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/Create.java linguist-generated=true @@ -1990,6 +2049,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/UpdateSta databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/UpdateStateInfoState.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AwsCredentials.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AwsKeyInfo.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AzureKeyInfo.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AzureWorkspaceInfo.java linguist-generated=true 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CloudResourceContainer.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateAwsKeyInfo.java linguist-generated=true @@ -2007,7 +2067,9 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Creden databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsService.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingComputeMode.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingGcpCloudResourceContainer.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingStorageMode.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerManagedKey.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteCredentialRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteEncryptionKeyRequest.java linguist-generated=true @@ -2021,7 +2083,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Encryp databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EncryptionKeysService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EndpointUseCase.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ErrorType.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ExternalCustomerInfo.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpCommonNetworkConfig.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpKeyInfo.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpManagedNetworkConfig.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpNetworkInfo.java linguist-generated=true @@ -2035,6 +2097,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetVpc databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetWorkspaceRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GkeConfig.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GkeConfigConnectivityType.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyAccessConfiguration.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyUseCase.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Network.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworkHealth.java linguist-generated=true @@ -2064,6 +2127,7 @@ 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEnd databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcStatus.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WarningType.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Workspace.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceNetwork.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceStatus.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesImpl.java linguist-generated=true @@ -2188,6 +2252,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndp databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingModelWorkloadType.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/TrafficConfig.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/UpdateInferenceEndpointNotifications.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/UpdateInferenceEndpointNotificationsResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/UpdateProvisionedThroughputEndpointConfigRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/V1ResponseChoiceElement.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/AccountIpAccessEnable.java linguist-generated=true diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index da376b995..456db26ab 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -13,3 +13,96 @@ ### Internal Changes ### API Changes +* Add `com.databricks.sdk.service.dataquality` package. +* Add `workspaceClient.dataQuality()` service. +* Add `createUpdate()` and `getUpdate()` methods for `workspaceClient.apps()` service. +* Add `updateNotifications()` method for `workspaceClient.servingEndpoints()` service. +* Add `computeSize` field for `com.databricks.sdk.service.apps.App`. +* Add `genieSpace` field for `com.databricks.sdk.service.apps.AppResource`. +* Add `skipValidation` field for `com.databricks.sdk.service.catalog.AccountsCreateStorageCredential`. +* Add `skipValidation` field for `com.databricks.sdk.service.catalog.AccountsUpdateStorageCredential`. +* Add `aliases`, `browseOnly`, `createdAt`, `createdBy`, `fullName`, `metastoreId`, `owner`, `updatedAt` and `updatedBy` fields for `com.databricks.sdk.service.catalog.CreateRegisteredModelRequest`. +* Add `includeUnbound` field for `com.databricks.sdk.service.catalog.ListCatalogsRequest`. +* Add `includeUnbound` field for `com.databricks.sdk.service.catalog.ListCredentialsRequest`. +* Add `includeUnbound` field for `com.databricks.sdk.service.catalog.ListExternalLocationsRequest`. +* Add `includeUnbound` field for `com.databricks.sdk.service.catalog.ListStorageCredentialsRequest`. 
+* Add `catalogName`, `id`, `modelName` and `schemaName` fields for `com.databricks.sdk.service.catalog.RegisteredModelAlias`. +* Add `aliases`, `catalogName`, `createdAt`, `createdBy`, `id`, `metastoreId`, `modelName`, `modelVersionDependencies`, `runId`, `runWorkspaceId`, `schemaName`, `source`, `status`, `storageLocation`, `updatedAt` and `updatedBy` fields for `com.databricks.sdk.service.catalog.UpdateModelVersionRequest`. +* Add `aliases`, `browseOnly`, `catalogName`, `createdAt`, `createdBy`, `metastoreId`, `name`, `schemaName`, `storageLocation`, `updatedAt` and `updatedBy` fields for `com.databricks.sdk.service.catalog.UpdateRegisteredModelRequest`. +* Add `parameters` field for `com.databricks.sdk.service.dashboards.GenieQueryAttachment`. +* Add `databaseInstanceName` field for `com.databricks.sdk.service.database.CreateDatabaseInstanceRoleRequest`. +* Add `customTags`, `effectiveCustomTags`, `effectiveUsagePolicyId` and `usagePolicyId` fields for `com.databricks.sdk.service.database.DatabaseInstance`. +* Add `effectiveAttributes` and `instanceName` fields for `com.databricks.sdk.service.database.DatabaseInstanceRole`. +* Add `keyRegion` field for `com.databricks.sdk.service.provisioning.CreateAwsKeyInfo`. +* Add `roleArn` field for `com.databricks.sdk.service.provisioning.CreateStorageConfigurationRequest`. +* Add `computeMode` field for `com.databricks.sdk.service.provisioning.CreateWorkspaceRequest`. +* Add `azureKeyInfo` field for `com.databricks.sdk.service.provisioning.CustomerManagedKey`. +* [Breaking] Add `customerFacingPrivateAccessSettings` field for `com.databricks.sdk.service.provisioning.ReplacePrivateAccessSettingsRequest`. +* Add `roleArn` field for `com.databricks.sdk.service.provisioning.StorageConfiguration`. +* [Breaking] Add `customerFacingWorkspace` field for `com.databricks.sdk.service.provisioning.UpdateWorkspaceRequest`. +* Add `updateMask` field for `com.databricks.sdk.service.provisioning.UpdateWorkspaceRequest`. +* Add `computeMode`, `expectedWorkspaceStatus`, `network`, `networkConnectivityConfigId` and `storageMode` fields for `com.databricks.sdk.service.provisioning.Workspace`. +* Add `dependencyStorageLocations` field for `com.databricks.sdk.service.sharing.TableInternalAttributes`. +* Add `enableServerlessCompute` field for `com.databricks.sdk.service.sql.GetWorkspaceWarehouseConfigResponse`. +* Add `pageSize` and `pageToken` fields for `com.databricks.sdk.service.sql.ListWarehousesRequest`. +* Add `nextPageToken` field for `com.databricks.sdk.service.sql.ListWarehousesResponse`. +* Add `enableServerlessCompute` field for `com.databricks.sdk.service.sql.SetWorkspaceWarehouseConfigRequest`. +* Add `gitEmail` field for `com.databricks.sdk.service.workspace.CreateCredentialsRequest`. +* Add `gitEmail` field for `com.databricks.sdk.service.workspace.CreateCredentialsResponse`. +* Add `gitEmail` field for `com.databricks.sdk.service.workspace.CredentialInfo`. +* Add `gitEmail` field for `com.databricks.sdk.service.workspace.GetCredentialsResponse`. +* Add `gitEmail` field for `com.databricks.sdk.service.workspace.UpdateCredentialsRequest`. +* Add `MODEL_VERSION_STATUS_UNKNOWN` enum value for `com.databricks.sdk.service.catalog.ModelVersionInfoStatus`. +* Add `EXTERNAL_USE_SCHEMA` enum value for `com.databricks.sdk.service.catalog.Privilege`. +* Add `STREAM_NATIVE` enum value for `com.databricks.sdk.service.catalog.SystemType`. 
+* Add `K8S_ACTIVE_POD_QUOTA_EXCEEDED` and `CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED` enum values for `com.databricks.sdk.service.compute.TerminationReasonCode`. +* Add `EXCEEDED_MAX_TOKEN_LENGTH_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION` and `INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION` enum values for `com.databricks.sdk.service.dashboards.MessageErrorType`. +* Add `ASSET_TYPE_MCP` enum value for `com.databricks.sdk.service.marketplace.AssetType`. +* Add `GERMANY_TISAX` enum value for `com.databricks.sdk.service.settings.ComplianceStandard`. +* Add `SSH_BOOTSTRAP_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `INIT_CONTAINER_NOT_FINISHED`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `INVALID_WORKER_IMAGE_FAILURE`, `WORKSPACE_UPDATE`, `INVALID_AWS_PARAMETER`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_UNEXPECTED_FAILURE`, `UNEXPECTED_POD_RECREATION`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_KMS_KEY_PERMISSION_DENIED`, `DRIVER_EVICTION`, `USER_INITIATED_VM_TERMINATION`, `GCP_IAM_TIMEOUT`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `AWS_INVALID_KEY_PAIR`, `DRIVER_POD_CREATION_FAILURE`, `MAINTENANCE_MODE`, `INTERNAL_CAPACITY_FAILURE`, `EXECUTOR_POD_UNSCHEDULED`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `AWS_INVALID_KMS_KEY_STATE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_FORBIDDEN`, `GCP_NOT_FOUND`, `RESOURCE_USAGE_BLOCKED`, `DATA_ACCESS_CONFIG_CHANGED`, `ACCESS_TOKEN_FAILURE`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `IN_PENALTY_BOX`, `DISASTER_RECOVERY_REPLICATION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `GCP_SUBNET_NOT_READY`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `EOS_SPARK_IMAGE`, `NO_MATCHED_K8S`, `LAZY_ALLOCATION_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `SECRET_CREATION_FAILURE`, `POD_SCHEDULING_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `NETVISOR_SETUP_TIMEOUT`, `NO_MATCHED_K8S_TESTING_TAG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `GKE_BASED_CLUSTER_TERMINATION`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `DOCKER_INVALID_OS_EXCEPTION`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DNS_RESOLUTION_ERROR`, `GCP_DENIED_BY_ORG_POLICY`, `SECRET_PERMISSION_DENIED`, `NETWORK_CHECK_NIC_FAILURE`, `NETWORK_CHECK_DNS_SERVER_FAILURE`, `NETWORK_CHECK_STORAGE_FAILURE`, `NETWORK_CHECK_METADATA_ENDPOINT_FAILURE`, 
`NETWORK_CHECK_CONTROL_PLANE_FAILURE`, `NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE`, `DRIVER_UNHEALTHY`, `SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION`, `DRIVER_DNS_RESOLUTION_FAILURE`, `NO_ACTIVATED_K8S`, `USAGE_POLICY_ENTITLEMENT_DENIED`, `NO_ACTIVATED_K8S_TESTING_TAG`, `K8S_ACTIVE_POD_QUOTA_EXCEEDED` and `CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED` enum values for `com.databricks.sdk.service.sql.TerminationReasonCode`. +* [Breaking] Change `create()` method for `accountClient.accountMetastoreAssignments()` service to start returning `com.databricks.sdk.service.catalog.AccountsCreateMetastoreAssignmentResponse` class. +* [Breaking] Change `delete()` method for `accountClient.accountMetastoreAssignments()` service to start returning `com.databricks.sdk.service.catalog.AccountsDeleteMetastoreAssignmentResponse` class. +* [Breaking] Change `update()` method for `accountClient.accountMetastoreAssignments()` service to start returning `com.databricks.sdk.service.catalog.AccountsUpdateMetastoreAssignmentResponse` class. +* [Breaking] Change `create()` method for `accountClient.accountMetastores()` service to return `com.databricks.sdk.service.catalog.AccountsCreateMetastoreResponse` class. +* [Breaking] Change `delete()` method for `accountClient.accountMetastores()` service to start returning `com.databricks.sdk.service.catalog.AccountsDeleteMetastoreResponse` class. +* [Breaking] Change `get()` method for `accountClient.accountMetastores()` service to return `com.databricks.sdk.service.catalog.AccountsGetMetastoreResponse` class. +* [Breaking] Change `list()` method for `accountClient.accountMetastores()` service to return `com.databricks.sdk.service.catalog.AccountsListMetastoresResponse` class. +* [Breaking] Change `update()` method for `accountClient.accountMetastores()` service to return `com.databricks.sdk.service.catalog.AccountsUpdateMetastoreResponse` class. +* [Breaking] Change `create()` method for `accountClient.accountStorageCredentials()` service to return `com.databricks.sdk.service.catalog.AccountsCreateStorageCredentialInfo` class. +* [Breaking] Change `delete()` method for `accountClient.accountStorageCredentials()` service to start returning `com.databricks.sdk.service.catalog.AccountsDeleteStorageCredentialResponse` class. +* [Breaking] Change `update()` method for `accountClient.accountStorageCredentials()` service to return `com.databricks.sdk.service.catalog.AccountsUpdateStorageCredentialResponse` class. +* [Breaking] Change `create()` method for `workspaceClient.registeredModels()` service with new required argument order. +* [Breaking] Change `delete()` method for `accountClient.credentials()` service to start returning `com.databricks.sdk.service.provisioning.Credential` class. +* [Breaking] Change `delete()` method for `accountClient.encryptionKeys()` service to start returning `com.databricks.sdk.service.provisioning.CustomerManagedKey` class. +* [Breaking] Change `create()` method for `accountClient.networks()` service with new required argument order. +* [Breaking] Change `delete()` method for `accountClient.networks()` service to start returning `com.databricks.sdk.service.provisioning.Network` class. +* [Breaking] Change `create()` and `replace()` methods for `accountClient.privateAccess()` service with new required argument order. +* [Breaking] Change `delete()` and `replace()` methods for `accountClient.privateAccess()` service to start returning `com.databricks.sdk.service.provisioning.PrivateAccessSettings` class. 
+* [Breaking] Change `delete()` method for `accountClient.storage()` service to start returning `com.databricks.sdk.service.provisioning.StorageConfiguration` class.
+* [Breaking] Change `create()` method for `accountClient.vpcEndpoints()` service with new required argument order.
+* [Breaking] Change `delete()` method for `accountClient.vpcEndpoints()` service to start returning `com.databricks.sdk.service.provisioning.VpcEndpoint` class.
+* [Breaking] Change `create()` and `update()` methods for `accountClient.workspaces()` service with new required argument order.
+* [Breaking] Change `delete()` and `update()` methods for `accountClient.workspaces()` service to start returning `com.databricks.sdk.service.provisioning.Workspace` class.
+* [Breaking] Change `executeStatement()` method for `workspaceClient.statementExecution()` service. The method path has changed.
+* [Breaking] Change `metastoreInfo` field for `com.databricks.sdk.service.catalog.AccountsCreateMetastore` to type `com.databricks.sdk.service.catalog.CreateAccountsMetastore` class.
+* [Breaking] Change `credentialInfo` field for `com.databricks.sdk.service.catalog.AccountsCreateStorageCredential` to type `com.databricks.sdk.service.catalog.CreateAccountsStorageCredential` class.
+* [Breaking] Change `metastoreInfo` field for `com.databricks.sdk.service.catalog.AccountsUpdateMetastore` to type `com.databricks.sdk.service.catalog.UpdateAccountsMetastore` class.
+* [Breaking] Change `credentialInfo` field for `com.databricks.sdk.service.catalog.AccountsUpdateStorageCredential` to type `com.databricks.sdk.service.catalog.UpdateAccountsStorageCredential` class.
+* Change `catalogName`, `name` and `schemaName` fields for `com.databricks.sdk.service.catalog.CreateRegisteredModelRequest` to no longer be required.
+* [Breaking] Change `name` field for `com.databricks.sdk.service.database.DatabaseInstanceRole` to be required.
+* Change `networkName` field for `com.databricks.sdk.service.provisioning.CreateNetworkRequest` to no longer be required.
+* Change `privateAccessSettingsName` and `region` fields for `com.databricks.sdk.service.provisioning.CreatePrivateAccessSettingsRequest` to no longer be required.
+* Change `vpcEndpointName` field for `com.databricks.sdk.service.provisioning.CreateVpcEndpointRequest` to no longer be required.
+* Change `workspaceName` field for `com.databricks.sdk.service.provisioning.CreateWorkspaceRequest` to no longer be required.
+* [Breaking] Change `dataplaneRelay` and `restApi` fields for `com.databricks.sdk.service.provisioning.NetworkVpcEndpoints` to no longer be required.
+* [Breaking] Change waiter for `accountClient.workspaces().update()` method.
+* [Breaking] Remove `browseOnly` field for `com.databricks.sdk.service.catalog.ModelVersionInfo`.
+* [Breaking] Remove `jarDependencies` field for `com.databricks.sdk.service.compute.Environment`.
+* [Breaking] Remove `isNoPublicIpEnabled` field for `com.databricks.sdk.service.provisioning.CreateWorkspaceRequest`.
+* [Breaking] Remove `allowedVpcEndpointIds`, `privateAccessLevel`, `privateAccessSettingsName`, `publicAccessEnabled` and `region` fields for `com.databricks.sdk.service.provisioning.ReplacePrivateAccessSettingsRequest`.
+* [Breaking] Remove `externalId` field for `com.databricks.sdk.service.provisioning.StsRole`. +* [Breaking] Remove `awsRegion`, `credentialsId`, `customTags`, `managedServicesCustomerManagedKeyId`, `networkConnectivityConfigId`, `networkId`, `privateAccessSettingsId`, `storageConfigurationId` and `storageCustomerManagedKeyId` fields for `com.databricks.sdk.service.provisioning.UpdateWorkspaceRequest`. +* [Breaking] Remove `externalCustomerInfo` and `isNoPublicIpEnabled` fields for `com.databricks.sdk.service.provisioning.Workspace`. +* [Breaking] Remove `STATUS_UNSPECIFIED` enum value for `com.databricks.sdk.service.sql.Status`. diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java index bac85f5a9..e0c6ccce2 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java @@ -103,6 +103,8 @@ import com.databricks.sdk.service.dashboards.LakeviewService; import com.databricks.sdk.service.database.DatabaseAPI; import com.databricks.sdk.service.database.DatabaseService; +import com.databricks.sdk.service.dataquality.DataQualityAPI; +import com.databricks.sdk.service.dataquality.DataQualityService; import com.databricks.sdk.service.files.DbfsService; import com.databricks.sdk.service.files.FilesAPI; import com.databricks.sdk.service.files.FilesService; @@ -286,6 +288,7 @@ public class WorkspaceClient { private CurrentUserAPI currentUserAPI; private DashboardWidgetsAPI dashboardWidgetsAPI; private DashboardsAPI dashboardsAPI; + private DataQualityAPI dataQualityAPI; private DataSourcesAPI dataSourcesAPI; private DatabaseAPI databaseAPI; private DbfsExt dbfsAPI; @@ -416,6 +419,7 @@ public WorkspaceClient(DatabricksConfig config) { currentUserAPI = new CurrentUserAPI(apiClient); dashboardWidgetsAPI = new DashboardWidgetsAPI(apiClient); dashboardsAPI = new DashboardsAPI(apiClient); + dataQualityAPI = new DataQualityAPI(apiClient); dataSourcesAPI = new DataSourcesAPI(apiClient); databaseAPI = new DatabaseAPI(apiClient); dbfsAPI = new DbfsExt(apiClient); @@ -801,6 +805,11 @@ public DashboardsAPI dashboards() { return dashboardsAPI; } + /** Manage the data quality of Unity Catalog objects (currently support `schema` and `table`) */ + public DataQualityAPI dataQuality() { + return dataQualityAPI; + } + /** * This API is provided to assist you in making new query objects. When creating a query object, * you may optionally specify a `data_source_id` for the SQL warehouse against which it will run. @@ -1555,8 +1564,8 @@ public RedashConfigAPI redashConfig() { * version metadata (comments, aliases) create a new model version, or update permissions on the * registered model, users must be owners of the registered model. * - *
Note: The securable type for models is "FUNCTION". When using REST APIs (e.g. tagging, - * grants) that specify a securable type, use "FUNCTION" as the securable type. + *
Note: The securable type for models is FUNCTION. When using REST APIs (e.g. tagging, grants) + * that specify a securable type, use FUNCTION as the securable type. */ public RegisteredModelsAPI registeredModels() { return registeredModelsAPI; @@ -1727,16 +1736,16 @@ public SharesAPI shares() { * has not yet finished. This can be set to either `CONTINUE`, to fallback to asynchronous mode, * or it can be set to `CANCEL`, which cancels the statement. * - *
In summary: - Synchronous mode - `wait_timeout=30s` and `on_wait_timeout=CANCEL` - The call - * waits up to 30 seconds; if the statement execution finishes within this time, the result data - * is returned directly in the response. If the execution takes longer than 30 seconds, the - * execution is canceled and the call returns with a `CANCELED` state. - Asynchronous mode - - * `wait_timeout=0s` (`on_wait_timeout` is ignored) - The call doesn't wait for the statement to - * finish but returns directly with a statement ID. The status of the statement execution can be - * polled by issuing :method:statementexecution/getStatement with the statement ID. Once the + *
In summary: - **Synchronous mode** (`wait_timeout=30s` and `on_wait_timeout=CANCEL`): The
+ * call waits up to 30 seconds; if the statement execution finishes within this time, the result
+ * data is returned directly in the response. If the execution takes longer than 30 seconds, the
+ * execution is canceled and the call returns with a `CANCELED` state. - **Asynchronous mode**
+ * (`wait_timeout=0s` and `on_wait_timeout` is ignored): The call doesn't wait for the statement
+ * to finish but returns directly with a statement ID. The status of the statement execution can
+ * be polled by issuing :method:statementexecution/getStatement with the statement ID. Once the
* execution has succeeded, this call also returns the result and metadata in the response. -
- * Hybrid mode (default) - `wait_timeout=10s` and `on_wait_timeout=CONTINUE` - The call waits for
- * up to 10 seconds; if the statement execution finishes within this time, the result data is
+ * **[Default] Hybrid mode** (`wait_timeout=10s` and `on_wait_timeout=CONTINUE`): The call waits
+ * for up to 10 seconds; if the statement execution finishes within this time, the result data is
* returned directly in the response. If the execution takes longer than 10 seconds, a statement
* ID is returned. The statement ID can be used to fetch status and results in the same way as in
* the asynchronous mode.
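
The summary above maps onto two request fields. A minimal sketch of the synchronous and asynchronous modes, assuming a configured `WorkspaceClient` and the generated `ExecuteStatementRequest` builder; the response accessors shown (`getStatementId()`, `getStatus()`) and the `getStatement(String)` convenience overload are assumptions to verify against the generated sql classes:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.ExecuteStatementRequest;
import com.databricks.sdk.service.sql.ExecuteStatementRequestOnWaitTimeout;

public class StatementModes {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();

    // Synchronous mode: wait up to 30s, cancel the statement on timeout.
    var sync = w.statementExecution().executeStatement(
        new ExecuteStatementRequest()
            .setWarehouseId("<warehouse-id>") // placeholder
            .setStatement("SELECT 1")
            .setWaitTimeout("30s")
            .setOnWaitTimeout(ExecuteStatementRequestOnWaitTimeout.CANCEL));
    System.out.println(sync.getStatus());

    // Asynchronous mode: return immediately with a statement ID, then poll.
    var async = w.statementExecution().executeStatement(
        new ExecuteStatementRequest()
            .setWarehouseId("<warehouse-id>")
            .setStatement("SELECT * FROM samples.nyctaxi.trips")
            .setWaitTimeout("0s"));
    var status = w.statementExecution().getStatement(async.getStatementId());
    System.out.println(status.getStatus());
  }
}
```
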
@@ -2409,6 +2418,17 @@ public WorkspaceClient withDashboardsAPI(DashboardsAPI dashboards) {
return this;
}
+ /** Replace the default DataQualityService with a custom implementation. */
+ public WorkspaceClient withDataQualityImpl(DataQualityService dataQuality) {
+ return this.withDataQualityAPI(new DataQualityAPI(dataQuality));
+ }
+
+ /** Replace the default DataQualityAPI with a custom implementation. */
+ public WorkspaceClient withDataQualityAPI(DataQualityAPI dataQuality) {
+ this.dataQualityAPI = dataQuality;
+ return this;
+ }
+
/** Replace the default DataSourcesService with a custom implementation. */
public WorkspaceClient withDataSourcesImpl(DataSourcesService dataSources) {
return this.withDataSourcesAPI(new DataSourcesAPI(dataSources));
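
The paired `withDataQualityImpl`/`withDataQualityAPI` hooks added above follow the SDK's existing override pattern, which is mainly useful for injecting a stub in unit tests. A sketch assuming Mockito (not an SDK dependency); the stubbed service is wrapped in a `DataQualityAPI` exactly as the method body shows:

```java
import static org.mockito.Mockito.mock;

import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.core.DatabricksConfig;
import com.databricks.sdk.service.dataquality.DataQualityService;

public class DataQualityStubExample {
  public static void main(String[] args) {
    // A stubbed service avoids real HTTP calls in tests.
    DataQualityService stub = mock(DataQualityService.class);
    WorkspaceClient w =
        new WorkspaceClient(new DatabricksConfig()).withDataQualityImpl(stub);
    // Code under test keeps resolving the service through the normal accessor.
    System.out.println(w.dataQuality() != null);
  }
}
```
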
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/App.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/App.java
index 97b6f3b19..97dda72ac 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/App.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/App.java
@@ -25,6 +25,10 @@ public class App {
@JsonProperty("budget_policy_id")
private String budgetPolicyId;
+ /** */
+ @JsonProperty("compute_size")
+ private ComputeSize computeSize;
+
/** */
@JsonProperty("compute_status")
private ComputeStatus computeStatus;
@@ -141,6 +145,15 @@ public String getBudgetPolicyId() {
return budgetPolicyId;
}
+ public App setComputeSize(ComputeSize computeSize) {
+ this.computeSize = computeSize;
+ return this;
+ }
+
+ public ComputeSize getComputeSize() {
+ return computeSize;
+ }
+
public App setComputeStatus(ComputeStatus computeStatus) {
this.computeStatus = computeStatus;
return this;
@@ -329,6 +342,7 @@ public boolean equals(Object o) {
return Objects.equals(activeDeployment, that.activeDeployment)
&& Objects.equals(appStatus, that.appStatus)
&& Objects.equals(budgetPolicyId, that.budgetPolicyId)
+ && Objects.equals(computeSize, that.computeSize)
&& Objects.equals(computeStatus, that.computeStatus)
&& Objects.equals(createTime, that.createTime)
&& Objects.equals(creator, that.creator)
@@ -357,6 +371,7 @@ public int hashCode() {
activeDeployment,
appStatus,
budgetPolicyId,
+ computeSize,
computeStatus,
createTime,
creator,
@@ -385,6 +400,7 @@ public String toString() {
.add("activeDeployment", activeDeployment)
.add("appStatus", appStatus)
.add("budgetPolicyId", budgetPolicyId)
+ .add("computeSize", computeSize)
.add("computeStatus", computeStatus)
.add("createTime", createTime)
.add("creator", creator)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResource.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResource.java
index 1e8acf263..2761c1651 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResource.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResource.java
@@ -17,6 +17,10 @@ public class AppResource {
@JsonProperty("description")
private String description;
+ /** */
+ @JsonProperty("genie_space")
+ private AppResourceGenieSpace genieSpace;
+
/** */
@JsonProperty("job")
private AppResourceJob job;
@@ -59,6 +63,15 @@ public String getDescription() {
return description;
}
+ public AppResource setGenieSpace(AppResourceGenieSpace genieSpace) {
+ this.genieSpace = genieSpace;
+ return this;
+ }
+
+ public AppResourceGenieSpace getGenieSpace() {
+ return genieSpace;
+ }
+
public AppResource setJob(AppResourceJob job) {
this.job = job;
return this;
@@ -120,6 +133,7 @@ public boolean equals(Object o) {
AppResource that = (AppResource) o;
return Objects.equals(database, that.database)
&& Objects.equals(description, that.description)
+ && Objects.equals(genieSpace, that.genieSpace)
&& Objects.equals(job, that.job)
&& Objects.equals(name, that.name)
&& Objects.equals(secret, that.secret)
@@ -131,7 +145,15 @@ public boolean equals(Object o) {
@Override
public int hashCode() {
return Objects.hash(
- database, description, job, name, secret, servingEndpoint, sqlWarehouse, ucSecurable);
+ database,
+ description,
+ genieSpace,
+ job,
+ name,
+ secret,
+ servingEndpoint,
+ sqlWarehouse,
+ ucSecurable);
}
@Override
@@ -139,6 +161,7 @@ public String toString() {
return new ToStringer(AppResource.class)
.add("database", database)
.add("description", description)
+ .add("genieSpace", genieSpace)
.add("job", job)
.add("name", name)
.add("secret", secret)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpace.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpace.java
new file mode 100755
index 000000000..00045cdbd
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpace.java
@@ -0,0 +1,74 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.apps;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class AppResourceGenieSpace {
+ /** */
+ @JsonProperty("name")
+ private String name;
+
+ /** */
+ @JsonProperty("permission")
+ private AppResourceGenieSpaceGenieSpacePermission permission;
+
+ /** */
+ @JsonProperty("space_id")
+ private String spaceId;
+
+ public AppResourceGenieSpace setName(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public AppResourceGenieSpace setPermission(AppResourceGenieSpaceGenieSpacePermission permission) {
+ this.permission = permission;
+ return this;
+ }
+
+ public AppResourceGenieSpaceGenieSpacePermission getPermission() {
+ return permission;
+ }
+
+ public AppResourceGenieSpace setSpaceId(String spaceId) {
+ this.spaceId = spaceId;
+ return this;
+ }
+
+ public String getSpaceId() {
+ return spaceId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AppResourceGenieSpace that = (AppResourceGenieSpace) o;
+ return Objects.equals(name, that.name)
+ && Objects.equals(permission, that.permission)
+ && Objects.equals(spaceId, that.spaceId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(name, permission, spaceId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AppResourceGenieSpace.class)
+ .add("name", name)
+ .add("permission", permission)
+ .add("spaceId", spaceId)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpaceGenieSpacePermission.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpaceGenieSpacePermission.java
new file mode 100755
index 000000000..c93785cc3
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpaceGenieSpacePermission.java
@@ -0,0 +1,13 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.apps;
+
+import com.databricks.sdk.support.Generated;
+
+@Generated
+public enum AppResourceGenieSpaceGenieSpacePermission {
+ CAN_EDIT,
+ CAN_MANAGE,
+ CAN_RUN,
+ CAN_VIEW,
+}
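
Taken together, the new `genie_space` field on `AppResource` plus the two types above let an app declare a Genie space dependency with an access level. A hedged sketch of wiring one up (the space ID is a placeholder):

```java
import com.databricks.sdk.service.apps.AppResource;
import com.databricks.sdk.service.apps.AppResourceGenieSpace;
import com.databricks.sdk.service.apps.AppResourceGenieSpaceGenieSpacePermission;

public class GenieSpaceResourceExample {
  public static void main(String[] args) {
    AppResource resource = new AppResource()
        .setName("genie")              // key the app code uses to look up the resource
        .setGenieSpace(new AppResourceGenieSpace()
            .setSpaceId("<space-id>")  // placeholder Genie space ID
            .setPermission(AppResourceGenieSpaceGenieSpacePermission.CAN_RUN));
    System.out.println(resource);
  }
}
```
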
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdate.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdate.java
new file mode 100755
index 000000000..b34c390e7
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdate.java
@@ -0,0 +1,136 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.apps;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+@Generated
+public class AppUpdate {
+ /** */
+ @JsonProperty("budget_policy_id")
+ private String budgetPolicyId;
+
+ /** */
+ @JsonProperty("compute_size")
+ private ComputeSize computeSize;
+
+ /** */
+ @JsonProperty("description")
+ private String description;
+
+ /** */
+ @JsonProperty("resources")
+  private Collection<AppResource> resources;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AsyncUpdateAppRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AsyncUpdateAppRequest.java
new file mode 100755
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AsyncUpdateAppRequest.java
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.apps;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class AsyncUpdateAppRequest {
+  /** */
+  @JsonProperty("app")
+  private App app;
+
+  /** The name of the app. */
+  @JsonIgnore private String appName;
+
+  /**
+   * A field mask of `*` indicates full replacement. It’s recommended to always explicitly list
+ * the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if
+ * the API changes in the future.
+ */
+ @JsonProperty("update_mask")
+ private String updateMask;
+
+ public AsyncUpdateAppRequest setApp(App app) {
+ this.app = app;
+ return this;
+ }
+
+ public App getApp() {
+ return app;
+ }
+
+ public AsyncUpdateAppRequest setAppName(String appName) {
+ this.appName = appName;
+ return this;
+ }
+
+ public String getAppName() {
+ return appName;
+ }
+
+ public AsyncUpdateAppRequest setUpdateMask(String updateMask) {
+ this.updateMask = updateMask;
+ return this;
+ }
+
+ public String getUpdateMask() {
+ return updateMask;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AsyncUpdateAppRequest that = (AsyncUpdateAppRequest) o;
+ return Objects.equals(app, that.app)
+ && Objects.equals(appName, that.appName)
+ && Objects.equals(updateMask, that.updateMask);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(app, appName, updateMask);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AsyncUpdateAppRequest.class)
+ .add("app", app)
+ .add("appName", appName)
+ .add("updateMask", updateMask)
+ .toString();
+ }
+}
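
Per the field-mask note above, callers should name the fields they change rather than pass `*`. A sketch of the asynchronous update added to `workspaceClient.apps()` as `createUpdate()` (per the changelog); the exact signature and the `AppUpdate` return type are assumptions:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.apps.App;
import com.databricks.sdk.service.apps.AppUpdate;
import com.databricks.sdk.service.apps.AsyncUpdateAppRequest;
import com.databricks.sdk.service.apps.ComputeSize;

public class AsyncAppUpdateExample {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();
    AppUpdate update = w.apps().createUpdate(   // assumed signature
        new AsyncUpdateAppRequest()
            .setAppName("my-app")
            .setApp(new App().setComputeSize(ComputeSize.LARGE))
            .setUpdateMask("compute_size"));    // explicit mask, not "*"
    System.out.println(update);
  }
}
```
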
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ComputeSize.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ComputeSize.java
new file mode 100755
index 000000000..ff5b63350
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ComputeSize.java
@@ -0,0 +1,12 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.apps;
+
+import com.databricks.sdk.support.Generated;
+
+@Generated
+public enum ComputeSize {
+ LARGE,
+ LIQUID,
+ MEDIUM,
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppUpdateRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppUpdateRequest.java
new file mode 100755
index 000000000..152df04b8
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppUpdateRequest.java
@@ -0,0 +1,41 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.apps;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.Objects;
+
+@Generated
+public class GetAppUpdateRequest {
+ /** The name of the app. */
+ @JsonIgnore private String appName;
+
+ public GetAppUpdateRequest setAppName(String appName) {
+ this.appName = appName;
+ return this;
+ }
+
+ public String getAppName() {
+ return appName;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ GetAppUpdateRequest that = (GetAppUpdateRequest) o;
+ return Objects.equals(appName, that.appName);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(appName);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(GetAppUpdateRequest.class).add("appName", appName).toString();
+ }
+}
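
`GetAppUpdateRequest` carries only the app name, so the companion `getUpdate()` method on `workspaceClient.apps()` (per the changelog) acts as a status probe for the in-flight update started by `createUpdate()`. A sketch; the return type and signature are assumptions:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.apps.AppUpdate;
import com.databricks.sdk.service.apps.GetAppUpdateRequest;

public class PollAppUpdateExample {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();
    AppUpdate update = w.apps().getUpdate(   // assumed signature
        new GetAppUpdateRequest().setAppName("my-app"));
    // AppUpdateUpdateStatus / AppUpdateUpdateStatusUpdateState (added in
    // .gitattributes above) presumably expose the state; toString() shows it.
    System.out.println(update);
  }
}
```
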
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsAPI.java
index 61feaf2f3..6e1cd7c9e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsAPI.java
@@ -25,20 +25,22 @@ public AccountMetastoreAssignmentsAPI(AccountMetastoreAssignmentsService mock) {
}
/** Creates an assignment to a metastore for a workspace */
- public void create(AccountsCreateMetastoreAssignment request) {
- impl.create(request);
+ public AccountsCreateMetastoreAssignmentResponse create(
+ AccountsCreateMetastoreAssignment request) {
+ return impl.create(request);
}
- public void delete(long workspaceId, String metastoreId) {
- delete(
+ public AccountsDeleteMetastoreAssignmentResponse delete(long workspaceId, String metastoreId) {
+ return delete(
new DeleteAccountMetastoreAssignmentRequest()
.setWorkspaceId(workspaceId)
.setMetastoreId(metastoreId));
}
/** Deletes a metastore assignment to a workspace, leaving the workspace with no metastore. */
- public void delete(DeleteAccountMetastoreAssignmentRequest request) {
- impl.delete(request);
+ public AccountsDeleteMetastoreAssignmentResponse delete(
+ DeleteAccountMetastoreAssignmentRequest request) {
+ return impl.delete(request);
}
public AccountsMetastoreAssignment get(long workspaceId) {
@@ -47,7 +49,7 @@ public AccountsMetastoreAssignment get(long workspaceId) {
/**
* Gets the metastore assignment, if any, for the workspace specified by ID. If the workspace is
- * assigned a metastore, the mappig will be returned. If no metastore is assigned to the
+ * assigned a metastore, the mapping will be returned. If no metastore is assigned to the
* workspace, the assignment will not be found and a 404 returned.
*/
public AccountsMetastoreAssignment get(GetAccountMetastoreAssignmentRequest request) {
@@ -71,8 +73,9 @@ public Iterable<Long> list(String metastoreId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsAPI.java
 * **AwsIamRole** for AWS credentials * **AzureServicePrincipal** for Azure credentials *
- * **GcpServiceAcountKey** for GCP credentials.
- *
- * The caller must be a metastore admin and have the **CREATE_STORAGE_CREDENTIAL** privilege on
+ * The caller must be a metastore admin and have the `CREATE_STORAGE_CREDENTIAL` privilege on
* the metastore.
*/
- public AccountsStorageCredentialInfo create(AccountsCreateStorageCredential request) {
+ public AccountsCreateStorageCredentialInfo create(AccountsCreateStorageCredential request) {
return impl.create(request);
}
- public void delete(String metastoreId, String storageCredentialName) {
- delete(
+ public AccountsDeleteStorageCredentialResponse delete(
+ String metastoreId, String storageCredentialName) {
+ return delete(
new DeleteAccountStorageCredentialRequest()
.setMetastoreId(metastoreId)
.setStorageCredentialName(storageCredentialName));
@@ -48,8 +48,9 @@ public void delete(String metastoreId, String storageCredentialName) {
* Deletes a storage credential from the metastore. The caller must be an owner of the storage
* credential.
*/
- public void delete(DeleteAccountStorageCredentialRequest request) {
- impl.delete(request);
+ public AccountsDeleteStorageCredentialResponse delete(
+ DeleteAccountStorageCredentialRequest request) {
+ return impl.delete(request);
}
public AccountsStorageCredentialInfo get(String metastoreId, String storageCredentialName) {
@@ -82,9 +83,9 @@ public Iterable<StorageCredentialInfo> list(String metastoreId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsService.java
 * **AwsIamRole** for AWS credentials * **AzureServicePrincipal** for Azure credentials *
- * **GcpServiceAcountKey** for GCP credentials.
- *
- * The caller must be a metastore admin and have the **CREATE_STORAGE_CREDENTIAL** privilege on
+ * The caller must be a metastore admin and have the `CREATE_STORAGE_CREDENTIAL` privilege on
* the metastore.
*/
- AccountsStorageCredentialInfo create(
+ AccountsCreateStorageCredentialInfo create(
AccountsCreateStorageCredential accountsCreateStorageCredential);
/**
* Deletes a storage credential from the metastore. The caller must be an owner of the storage
* credential.
*/
- void delete(DeleteAccountStorageCredentialRequest deleteAccountStorageCredentialRequest);
+ AccountsDeleteStorageCredentialResponse delete(
+ DeleteAccountStorageCredentialRequest deleteAccountStorageCredentialRequest);
/**
* Gets a storage credential from the metastore. The caller must be a metastore admin, the owner
@@ -43,8 +43,8 @@ ListAccountStorageCredentialsResponse list(
/**
* Updates a storage credential on the metastore. The caller must be the owner of the storage
- * credential. If the caller is a metastore admin, only the __owner__ credential can be changed.
+ * credential. If the caller is a metastore admin, only the **owner** credential can be changed.
*/
- AccountsStorageCredentialInfo update(
+ AccountsUpdateStorageCredentialResponse update(
AccountsUpdateStorageCredential accountsUpdateStorageCredential);
}
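Net effect of the service changes above: `delete` now returns a typed (currently empty) response where it previously returned `void`, and `create`/`update` return the new dedicated response types instead of `AccountsStorageCredentialInfo`. A hedged call-site sketch, assuming the usual `AccountClient` accessor name:

// Existing void-style call sites keep compiling; the result can now be captured.
AccountsDeleteStorageCredentialResponse resp =
    accountClient.storageCredentials().delete("metastore-id", "credential-name");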
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastore.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastore.java
index b297cbf98..e86dbfa1e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastore.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastore.java
@@ -7,18 +7,19 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/** Properties of the new metastore. */
@Generated
public class AccountsCreateMetastore {
/** */
@JsonProperty("metastore_info")
- private CreateMetastore metastoreInfo;
+ private CreateAccountsMetastore metastoreInfo;
- public AccountsCreateMetastore setMetastoreInfo(CreateMetastore metastoreInfo) {
+ public AccountsCreateMetastore setMetastoreInfo(CreateAccountsMetastore metastoreInfo) {
this.metastoreInfo = metastoreInfo;
return this;
}
- public CreateMetastore getMetastoreInfo() {
+ public CreateAccountsMetastore getMetastoreInfo() {
return metastoreInfo;
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignment.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignment.java
index fa3e7a1e7..5ce5863fe 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignment.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignment.java
@@ -8,6 +8,7 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/** The mapping from workspace to metastore. */
@Generated
public class AccountsCreateMetastoreAssignment {
/** */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignmentResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignmentResponse.java
new file mode 100755
index 000000000..d8b003eaf
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignmentResponse.java
@@ -0,0 +1,29 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import java.util.Objects;
+
+/** The metastore assignment was successfully created. */
+@Generated
+public class AccountsCreateMetastoreAssignmentResponse {
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash();
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsCreateMetastoreAssignmentResponse.class).toString();
+ }
+}
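Creating an assignment likewise now yields this marker type rather than `void`. A sketch assuming the `metastoreAssignments()` accessor and the generated setters on `AccountsCreateMetastoreAssignment`, with illustrative IDs:

AccountsCreateMetastoreAssignmentResponse created =
    accountClient.metastoreAssignments()
        .create(
            new AccountsCreateMetastoreAssignment()
                .setWorkspaceId(1234567890L)
                .setMetastoreId("11111111-2222-3333-4444-555555555555"));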
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsMetastoreInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreResponse.java
similarity index 76%
rename from databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsMetastoreInfo.java
rename to databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreResponse.java
index 249aeb544..b6848ebe1 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsMetastoreInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreResponse.java
@@ -8,12 +8,12 @@
import java.util.Objects;
@Generated
-public class AccountsMetastoreInfo {
+public class AccountsCreateMetastoreResponse {
/** */
@JsonProperty("metastore_info")
private MetastoreInfo metastoreInfo;
- public AccountsMetastoreInfo setMetastoreInfo(MetastoreInfo metastoreInfo) {
+ public AccountsCreateMetastoreResponse setMetastoreInfo(MetastoreInfo metastoreInfo) {
this.metastoreInfo = metastoreInfo;
return this;
}
@@ -26,7 +26,7 @@ public MetastoreInfo getMetastoreInfo() {
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
- AccountsMetastoreInfo that = (AccountsMetastoreInfo) o;
+ AccountsCreateMetastoreResponse that = (AccountsCreateMetastoreResponse) o;
return Objects.equals(metastoreInfo, that.metastoreInfo);
}
@@ -37,7 +37,7 @@ public int hashCode() {
@Override
public String toString() {
- return new ToStringer(AccountsMetastoreInfo.class)
+ return new ToStringer(AccountsCreateMetastoreResponse.class)
.add("metastoreInfo", metastoreInfo)
.toString();
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredential.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredential.java
index c1c33ea9f..a19caa490 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredential.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredential.java
@@ -12,17 +12,25 @@
public class AccountsCreateStorageCredential {
/** */
@JsonProperty("credential_info")
- private CreateStorageCredential credentialInfo;
+ private CreateAccountsStorageCredential credentialInfo;
/** Unity Catalog metastore ID */
@JsonIgnore private String metastoreId;
- public AccountsCreateStorageCredential setCredentialInfo(CreateStorageCredential credentialInfo) {
+ /**
+ * Optional, default false. Supplying true to this argument skips validation of the created set of
+ * credentials.
+ */
+ @JsonProperty("skip_validation")
+ private Boolean skipValidation;
+
+ public AccountsCreateStorageCredential setCredentialInfo(
+ CreateAccountsStorageCredential credentialInfo) {
this.credentialInfo = credentialInfo;
return this;
}
- public CreateStorageCredential getCredentialInfo() {
+ public CreateAccountsStorageCredential getCredentialInfo() {
return credentialInfo;
}
@@ -35,18 +43,28 @@ public String getMetastoreId() {
return metastoreId;
}
+ public AccountsCreateStorageCredential setSkipValidation(Boolean skipValidation) {
+ this.skipValidation = skipValidation;
+ return this;
+ }
+
+ public Boolean getSkipValidation() {
+ return skipValidation;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
AccountsCreateStorageCredential that = (AccountsCreateStorageCredential) o;
return Objects.equals(credentialInfo, that.credentialInfo)
- && Objects.equals(metastoreId, that.metastoreId);
+ && Objects.equals(metastoreId, that.metastoreId)
+ && Objects.equals(skipValidation, that.skipValidation);
}
@Override
public int hashCode() {
- return Objects.hash(credentialInfo, metastoreId);
+ return Objects.hash(credentialInfo, metastoreId, skipValidation);
}
@Override
@@ -54,6 +72,7 @@ public String toString() {
return new ToStringer(AccountsCreateStorageCredential.class)
.add("credentialInfo", credentialInfo)
.add("metastoreId", metastoreId)
+ .add("skipValidation", skipValidation)
.toString();
}
}
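A short sketch of the new flag in use, built only from the setters shown above; the credential payload is left minimal:

// Skip server-side validation of the supplied credential configuration.
AccountsCreateStorageCredential request =
    new AccountsCreateStorageCredential()
        .setMetastoreId("metastore-id")
        .setSkipValidation(true)
        .setCredentialInfo(new CreateAccountsStorageCredential().setName("my-credential"));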
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredentialInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredentialInfo.java
new file mode 100755
index 000000000..420d976e3
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredentialInfo.java
@@ -0,0 +1,45 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class AccountsCreateStorageCredentialInfo {
+ /** */
+ @JsonProperty("credential_info")
+ private StorageCredentialInfo credentialInfo;
+
+ public AccountsCreateStorageCredentialInfo setCredentialInfo(
+ StorageCredentialInfo credentialInfo) {
+ this.credentialInfo = credentialInfo;
+ return this;
+ }
+
+ public StorageCredentialInfo getCredentialInfo() {
+ return credentialInfo;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AccountsCreateStorageCredentialInfo that = (AccountsCreateStorageCredentialInfo) o;
+ return Objects.equals(credentialInfo, that.credentialInfo);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(credentialInfo);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsCreateStorageCredentialInfo.class)
+ .add("credentialInfo", credentialInfo)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreAssignmentResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreAssignmentResponse.java
new file mode 100755
index 000000000..bb80b9155
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreAssignmentResponse.java
@@ -0,0 +1,29 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import java.util.Objects;
+
+/** The metastore assignment was successfully deleted. */
+@Generated
+public class AccountsDeleteMetastoreAssignmentResponse {
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash();
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsDeleteMetastoreAssignmentResponse.class).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreResponse.java
new file mode 100755
index 000000000..c764feedf
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreResponse.java
@@ -0,0 +1,29 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import java.util.Objects;
+
+/** The metastore was successfully deleted. */
+@Generated
+public class AccountsDeleteMetastoreResponse {
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash();
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsDeleteMetastoreResponse.class).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteStorageCredentialResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteStorageCredentialResponse.java
new file mode 100755
index 000000000..125aa3bdf
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteStorageCredentialResponse.java
@@ -0,0 +1,29 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import java.util.Objects;
+
+/** The storage credential was successfully deleted. */
+@Generated
+public class AccountsDeleteStorageCredentialResponse {
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash();
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsDeleteStorageCredentialResponse.class).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsGetMetastoreResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsGetMetastoreResponse.java
new file mode 100755
index 000000000..2da0eb3f3
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsGetMetastoreResponse.java
@@ -0,0 +1,45 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/** The metastore was successfully returned. */
+@Generated
+public class AccountsGetMetastoreResponse {
+ /** */
+ @JsonProperty("metastore_info")
+ private MetastoreInfo metastoreInfo;
+
+ public AccountsGetMetastoreResponse setMetastoreInfo(MetastoreInfo metastoreInfo) {
+ this.metastoreInfo = metastoreInfo;
+ return this;
+ }
+
+ public MetastoreInfo getMetastoreInfo() {
+ return metastoreInfo;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AccountsGetMetastoreResponse that = (AccountsGetMetastoreResponse) o;
+ return Objects.equals(metastoreInfo, that.metastoreInfo);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(metastoreInfo);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsGetMetastoreResponse.class)
+ .add("metastoreInfo", metastoreInfo)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsListMetastoresResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsListMetastoresResponse.java
new file mode 100755
index 000000000..95620fe4d
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsListMetastoresResponse.java
@@ -0,0 +1,46 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+/** Metastores were returned successfully. */
+@Generated
+public class AccountsListMetastoresResponse {
+ /** An array of metastore information objects. */
+ @JsonProperty("metastores")
+ private Collection<MetastoreInfo> metastores;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CatalogsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CatalogsAPI.java
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
public Iterable<CatalogInfo> list(ListCatalogsRequest request) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CatalogsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CatalogsService.java
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
ListCatalogsResponse list(ListCatalogsRequest listCatalogsRequest);
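Because an empty page no longer signals the end of results, a hand-rolled client must loop on the token alone (the `Iterable` overload above already encapsulates this). A minimal sketch against some `CatalogsService` implementation; the response getter names are assumed from the generated convention:

ListCatalogsRequest request = new ListCatalogsRequest().setMaxResults(0L);
String pageToken = null;
do {
  ListCatalogsResponse page = catalogsService.list(request.setPageToken(pageToken));
  if (page.getCatalogs() != null) { // a page may be empty yet not final
    page.getCatalogs().forEach(c -> System.out.println(c.getName()));
  }
  pageToken = page.getNextPageToken(); // a missing token is the only terminator
} while (pageToken != null);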
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java
index f2bdbb8e4..3386cd766 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java
@@ -62,7 +62,17 @@ public ConnectionInfo get(GetConnectionRequest request) {
return impl.get(request);
}
- /** List all connections. */
+ /**
+ * List all connections.
+ *
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
+ */
public Iterable<ConnectionInfo> list(ListConnectionsRequest request) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsService.java
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
+ */
ListConnectionsResponse list(ListConnectionsRequest listConnectionsRequest);
/** Updates the connection that matches the supplied name. */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsMetastore.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsMetastore.java
new file mode 100755
index 000000000..2d82924c0
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsMetastore.java
@@ -0,0 +1,74 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class CreateAccountsMetastore {
+ /** The user-specified name of the metastore. */
+ @JsonProperty("name")
+ private String name;
+
+ /** Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). */
+ @JsonProperty("region")
+ private String region;
+
+ /** The storage root URL for metastore */
+ @JsonProperty("storage_root")
+ private String storageRoot;
+
+ public CreateAccountsMetastore setName(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public CreateAccountsMetastore setRegion(String region) {
+ this.region = region;
+ return this;
+ }
+
+ public String getRegion() {
+ return region;
+ }
+
+ public CreateAccountsMetastore setStorageRoot(String storageRoot) {
+ this.storageRoot = storageRoot;
+ return this;
+ }
+
+ public String getStorageRoot() {
+ return storageRoot;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ CreateAccountsMetastore that = (CreateAccountsMetastore) o;
+ return Objects.equals(name, that.name)
+ && Objects.equals(region, that.region)
+ && Objects.equals(storageRoot, that.storageRoot);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(name, region, storageRoot);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(CreateAccountsMetastore.class)
+ .add("name", name)
+ .add("region", region)
+ .add("storageRoot", storageRoot)
+ .toString();
+ }
+}
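This account-level creation payload is wired in through the `AccountsCreateMetastore` wrapper shown earlier; a sketch with illustrative values, using only the setters above:

AccountsCreateMetastore request =
    new AccountsCreateMetastore()
        .setMetastoreInfo(
            new CreateAccountsMetastore()
                .setName("primary-metastore")
                .setRegion("us-west-2")
                .setStorageRoot("s3://my-bucket/metastore"));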
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsStorageCredential.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsStorageCredential.java
new file mode 100755
index 000000000..8e636a900
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsStorageCredential.java
@@ -0,0 +1,167 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class CreateAccountsStorageCredential {
+ /** The AWS IAM role configuration. */
+ @JsonProperty("aws_iam_role")
+ private AwsIamRoleRequest awsIamRole;
+
+ /** The Azure managed identity configuration. */
+ @JsonProperty("azure_managed_identity")
+ private AzureManagedIdentityRequest azureManagedIdentity;
+
+ /** The Azure service principal configuration. */
+ @JsonProperty("azure_service_principal")
+ private AzureServicePrincipal azureServicePrincipal;
+
+ /** The Cloudflare API token configuration. */
+ @JsonProperty("cloudflare_api_token")
+ private CloudflareApiToken cloudflareApiToken;
+
+ /** Comment associated with the credential. */
+ @JsonProperty("comment")
+ private String comment;
+
+ /** The Databricks managed GCP service account configuration. */
+ @JsonProperty("databricks_gcp_service_account")
+ private DatabricksGcpServiceAccountRequest databricksGcpServiceAccount;
+
+ /**
+ * The credential name. The name must be unique among storage and service credentials within the
+ * metastore.
+ */
+ @JsonProperty("name")
+ private String name;
+
+ /**
+ * Whether the credential is usable only for read operations. Only applicable when purpose is
+ * **STORAGE**.
+ */
+ @JsonProperty("read_only")
+ private Boolean readOnly;
+
+ public CreateAccountsStorageCredential setAwsIamRole(AwsIamRoleRequest awsIamRole) {
+ this.awsIamRole = awsIamRole;
+ return this;
+ }
+
+ public AwsIamRoleRequest getAwsIamRole() {
+ return awsIamRole;
+ }
+
+ public CreateAccountsStorageCredential setAzureManagedIdentity(
+ AzureManagedIdentityRequest azureManagedIdentity) {
+ this.azureManagedIdentity = azureManagedIdentity;
+ return this;
+ }
+
+ public AzureManagedIdentityRequest getAzureManagedIdentity() {
+ return azureManagedIdentity;
+ }
+
+ public CreateAccountsStorageCredential setAzureServicePrincipal(
+ AzureServicePrincipal azureServicePrincipal) {
+ this.azureServicePrincipal = azureServicePrincipal;
+ return this;
+ }
+
+ public AzureServicePrincipal getAzureServicePrincipal() {
+ return azureServicePrincipal;
+ }
+
+ public CreateAccountsStorageCredential setCloudflareApiToken(
+ CloudflareApiToken cloudflareApiToken) {
+ this.cloudflareApiToken = cloudflareApiToken;
+ return this;
+ }
+
+ public CloudflareApiToken getCloudflareApiToken() {
+ return cloudflareApiToken;
+ }
+
+ public CreateAccountsStorageCredential setComment(String comment) {
+ this.comment = comment;
+ return this;
+ }
+
+ public String getComment() {
+ return comment;
+ }
+
+ public CreateAccountsStorageCredential setDatabricksGcpServiceAccount(
+ DatabricksGcpServiceAccountRequest databricksGcpServiceAccount) {
+ this.databricksGcpServiceAccount = databricksGcpServiceAccount;
+ return this;
+ }
+
+ public DatabricksGcpServiceAccountRequest getDatabricksGcpServiceAccount() {
+ return databricksGcpServiceAccount;
+ }
+
+ public CreateAccountsStorageCredential setName(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public CreateAccountsStorageCredential setReadOnly(Boolean readOnly) {
+ this.readOnly = readOnly;
+ return this;
+ }
+
+ public Boolean getReadOnly() {
+ return readOnly;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ CreateAccountsStorageCredential that = (CreateAccountsStorageCredential) o;
+ return Objects.equals(awsIamRole, that.awsIamRole)
+ && Objects.equals(azureManagedIdentity, that.azureManagedIdentity)
+ && Objects.equals(azureServicePrincipal, that.azureServicePrincipal)
+ && Objects.equals(cloudflareApiToken, that.cloudflareApiToken)
+ && Objects.equals(comment, that.comment)
+ && Objects.equals(databricksGcpServiceAccount, that.databricksGcpServiceAccount)
+ && Objects.equals(name, that.name)
+ && Objects.equals(readOnly, that.readOnly);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(
+ awsIamRole,
+ azureManagedIdentity,
+ azureServicePrincipal,
+ cloudflareApiToken,
+ comment,
+ databricksGcpServiceAccount,
+ name,
+ readOnly);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(CreateAccountsStorageCredential.class)
+ .add("awsIamRole", awsIamRole)
+ .add("azureManagedIdentity", azureManagedIdentity)
+ .add("azureServicePrincipal", azureServicePrincipal)
+ .add("cloudflareApiToken", cloudflareApiToken)
+ .add("comment", comment)
+ .add("databricksGcpServiceAccount", databricksGcpServiceAccount)
+ .add("name", name)
+ .add("readOnly", readOnly)
+ .toString();
+ }
+}
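Per credential, exactly one cloud-provider block is expected to be set. A hedged AWS example; `AwsIamRoleRequest.setRoleArn` is assumed from the existing catalog types rather than shown in this diff:

CreateAccountsStorageCredential awsCredential =
    new CreateAccountsStorageCredential()
        .setName("s3-read-only")
        .setReadOnly(true)
        .setComment("read-only access for reporting")
        .setAwsIamRole(
            new AwsIamRoleRequest().setRoleArn("arn:aws:iam::123456789012:role/demo"));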
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunction.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunction.java
index 83052df1f..56a9b59e3 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunction.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunction.java
@@ -9,7 +9,7 @@
@Generated
public class CreateFunction {
- /** Name of parent catalog. */
+ /** Name of parent Catalog. */
@JsonProperty("catalog_name")
private String catalogName;
@@ -33,7 +33,7 @@ public class CreateFunction {
@JsonProperty("full_data_type")
private String fullDataType;
- /** */
+ /** Function input parameters. */
@JsonProperty("input_params")
private FunctionParameterInfos inputParams;
@@ -63,8 +63,8 @@ public class CreateFunction {
/**
* Function language. When **EXTERNAL** is used, the language of the routine function should be
- * specified in the __external_language__ field, and the __return_params__ of the function cannot
- * be used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be
+ * specified in the **external_language** field, and the **return_params** of the function cannot
+ * be used (as **TABLE** return type is not supported), and the **sql_data_access** field must be
* **NO_SQL**.
*/
@JsonProperty("routine_body")
@@ -74,11 +74,11 @@ public class CreateFunction {
@JsonProperty("routine_definition")
private String routineDefinition;
- /** Function dependencies. */
+ /** function dependencies. */
@JsonProperty("routine_dependencies")
private DependencyList routineDependencies;
- /** Name of parent schema relative to its parent catalog. */
+ /** Name of parent Schema relative to its parent Catalog. */
@JsonProperty("schema_name")
private String schemaName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionParameterStyle.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionParameterStyle.java
index 691b8e514..721bb01f9 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionParameterStyle.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionParameterStyle.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** Function parameter style. **S** is the value for SQL. */
@Generated
public enum CreateFunctionParameterStyle {
S,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionRoutineBody.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionRoutineBody.java
index 6132a4c2a..f5b1b42e9 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionRoutineBody.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionRoutineBody.java
@@ -4,12 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * Function language. When **EXTERNAL** is used, the language of the routine function should be
- * specified in the __external_language__ field, and the __return_params__ of the function cannot be
- * used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be
- * **NO_SQL**.
- */
@Generated
public enum CreateFunctionRoutineBody {
EXTERNAL,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSecurityType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSecurityType.java
index a0b13a4ee..480b1279a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSecurityType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSecurityType.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** The security type of the function. */
@Generated
public enum CreateFunctionSecurityType {
DEFINER,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSqlDataAccess.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSqlDataAccess.java
index d8cb91987..28cb1b373 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSqlDataAccess.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSqlDataAccess.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** Function SQL data access. */
@Generated
public enum CreateFunctionSqlDataAccess {
CONTAINS_SQL,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateRegisteredModelRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateRegisteredModelRequest.java
index 520b0f60a..71a3650f9 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateRegisteredModelRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateRegisteredModelRequest.java
@@ -5,10 +5,22 @@
import com.databricks.sdk.support.Generated;
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
import java.util.Objects;
@Generated
public class CreateRegisteredModelRequest {
+ /** List of aliases associated with the registered model */
+ @JsonProperty("aliases")
+ private Collection<RegisteredModelAlias> aliases;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateVolumeRequestContent.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateVolumeRequestContent.java
+ * [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external
+ */
@JsonProperty("volume_type")
private VolumeType volumeType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/DeleteFunctionRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/DeleteFunctionRequest.java
index 3609ad11f..35a82e3ec 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/DeleteFunctionRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/DeleteFunctionRequest.java
@@ -17,7 +17,7 @@ public class DeleteFunctionRequest {
/**
* The fully-qualified name of the function (of the form
- * __catalog_name__.__schema_name__.__function__name__).
+ * __catalog_name__.__schema_name__.__function__name__) .
*/
@JsonIgnore private String name;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ExternalLocationsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ExternalLocationsAPI.java
index 49699b629..1f5fbae89 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ExternalLocationsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ExternalLocationsAPI.java
@@ -73,6 +73,14 @@ public ExternalLocationInfo get(GetExternalLocationRequest request) {
* caller must be a metastore admin, the owner of the external location, or a user that has some
* privilege on the external location. There is no guarantee of a specific ordering of the
* elements in the array.
+ *
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
public Iterable<ExternalLocationInfo> list(ListExternalLocationsRequest request) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ExternalLocationsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ExternalLocationsService.java
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
ListExternalLocationsResponse list(ListExternalLocationsRequest listExternalLocationsRequest);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfo.java
index 02b5d835d..69ca56bd1 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfo.java
@@ -16,7 +16,7 @@ public class FunctionInfo {
@JsonProperty("browse_only")
private Boolean browseOnly;
- /** Name of parent catalog. */
+ /** Name of parent Catalog. */
@JsonProperty("catalog_name")
private String catalogName;
@@ -48,7 +48,7 @@ public class FunctionInfo {
@JsonProperty("full_data_type")
private String fullDataType;
- /** Full name of function, in form of __catalog_name__.__schema_name__.__function__name__ */
+ /** Full name of Function, in form of **catalog_name**.**schema_name**.**function_name** */
@JsonProperty("full_name")
private String fullName;
@@ -56,7 +56,7 @@ public class FunctionInfo {
@JsonProperty("function_id")
private String functionId;
- /** */
+ /** Function input parameters. */
@JsonProperty("input_params")
private FunctionParameterInfos inputParams;
@@ -76,7 +76,7 @@ public class FunctionInfo {
@JsonProperty("name")
private String name;
- /** Username of current owner of function. */
+ /** Username of current owner of the function. */
@JsonProperty("owner")
private String owner;
@@ -94,8 +94,8 @@ public class FunctionInfo {
/**
* Function language. When **EXTERNAL** is used, the language of the routine function should be
- * specified in the __external_language__ field, and the __return_params__ of the function cannot
- * be used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be
+ * specified in the **external_language** field, and the **return_params** of the function cannot
+ * be used (as **TABLE** return type is not supported), and the **sql_data_access** field must be
* **NO_SQL**.
*/
@JsonProperty("routine_body")
@@ -105,11 +105,11 @@ public class FunctionInfo {
@JsonProperty("routine_definition")
private String routineDefinition;
- /** Function dependencies. */
+ /** function dependencies. */
@JsonProperty("routine_dependencies")
private DependencyList routineDependencies;
- /** Name of parent schema relative to its parent catalog. */
+ /** Name of parent Schema relative to its parent Catalog. */
@JsonProperty("schema_name")
private String schemaName;
@@ -129,11 +129,11 @@ public class FunctionInfo {
@JsonProperty("sql_path")
private String sqlPath;
- /** Time at which this function was created, in epoch milliseconds. */
+ /** Time at which this function was last modified, in epoch milliseconds. */
@JsonProperty("updated_at")
private Long updatedAt;
- /** Username of user who last modified function. */
+ /** Username of user who last modified the function. */
@JsonProperty("updated_by")
private String updatedBy;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoParameterStyle.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoParameterStyle.java
index fab71fe1e..608574f72 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoParameterStyle.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoParameterStyle.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** Function parameter style. **S** is the value for SQL. */
@Generated
public enum FunctionInfoParameterStyle {
S,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoRoutineBody.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoRoutineBody.java
index 24f8266e0..f69f1f670 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoRoutineBody.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoRoutineBody.java
@@ -4,12 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * Function language. When **EXTERNAL** is used, the language of the routine function should be
- * specified in the __external_language__ field, and the __return_params__ of the function cannot be
- * used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be
- * **NO_SQL**.
- */
@Generated
public enum FunctionInfoRoutineBody {
EXTERNAL,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSecurityType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSecurityType.java
index 5b45675b4..ce6545a69 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSecurityType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSecurityType.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** The security type of the function. */
@Generated
public enum FunctionInfoSecurityType {
DEFINER,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSqlDataAccess.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSqlDataAccess.java
index 69b362394..fee8adcc8 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSqlDataAccess.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSqlDataAccess.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** Function SQL data access. */
@Generated
public enum FunctionInfoSqlDataAccess {
CONTAINS_SQL,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfo.java
index 7e41e1dc0..ce5724ef9 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfo.java
@@ -13,7 +13,7 @@ public class FunctionParameterInfo {
@JsonProperty("comment")
private String comment;
- /** Name of parameter. */
+ /** Name of Parameter. */
@JsonProperty("name")
private String name;
@@ -21,11 +21,11 @@ public class FunctionParameterInfo {
@JsonProperty("parameter_default")
private String parameterDefault;
- /** */
+ /** Function parameter mode. */
@JsonProperty("parameter_mode")
private FunctionParameterMode parameterMode;
- /** */
+ /** Function parameter type. */
@JsonProperty("parameter_type")
private FunctionParameterType parameterType;
@@ -41,7 +41,7 @@ public class FunctionParameterInfo {
@JsonProperty("type_json")
private String typeJson;
- /** */
+ /** Name of type (INT, STRUCT, MAP, etc.) */
@JsonProperty("type_name")
private ColumnTypeName typeName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfos.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfos.java
index 8242101b9..f2941005c 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfos.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfos.java
@@ -10,7 +10,7 @@
@Generated
public class FunctionParameterInfos {
- /** The array of __FunctionParameterInfo__ definitions of the function's parameters. */
+ /** */
@JsonProperty("parameters")
private Collection<FunctionParameterInfo> parameters;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionsAPI.java
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
public Iterable<FunctionInfo> list(ListFunctionsRequest request) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionsService.java
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
ListFunctionsResponse list(ListFunctionsRequest listFunctionsRequest);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/GetAccountStorageCredentialRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/GetAccountStorageCredentialRequest.java
index 29c20c2fe..54943217b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/GetAccountStorageCredentialRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/GetAccountStorageCredentialRequest.java
@@ -12,7 +12,7 @@ public class GetAccountStorageCredentialRequest {
/** Unity Catalog metastore ID */
@JsonIgnore private String metastoreId;
- /** Name of the storage credential. */
+ /** Required. Name of the storage credential. */
@JsonIgnore private String storageCredentialName;
public GetAccountStorageCredentialRequest setMetastoreId(String metastoreId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountMetastoreAssignmentsResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountMetastoreAssignmentsResponse.java
index 5ca1d4263..be5de6d2e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountMetastoreAssignmentsResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountMetastoreAssignmentsResponse.java
@@ -8,7 +8,7 @@
import java.util.Collection;
import java.util.Objects;
-/** The list of workspaces to which the given metastore is assigned. */
+/** The metastore assignments were successfully returned. */
@Generated
public class ListAccountMetastoreAssignmentsResponse {
/** */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountStorageCredentialsResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountStorageCredentialsResponse.java
index a5da186e4..fde3a512a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountStorageCredentialsResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountStorageCredentialsResponse.java
@@ -8,6 +8,7 @@
import java.util.Collection;
import java.util.Objects;
+/** The metastore storage credentials were successfully returned. */
@Generated
public class ListAccountStorageCredentialsResponse {
/** An array of metastore storage credentials. */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListCatalogsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListCatalogsRequest.java
index 05fe12886..758e8afff 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListCatalogsRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListCatalogsRequest.java
@@ -18,6 +18,14 @@ public class ListCatalogsRequest {
@QueryParam("include_browse")
private Boolean includeBrowse;
+ /**
+ * Whether to include catalogs not bound to the workspace. Effective only if the user has
+ * permission to update the catalog–workspace binding.
+ */
+ @JsonIgnore
+ @QueryParam("include_unbound")
+ private Boolean includeUnbound;
+
/**
* Maximum number of catalogs to return. - when set to 0, the page length is set to a server
* configured value (recommended); - when set to a value greater than 0, the page length is the
@@ -45,6 +53,15 @@ public Boolean getIncludeBrowse() {
return includeBrowse;
}
+ public ListCatalogsRequest setIncludeUnbound(Boolean includeUnbound) {
+ this.includeUnbound = includeUnbound;
+ return this;
+ }
+
+ public Boolean getIncludeUnbound() {
+ return includeUnbound;
+ }
+
public ListCatalogsRequest setMaxResults(Long maxResults) {
this.maxResults = maxResults;
return this;
@@ -69,19 +86,21 @@ public boolean equals(Object o) {
if (o == null || getClass() != o.getClass()) return false;
ListCatalogsRequest that = (ListCatalogsRequest) o;
return Objects.equals(includeBrowse, that.includeBrowse)
+ && Objects.equals(includeUnbound, that.includeUnbound)
&& Objects.equals(maxResults, that.maxResults)
&& Objects.equals(pageToken, that.pageToken);
}
@Override
public int hashCode() {
- return Objects.hash(includeBrowse, maxResults, pageToken);
+ return Objects.hash(includeBrowse, includeUnbound, maxResults, pageToken);
}
@Override
public String toString() {
return new ToStringer(ListCatalogsRequest.class)
.add("includeBrowse", includeBrowse)
+ .add("includeUnbound", includeUnbound)
.add("maxResults", maxResults)
.add("pageToken", pageToken)
.toString();
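The same flag is added uniformly to the credential, external-location, and storage-credential list requests below; usage mirrors this, using only the setter added above:

// Only effective if the caller may update the catalog-workspace binding.
ListCatalogsRequest request = new ListCatalogsRequest().setIncludeUnbound(true);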
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListCredentialsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListCredentialsRequest.java
index 32dfc1888..c9b2c2dc2 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListCredentialsRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListCredentialsRequest.java
@@ -10,6 +10,14 @@
@Generated
public class ListCredentialsRequest {
+ /**
+ * Whether to include credentials not bound to the workspace. Effective only if the user has
+ * permission to update the credential–workspace binding.
+ */
+ @JsonIgnore
+ @QueryParam("include_unbound")
+ private Boolean includeUnbound;
+
/**
* Maximum number of credentials to return. - If not set, the default max page size is used. -
* When set to a value greater than 0, the page length is the minimum of this value and a
@@ -30,6 +38,15 @@ public class ListCredentialsRequest {
@QueryParam("purpose")
private CredentialPurpose purpose;
+ public ListCredentialsRequest setIncludeUnbound(Boolean includeUnbound) {
+ this.includeUnbound = includeUnbound;
+ return this;
+ }
+
+ public Boolean getIncludeUnbound() {
+ return includeUnbound;
+ }
+
public ListCredentialsRequest setMaxResults(Long maxResults) {
this.maxResults = maxResults;
return this;
@@ -62,19 +79,21 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ListCredentialsRequest that = (ListCredentialsRequest) o;
- return Objects.equals(maxResults, that.maxResults)
+ return Objects.equals(includeUnbound, that.includeUnbound)
+ && Objects.equals(maxResults, that.maxResults)
&& Objects.equals(pageToken, that.pageToken)
&& Objects.equals(purpose, that.purpose);
}
@Override
public int hashCode() {
- return Objects.hash(maxResults, pageToken, purpose);
+ return Objects.hash(includeUnbound, maxResults, pageToken, purpose);
}
@Override
public String toString() {
return new ToStringer(ListCredentialsRequest.class)
+ .add("includeUnbound", includeUnbound)
.add("maxResults", maxResults)
.add("pageToken", pageToken)
.add("purpose", purpose)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListExternalLocationsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListExternalLocationsRequest.java
index ce3805d49..71bfa3314 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListExternalLocationsRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListExternalLocationsRequest.java
@@ -18,6 +18,14 @@ public class ListExternalLocationsRequest {
@QueryParam("include_browse")
private Boolean includeBrowse;
+ /**
+ * Whether to include external locations not bound to the workspace. Effective only if the user
+ * has permission to update the location–workspace binding.
+ */
+ @JsonIgnore
+ @QueryParam("include_unbound")
+ private Boolean includeUnbound;
+
/**
* Maximum number of external locations to return. If not set, all the external locations are
* returned (not recommended). - when set to a value greater than 0, the page length is the
@@ -43,6 +51,15 @@ public Boolean getIncludeBrowse() {
return includeBrowse;
}
+ public ListExternalLocationsRequest setIncludeUnbound(Boolean includeUnbound) {
+ this.includeUnbound = includeUnbound;
+ return this;
+ }
+
+ public Boolean getIncludeUnbound() {
+ return includeUnbound;
+ }
+
public ListExternalLocationsRequest setMaxResults(Long maxResults) {
this.maxResults = maxResults;
return this;
@@ -67,19 +84,21 @@ public boolean equals(Object o) {
if (o == null || getClass() != o.getClass()) return false;
ListExternalLocationsRequest that = (ListExternalLocationsRequest) o;
return Objects.equals(includeBrowse, that.includeBrowse)
+ && Objects.equals(includeUnbound, that.includeUnbound)
&& Objects.equals(maxResults, that.maxResults)
&& Objects.equals(pageToken, that.pageToken);
}
@Override
public int hashCode() {
- return Objects.hash(includeBrowse, maxResults, pageToken);
+ return Objects.hash(includeBrowse, includeUnbound, maxResults, pageToken);
}
@Override
public String toString() {
return new ToStringer(ListExternalLocationsRequest.class)
.add("includeBrowse", includeBrowse)
+ .add("includeUnbound", includeUnbound)
.add("maxResults", maxResults)
.add("pageToken", pageToken)
.toString();
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListStorageCredentialsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListStorageCredentialsRequest.java
index c91be8012..9f1f82035 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListStorageCredentialsRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListStorageCredentialsRequest.java
@@ -10,6 +10,14 @@
@Generated
public class ListStorageCredentialsRequest {
+ /**
+ * Whether to include credentials not bound to the workspace. Effective only if the user has
+ * permission to update the credential–workspace binding.
+ */
+ @JsonIgnore
+ @QueryParam("include_unbound")
+ private Boolean includeUnbound;
+
/**
* Maximum number of storage credentials to return. If not set, all the storage credentials are
* returned (not recommended). - when set to a value greater than 0, the page length is the
@@ -26,6 +34,15 @@ public class ListStorageCredentialsRequest {
@QueryParam("page_token")
private String pageToken;
+ public ListStorageCredentialsRequest setIncludeUnbound(Boolean includeUnbound) {
+ this.includeUnbound = includeUnbound;
+ return this;
+ }
+
+ public Boolean getIncludeUnbound() {
+ return includeUnbound;
+ }
+
public ListStorageCredentialsRequest setMaxResults(Long maxResults) {
this.maxResults = maxResults;
return this;
@@ -49,17 +66,20 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ListStorageCredentialsRequest that = (ListStorageCredentialsRequest) o;
- return Objects.equals(maxResults, that.maxResults) && Objects.equals(pageToken, that.pageToken);
+ return Objects.equals(includeUnbound, that.includeUnbound)
+ && Objects.equals(maxResults, that.maxResults)
+ && Objects.equals(pageToken, that.pageToken);
}
@Override
public int hashCode() {
- return Objects.hash(maxResults, pageToken);
+ return Objects.hash(includeUnbound, maxResults, pageToken);
}
@Override
public String toString() {
return new ToStringer(ListStorageCredentialsRequest.class)
+ .add("includeUnbound", includeUnbound)
.add("maxResults", maxResults)
.add("pageToken", pageToken)
.toString();
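
The storage-credential flag behaves the same way; a parallel sketch under the same assumptions:

import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.catalog.ListStorageCredentialsRequest;
import com.databricks.sdk.service.catalog.StorageCredentialInfo;

public class ListUnboundStorageCredentials {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();
    for (StorageCredentialInfo cred :
        w.storageCredentials().list(new ListStorageCredentialsRequest().setIncludeUnbound(true))) {
      System.out.println(cred.getName());
    }
  }
}
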
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MetastoreAssignment.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MetastoreAssignment.java
index 0d9640d5f..3f78acc3b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MetastoreAssignment.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MetastoreAssignment.java
@@ -9,7 +9,10 @@
@Generated
public class MetastoreAssignment {
- /** The name of the default catalog in the metastore. */
+ /**
+ * The name of the default catalog in the metastore. This field is deprecated. Please use "Default
+ * Namespace API" to configure the default catalog for a Databricks workspace.
+ */
@JsonProperty("default_catalog_name")
private String defaultCatalogName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MetastoresAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MetastoresAPI.java
index e9d5011e1..ef832d957 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MetastoresAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MetastoresAPI.java
@@ -85,6 +85,14 @@ public MetastoreInfo get(GetMetastoreRequest request) {
* Gets an array of the available metastores (as __MetastoreInfo__ objects). The caller must be an
* admin to retrieve this info. There is no guarantee of a specific ordering of the elements in
* the array.
+ *
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
  public Iterable<MetastoreInfo> list(ListMetastoresRequest request) {
[...]
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MetastoresService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MetastoresService.java
[...]
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
ListMetastoresResponse list(ListMetastoresRequest listMetastoresRequest);
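
The practical consequence of the AIP-158 note: an empty page is not a terminator, only a missing next_page_token is. A hedged sketch of the loop a caller would write against the Service-level method (the response accessors getMetastores/getNextPageToken follow the SDK's naming convention but are assumed here; the Iterable returned by the API class implements this loop internally):

import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.catalog.ListMetastoresRequest;
import com.databricks.sdk.service.catalog.ListMetastoresResponse;
import com.databricks.sdk.service.catalog.MetastoreInfo;

public class PageThroughMetastores {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();
    ListMetastoresRequest req = new ListMetastoresRequest().setMaxResults(100L);
    while (true) {
      ListMetastoresResponse page = w.metastores().impl().list(req);
      if (page.getMetastores() != null) {
        for (MetastoreInfo m : page.getMetastores()) {
          System.out.println(m.getName());
        }
      }
      // A page may be empty and still carry a token; stop only when the
      // token is absent.
      if (page.getNextPageToken() == null) {
        break;
      }
      req.setPageToken(page.getNextPageToken());
    }
  }
}

The same contract applies to the schemas, storage credentials, system schemas, and tables list endpoints touched below.
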
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ModelVersionInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ModelVersionInfo.java
index 8dbd67ae1..dd2cc00b9 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ModelVersionInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ModelVersionInfo.java
@@ -14,13 +14,6 @@ public class ModelVersionInfo {
@JsonProperty("aliases")
  private Collection<RegisteredModelAlias> aliases;
[...]
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelsAPI.java
[...]
- * Note: The securable type for models is "FUNCTION". When using REST APIs (e.g. tagging, grants)
- * that specify a securable type, use "FUNCTION" as the securable type.
+ * Note: The securable type for models is FUNCTION. When using REST APIs (e.g. tagging, grants)
+ * that specify a securable type, use FUNCTION as the securable type.
*/
@Generated
public class RegisteredModelsAPI {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelsService.java
index a03772d04..ccc99737b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelsService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelsService.java
@@ -26,8 +26,8 @@
* metadata (comments, aliases) create a new model version, or update permissions on the registered
* model, users must be owners of the registered model.
*
- * Note: The securable type for models is "FUNCTION". When using REST APIs (e.g. tagging, grants)
- * that specify a securable type, use "FUNCTION" as the securable type.
+ * Note: The securable type for models is FUNCTION. When using REST APIs (e.g. tagging, grants)
+ * that specify a securable type, use FUNCTION as the securable type.
*
* This is the high-level interface, that contains generated methods.
*
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java
index f8657471a..3edf06b15 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java
@@ -70,6 +70,14 @@ public Iterable<SchemaInfo> [...]
[...]
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
  public Iterable<SchemaInfo> list(ListSchemasRequest request) {
[...]
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasService.java
[...]
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
ListSchemasResponse list(ListSchemasRequest listSchemasRequest);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SecurableKind.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SecurableKind.java
index fd09c0225..a02ad9204 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SecurableKind.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SecurableKind.java
@@ -4,7 +4,7 @@
import com.databricks.sdk.support.Generated;
-/** Latest kind: CONNECTION_PALANTIR_OAUTH_M2M = 263; Next id:264 */
+/** Latest kind: CONNECTION_REDSHIFT_IAM = 265; Next id:266 */
@Generated
public enum SecurableKind {
TABLE_DB_STORAGE,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SetRegisteredModelAliasRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SetRegisteredModelAliasRequest.java
index 3c6c39fde..a024d5ded 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SetRegisteredModelAliasRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SetRegisteredModelAliasRequest.java
@@ -4,18 +4,17 @@
import com.databricks.sdk.support.Generated;
import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
@Generated
public class SetRegisteredModelAliasRequest {
/** The name of the alias */
- @JsonProperty("alias")
- private String alias;
+ @JsonIgnore private String alias;
- /** Full name of the registered model */
- @JsonProperty("full_name")
- private String fullName;
+ /** The three-level (fully qualified) name of the registered model */
+ @JsonIgnore private String fullName;
/** The version number of the model version to which the alias points */
@JsonProperty("version_num")
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/StorageCredentialsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/StorageCredentialsAPI.java
index 2ce220029..89ee608ec 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/StorageCredentialsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/StorageCredentialsAPI.java
@@ -75,6 +75,14 @@ public StorageCredentialInfo get(GetStorageCredentialRequest request) {
* limited to only those storage credentials the caller has permission to access. If the caller is
* a metastore admin, retrieval of credentials is unrestricted. There is no guarantee of a
* specific ordering of the elements in the array.
+ *
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
  public Iterable<StorageCredentialInfo> list(ListStorageCredentialsRequest request) {
[...]
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/StorageCredentialsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/StorageCredentialsService.java
[...]
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
ListStorageCredentialsResponse list(ListStorageCredentialsRequest listStorageCredentialsRequest);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SystemSchemasAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SystemSchemasAPI.java
index d6784e1e6..a62bef4ca 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SystemSchemasAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SystemSchemasAPI.java
@@ -55,6 +55,14 @@ public Iterable<SystemSchemaInfo> [...]
[...]
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
  public Iterable<SystemSchemaInfo> list(ListSystemSchemasRequest request) {
[...]
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SystemSchemasService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SystemSchemasService.java
[...]
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
ListSystemSchemasResponse list(ListSystemSchemasRequest listSystemSchemasRequest);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SystemType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SystemType.java
index 324e1b850..527a800b8 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SystemType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SystemType.java
@@ -25,6 +25,7 @@ public enum SystemType {
SAP,
SERVICENOW,
SNOWFLAKE,
+ STREAM_NATIVE,
TABLEAU,
TERADATA,
WORKDAY,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesAPI.java
index 71c7e2a1b..7ae3a0063 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesAPI.java
@@ -116,6 +116,14 @@ public Iterable<TableInfo> [...]
[...]
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
  public Iterable<TableInfo> list(ListTablesRequest request) {
[...]
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesService.java
[...]
+ * NOTE: we recommend using max_results=0 to use the paginated version of this API. Unpaginated
+ * calls will be deprecated soon.
+ *
+ * PAGINATION BEHAVIOR: When using pagination (max_results >= 0), a page may contain zero
+ * results while still providing a next_page_token. Clients must continue reading pages until
+ * next_page_token is absent, which is the only indication that the end of results has been
+ * reached. This behavior follows Google AIP-158 guidelines.
*/
ListTablesResponse list(ListTablesRequest listTablesRequest);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsMetastore.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsMetastore.java
new file mode 100755
index 000000000..215f0eacf
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsMetastore.java
@@ -0,0 +1,134 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class UpdateAccountsMetastore {
+ /**
+ * The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta
+ * Sharing as the official name.
+ */
+ @JsonProperty("delta_sharing_organization_name")
+ private String deltaSharingOrganizationName;
+
+ /** The lifetime of delta sharing recipient token in seconds. */
+ @JsonProperty("delta_sharing_recipient_token_lifetime_in_seconds")
+ private Long deltaSharingRecipientTokenLifetimeInSeconds;
+
+ /** The scope of Delta Sharing enabled for the metastore. */
+ @JsonProperty("delta_sharing_scope")
+ private DeltaSharingScopeEnum deltaSharingScope;
+
+ /** The owner of the metastore. */
+ @JsonProperty("owner")
+ private String owner;
+
+ /** Privilege model version of the metastore, of the form `major.minor` (e.g., `1.0`). */
+ @JsonProperty("privilege_model_version")
+ private String privilegeModelVersion;
+
+ /** UUID of storage credential to access the metastore storage_root. */
+ @JsonProperty("storage_root_credential_id")
+ private String storageRootCredentialId;
+
+ public UpdateAccountsMetastore setDeltaSharingOrganizationName(
+ String deltaSharingOrganizationName) {
+ this.deltaSharingOrganizationName = deltaSharingOrganizationName;
+ return this;
+ }
+
+ public String getDeltaSharingOrganizationName() {
+ return deltaSharingOrganizationName;
+ }
+
+ public UpdateAccountsMetastore setDeltaSharingRecipientTokenLifetimeInSeconds(
+ Long deltaSharingRecipientTokenLifetimeInSeconds) {
+ this.deltaSharingRecipientTokenLifetimeInSeconds = deltaSharingRecipientTokenLifetimeInSeconds;
+ return this;
+ }
+
+ public Long getDeltaSharingRecipientTokenLifetimeInSeconds() {
+ return deltaSharingRecipientTokenLifetimeInSeconds;
+ }
+
+ public UpdateAccountsMetastore setDeltaSharingScope(DeltaSharingScopeEnum deltaSharingScope) {
+ this.deltaSharingScope = deltaSharingScope;
+ return this;
+ }
+
+ public DeltaSharingScopeEnum getDeltaSharingScope() {
+ return deltaSharingScope;
+ }
+
+ public UpdateAccountsMetastore setOwner(String owner) {
+ this.owner = owner;
+ return this;
+ }
+
+ public String getOwner() {
+ return owner;
+ }
+
+ public UpdateAccountsMetastore setPrivilegeModelVersion(String privilegeModelVersion) {
+ this.privilegeModelVersion = privilegeModelVersion;
+ return this;
+ }
+
+ public String getPrivilegeModelVersion() {
+ return privilegeModelVersion;
+ }
+
+ public UpdateAccountsMetastore setStorageRootCredentialId(String storageRootCredentialId) {
+ this.storageRootCredentialId = storageRootCredentialId;
+ return this;
+ }
+
+ public String getStorageRootCredentialId() {
+ return storageRootCredentialId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ UpdateAccountsMetastore that = (UpdateAccountsMetastore) o;
+ return Objects.equals(deltaSharingOrganizationName, that.deltaSharingOrganizationName)
+ && Objects.equals(
+ deltaSharingRecipientTokenLifetimeInSeconds,
+ that.deltaSharingRecipientTokenLifetimeInSeconds)
+ && Objects.equals(deltaSharingScope, that.deltaSharingScope)
+ && Objects.equals(owner, that.owner)
+ && Objects.equals(privilegeModelVersion, that.privilegeModelVersion)
+ && Objects.equals(storageRootCredentialId, that.storageRootCredentialId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(
+ deltaSharingOrganizationName,
+ deltaSharingRecipientTokenLifetimeInSeconds,
+ deltaSharingScope,
+ owner,
+ privilegeModelVersion,
+ storageRootCredentialId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(UpdateAccountsMetastore.class)
+ .add("deltaSharingOrganizationName", deltaSharingOrganizationName)
+ .add(
+ "deltaSharingRecipientTokenLifetimeInSeconds",
+ deltaSharingRecipientTokenLifetimeInSeconds)
+ .add("deltaSharingScope", deltaSharingScope)
+ .add("owner", owner)
+ .add("privilegeModelVersion", privilegeModelVersion)
+ .add("storageRootCredentialId", storageRootCredentialId)
+ .toString();
+ }
+}
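
Like the rest of the generated model classes, the new account-level bodies are built with fluent setters. A minimal sketch (all values are placeholders, and `DeltaSharingScopeEnum.INTERNAL_AND_EXTERNAL` is assumed to mirror the existing `DeltaSharingScope` values):

import com.databricks.sdk.service.catalog.DeltaSharingScopeEnum;
import com.databricks.sdk.service.catalog.UpdateAccountsMetastore;

public class BuildMetastoreUpdate {
  public static void main(String[] args) {
    UpdateAccountsMetastore update =
        new UpdateAccountsMetastore()
            .setOwner("uc-admins") // placeholder principal
            .setDeltaSharingScope(DeltaSharingScopeEnum.INTERNAL_AND_EXTERNAL) // assumed constant
            .setDeltaSharingRecipientTokenLifetimeInSeconds(86400L);
    System.out.println(update);
  }
}
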
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsStorageCredential.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsStorageCredential.java
new file mode 100755
index 000000000..22801de40
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsStorageCredential.java
@@ -0,0 +1,183 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class UpdateAccountsStorageCredential {
+ /** The AWS IAM role configuration. */
+ @JsonProperty("aws_iam_role")
+ private AwsIamRoleRequest awsIamRole;
+
+ /** The Azure managed identity configuration. */
+ @JsonProperty("azure_managed_identity")
+ private AzureManagedIdentityResponse azureManagedIdentity;
+
+ /** The Azure service principal configuration. */
+ @JsonProperty("azure_service_principal")
+ private AzureServicePrincipal azureServicePrincipal;
+
+ /** The Cloudflare API token configuration. */
+ @JsonProperty("cloudflare_api_token")
+ private CloudflareApiToken cloudflareApiToken;
+
+ /** Comment associated with the credential. */
+ @JsonProperty("comment")
+ private String comment;
+
+ /** The Databricks managed GCP service account configuration. */
+ @JsonProperty("databricks_gcp_service_account")
+ private DatabricksGcpServiceAccountRequest databricksGcpServiceAccount;
+
+ /**
+ * Whether the current securable is accessible from all workspaces or a specific set of
+ * workspaces.
+ */
+ @JsonProperty("isolation_mode")
+ private IsolationMode isolationMode;
+
+ /** Username of current owner of credential. */
+ @JsonProperty("owner")
+ private String owner;
+
+ /**
+ * Whether the credential is usable only for read operations. Only applicable when purpose is
+ * **STORAGE**.
+ */
+ @JsonProperty("read_only")
+ private Boolean readOnly;
+
+ public UpdateAccountsStorageCredential setAwsIamRole(AwsIamRoleRequest awsIamRole) {
+ this.awsIamRole = awsIamRole;
+ return this;
+ }
+
+ public AwsIamRoleRequest getAwsIamRole() {
+ return awsIamRole;
+ }
+
+ public UpdateAccountsStorageCredential setAzureManagedIdentity(
+ AzureManagedIdentityResponse azureManagedIdentity) {
+ this.azureManagedIdentity = azureManagedIdentity;
+ return this;
+ }
+
+ public AzureManagedIdentityResponse getAzureManagedIdentity() {
+ return azureManagedIdentity;
+ }
+
+ public UpdateAccountsStorageCredential setAzureServicePrincipal(
+ AzureServicePrincipal azureServicePrincipal) {
+ this.azureServicePrincipal = azureServicePrincipal;
+ return this;
+ }
+
+ public AzureServicePrincipal getAzureServicePrincipal() {
+ return azureServicePrincipal;
+ }
+
+ public UpdateAccountsStorageCredential setCloudflareApiToken(
+ CloudflareApiToken cloudflareApiToken) {
+ this.cloudflareApiToken = cloudflareApiToken;
+ return this;
+ }
+
+ public CloudflareApiToken getCloudflareApiToken() {
+ return cloudflareApiToken;
+ }
+
+ public UpdateAccountsStorageCredential setComment(String comment) {
+ this.comment = comment;
+ return this;
+ }
+
+ public String getComment() {
+ return comment;
+ }
+
+ public UpdateAccountsStorageCredential setDatabricksGcpServiceAccount(
+ DatabricksGcpServiceAccountRequest databricksGcpServiceAccount) {
+ this.databricksGcpServiceAccount = databricksGcpServiceAccount;
+ return this;
+ }
+
+ public DatabricksGcpServiceAccountRequest getDatabricksGcpServiceAccount() {
+ return databricksGcpServiceAccount;
+ }
+
+ public UpdateAccountsStorageCredential setIsolationMode(IsolationMode isolationMode) {
+ this.isolationMode = isolationMode;
+ return this;
+ }
+
+ public IsolationMode getIsolationMode() {
+ return isolationMode;
+ }
+
+ public UpdateAccountsStorageCredential setOwner(String owner) {
+ this.owner = owner;
+ return this;
+ }
+
+ public String getOwner() {
+ return owner;
+ }
+
+ public UpdateAccountsStorageCredential setReadOnly(Boolean readOnly) {
+ this.readOnly = readOnly;
+ return this;
+ }
+
+ public Boolean getReadOnly() {
+ return readOnly;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ UpdateAccountsStorageCredential that = (UpdateAccountsStorageCredential) o;
+ return Objects.equals(awsIamRole, that.awsIamRole)
+ && Objects.equals(azureManagedIdentity, that.azureManagedIdentity)
+ && Objects.equals(azureServicePrincipal, that.azureServicePrincipal)
+ && Objects.equals(cloudflareApiToken, that.cloudflareApiToken)
+ && Objects.equals(comment, that.comment)
+ && Objects.equals(databricksGcpServiceAccount, that.databricksGcpServiceAccount)
+ && Objects.equals(isolationMode, that.isolationMode)
+ && Objects.equals(owner, that.owner)
+ && Objects.equals(readOnly, that.readOnly);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(
+ awsIamRole,
+ azureManagedIdentity,
+ azureServicePrincipal,
+ cloudflareApiToken,
+ comment,
+ databricksGcpServiceAccount,
+ isolationMode,
+ owner,
+ readOnly);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(UpdateAccountsStorageCredential.class)
+ .add("awsIamRole", awsIamRole)
+ .add("azureManagedIdentity", azureManagedIdentity)
+ .add("azureServicePrincipal", azureServicePrincipal)
+ .add("cloudflareApiToken", cloudflareApiToken)
+ .add("comment", comment)
+ .add("databricksGcpServiceAccount", databricksGcpServiceAccount)
+ .add("isolationMode", isolationMode)
+ .add("owner", owner)
+ .add("readOnly", readOnly)
+ .toString();
+ }
+}
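
A matching sketch for the credential body, rotating an AWS IAM role (the ARN is a placeholder):

import com.databricks.sdk.service.catalog.AwsIamRoleRequest;
import com.databricks.sdk.service.catalog.UpdateAccountsStorageCredential;

public class BuildCredentialUpdate {
  public static void main(String[] args) {
    UpdateAccountsStorageCredential update =
        new UpdateAccountsStorageCredential()
            .setComment("rotated to a new role")
            .setReadOnly(false)
            .setAwsIamRole(
                new AwsIamRoleRequest()
                    .setRoleArn("arn:aws:iam::123456789012:role/uc-access")); // placeholder ARN
    System.out.println(update);
  }
}
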
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateFunction.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateFunction.java
index a785536bf..bdbf23c12 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateFunction.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateFunction.java
@@ -16,7 +16,7 @@ public class UpdateFunction {
*/
@JsonIgnore private String name;
- /** Username of current owner of function. */
+ /** Username of current owner of the function. */
@JsonProperty("owner")
private String owner;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateModelVersionRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateModelVersionRequest.java
index 4e220749c..b49178472 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateModelVersionRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateModelVersionRequest.java
@@ -6,20 +6,114 @@
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
import java.util.Objects;
@Generated
public class UpdateModelVersionRequest {
+ /** List of aliases associated with the model version */
+ @JsonProperty("aliases")
+ private Collection<RegisteredModelAlias> aliases;
[...]
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumeInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumeInfo.java
[...]
+ * [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external
+ */
@JsonProperty("volume_type")
private VolumeType volumeType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumeType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumeType.java
index fcb9f83ac..044f72a39 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumeType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumeType.java
@@ -4,13 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * The type of the volume. An external volume is located in the specified external location. A
- * managed volume is located in the default location which is specified by the parent schema, or the
- * parent catalog, or the Metastore. [Learn more]
- *
- * [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external
- */
@Generated
public enum VolumeType {
EXTERNAL,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesAPI.java
index 609ef16ba..7b0ea8f0a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesAPI.java
@@ -78,7 +78,7 @@ public Iterable<VolumeInfo> [...]
*
* The returned volumes are filtered based on the privileges of the calling user. For example,
* the metastore admin is able to list all the volumes. A regular user needs to be the owner or
- * have the **READ VOLUME** privilege on the volume to recieve the volumes in the response. For
+ * have the **READ VOLUME** privilege on the volume to receive the volumes in the response. For
* the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the
* parent catalog and the **USE_SCHEMA** privilege on the parent schema.
*
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesService.java
index fe725c7ef..7ff906c48 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesService.java
@@ -52,7 +52,7 @@ public interface VolumesService {
*
* The returned volumes are filtered based on the privileges of the calling user. For example,
* the metastore admin is able to list all the volumes. A regular user needs to be the owner or
- * have the **READ VOLUME** privilege on the volume to recieve the volumes in the response. For
+ * have the **READ VOLUME** privilege on the volume to receive the volumes in the response. For
* the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the
* parent catalog and the **USE_SCHEMA** privilege on the parent schema.
*
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Environment.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Environment.java
index 28acb8090..7e131ef28 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Environment.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Environment.java
@@ -36,13 +36,9 @@ public class Environment {
@JsonProperty("environment_version")
private String environmentVersion;
- /** Use `java_dependencies` instead. */
- @JsonProperty("jar_dependencies")
- private Collection<String> jarDependencies;
[...]
+ * 1. A Data URL with base64-encoded image data: `data:image/{type};base64,{base64-data}`.
+ * Example: `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUA...`
+ *
+ * 2. A FileStore file path for large images: `/plots/{filename}.png`. Example:
+ * `/plots/b6a7ad70-fb2c-4353-8aed-3f1e015174a4.png`
+ */
@JsonProperty("fileName")
private String fileName;
- /** */
+ /** List of image data for multiple images. Each element follows the same format as file_name. */
@JsonProperty("fileNames")
private Collection<String> fileNames;
[...]
+ * [examples]:
+ * https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html
+ */
+ @JsonProperty("quartz_cron_expression")
+ private String quartzCronExpression;
+
+ /**
+ * A Java timezone id. The schedule for a job will be resolved with respect to this timezone. See
+ * `Java TimeZone` for details.
[...]
+ * [create metric definition]:
+ * https://docs.databricks.com/en/lakehouse-monitoring/custom-metrics.html#create-definition
+ */
+ @JsonProperty("definition")
+ private String definition;
+
+ /**
+ * A list of column names in the input table the metric should be computed for. Can use
+ * ``":table"`` to indicate that the metric needs information from multiple columns.
+ */
+ @JsonProperty("input_columns")
+ private Collection<String> inputColumns;
[...]
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityAPI.java
[...]
+ * For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog, have **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the
+ * table 2. have **USE_CATALOG** on the table's parent catalog, be an owner of the table's parent
+ * schema, and have **SELECT** access on the table. 3. have the following permissions: -
+ * **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema -
+ * be an owner of the table.
+ *
+ * Workspace assets, such as the dashboard, will be created in the workspace where this call
+ * was made.
+ */
+ public Monitor createMonitor(CreateMonitorRequest request) {
+ return impl.createMonitor(request);
+ }
+
+ /**
+ * Creates a refresh. Currently only supported for the `table` `object_type`.
+ *
+ * The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG**
+ * on the table's parent catalog and be an owner of the table's parent schema 3. have the
+ * following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the
+ * table's parent schema - be an owner of the table
+ */
+ public Refresh createRefresh(CreateRefreshRequest request) {
+ return impl.createRefresh(request);
+ }
+
+ public void deleteMonitor(String objectType, String objectId) {
+ deleteMonitor(new DeleteMonitorRequest().setObjectType(objectType).setObjectId(objectId));
+ }
+
+ /**
+ * Delete a data quality monitor on Unity Catalog object.
+ *
+ * For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table.
+ *
+ * Note that the metric tables and dashboard will not be deleted as part of this call; those
+ * assets must be manually cleaned up (if desired).
+ */
+ public void deleteMonitor(DeleteMonitorRequest request) {
+ impl.deleteMonitor(request);
+ }
+
+ public void deleteRefresh(String objectType, String objectId, long refreshId) {
+ deleteRefresh(
+ new DeleteRefreshRequest()
+ .setObjectType(objectType)
+ .setObjectId(objectId)
+ .setRefreshId(refreshId));
+ }
+
+ /** (Unimplemented) Delete a refresh */
+ public void deleteRefresh(DeleteRefreshRequest request) {
+ impl.deleteRefresh(request);
+ }
+
+ public Monitor getMonitor(String objectType, String objectId) {
+ return getMonitor(new GetMonitorRequest().setObjectType(objectType).setObjectId(objectId));
+ }
+
+ /**
+ * Read a data quality monitor on Unity Catalog object.
+ *
+ * For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema. 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table.
+ *
+ * The returned information includes configuration values, as well as information on assets
+ * created by the monitor. Some information (e.g., dashboard) may be filtered out if the caller is
+ * in a different workspace than where the monitor was created.
+ */
+ public Monitor getMonitor(GetMonitorRequest request) {
+ return impl.getMonitor(request);
+ }
+
+ public Refresh getRefresh(String objectType, String objectId, long refreshId) {
+ return getRefresh(
+ new GetRefreshRequest()
+ .setObjectType(objectType)
+ .setObjectId(objectId)
+ .setRefreshId(refreshId));
+ }
+
+ /**
+ * Get data quality monitor refresh.
+ *
+ * For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table.
+ */
+ public Refresh getRefresh(GetRefreshRequest request) {
+ return impl.getRefresh(request);
+ }
+
+ /** (Unimplemented) List data quality monitors. */
+ public Iterable<Monitor> listMonitor(ListMonitorRequest request) {
[...]
+ /**
+ * List data quality monitor refreshes.
+ *
+ * For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table.
+ */
+ public Iterable<Refresh> listRefresh(ListRefreshRequest request) {
[...]
+ /**
+ * Update a data quality monitor on Unity Catalog object.
+ *
+ * For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table.
+ */
+ public Monitor updateMonitor(UpdateMonitorRequest request) {
+ return impl.updateMonitor(request);
+ }
+
+ /** (Unimplemented) Update a refresh */
+ public Refresh updateRefresh(UpdateRefreshRequest request) {
+ return impl.updateRefresh(request);
+ }
+
+ public DataQualityService impl() {
+ return impl;
+ }
+}
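
A hedged end-to-end sketch of the new high-level API. The `w.dataQuality()` accessor name is an assumption (the WorkspaceClient wiring is outside this hunk), and the UUID and refresh id are placeholders:

import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.dataquality.Monitor;
import com.databricks.sdk.service.dataquality.Refresh;

public class ReadTableMonitor {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();
    String tableId = "11111111-2222-3333-4444-555555555555"; // placeholder UUID
    // Read the monitor for a table securable.
    Monitor monitor = w.dataQuality().getMonitor("table", tableId);
    System.out.println(monitor);
    // Inspect one refresh of that monitor via the convenience overload.
    Refresh refresh = w.dataQuality().getRefresh("table", tableId, 42L);
    System.out.println(refresh);
  }
}
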
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityImpl.java
new file mode 100755
index 000000000..7411d79f5
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityImpl.java
@@ -0,0 +1,190 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.core.ApiClient;
+import com.databricks.sdk.core.DatabricksException;
+import com.databricks.sdk.core.http.Request;
+import com.databricks.sdk.support.Generated;
+import java.io.IOException;
+
+/** Package-local implementation of DataQuality */
+@Generated
+class DataQualityImpl implements DataQualityService {
+ private final ApiClient apiClient;
+
+ public DataQualityImpl(ApiClient apiClient) {
+ this.apiClient = apiClient;
+ }
+
+ @Override
+ public CancelRefreshResponse cancelRefresh(CancelRefreshRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s/refreshes/%s/cancel",
+ request.getObjectType(), request.getObjectId(), request.getRefreshId());
+ try {
+ Request req = new Request("POST", path, apiClient.serialize(request));
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ req.withHeader("Content-Type", "application/json");
+ return apiClient.execute(req, CancelRefreshResponse.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public Monitor createMonitor(CreateMonitorRequest request) {
+ String path = "/api/data-quality/v1/monitors";
+ try {
+ Request req = new Request("POST", path, apiClient.serialize(request.getMonitor()));
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ req.withHeader("Content-Type", "application/json");
+ return apiClient.execute(req, Monitor.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public Refresh createRefresh(CreateRefreshRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s/refreshes",
+ request.getObjectType(), request.getObjectId());
+ try {
+ Request req = new Request("POST", path, apiClient.serialize(request.getRefresh()));
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ req.withHeader("Content-Type", "application/json");
+ return apiClient.execute(req, Refresh.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public void deleteMonitor(DeleteMonitorRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s", request.getObjectType(), request.getObjectId());
+ try {
+ Request req = new Request("DELETE", path);
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ apiClient.execute(req, Void.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public void deleteRefresh(DeleteRefreshRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s/refreshes/%s",
+ request.getObjectType(), request.getObjectId(), request.getRefreshId());
+ try {
+ Request req = new Request("DELETE", path);
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ apiClient.execute(req, Void.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public Monitor getMonitor(GetMonitorRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s", request.getObjectType(), request.getObjectId());
+ try {
+ Request req = new Request("GET", path);
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ return apiClient.execute(req, Monitor.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public Refresh getRefresh(GetRefreshRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s/refreshes/%s",
+ request.getObjectType(), request.getObjectId(), request.getRefreshId());
+ try {
+ Request req = new Request("GET", path);
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ return apiClient.execute(req, Refresh.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public ListMonitorResponse listMonitor(ListMonitorRequest request) {
+ String path = "/api/data-quality/v1/monitors";
+ try {
+ Request req = new Request("GET", path);
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ return apiClient.execute(req, ListMonitorResponse.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public ListRefreshResponse listRefresh(ListRefreshRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s/refreshes",
+ request.getObjectType(), request.getObjectId());
+ try {
+ Request req = new Request("GET", path);
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ return apiClient.execute(req, ListRefreshResponse.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public Monitor updateMonitor(UpdateMonitorRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s", request.getObjectType(), request.getObjectId());
+ try {
+ Request req = new Request("PATCH", path, apiClient.serialize(request.getMonitor()));
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ req.withHeader("Content-Type", "application/json");
+ return apiClient.execute(req, Monitor.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public Refresh updateRefresh(UpdateRefreshRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s/refreshes/%s",
+ request.getObjectType(), request.getObjectId(), request.getRefreshId());
+ try {
+ Request req = new Request("PATCH", path, apiClient.serialize(request.getRefresh()));
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ req.withHeader("Content-Type", "application/json");
+ return apiClient.execute(req, Refresh.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityService.java
new file mode 100755
index 000000000..1e5487768
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityService.java
@@ -0,0 +1,111 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+
+/**
+ * Manage the data quality of Unity Catalog objects (currently supports `schema` and `table`)
+ *
+ * This is the high-level interface, that contains generated methods.
+ *
+ * Evolving: this interface is under development. Method signatures may change.
+ */
+@Generated
+public interface DataQualityService {
+ /**
+ * Cancels a data quality monitor refresh. Currently only supported for the `table` `object_type`.
+ */
+ CancelRefreshResponse cancelRefresh(CancelRefreshRequest cancelRefreshRequest);
+
+ /**
+ * Create a data quality monitor on a Unity Catalog object. The caller must provide either
+ * `anomaly_detection_config` for a schema monitor or `data_profiling_config` for a table monitor.
+ *
+ * For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog, have **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the
+ * table 2. have **USE_CATALOG** on the table's parent catalog, be an owner of the table's parent
+ * schema, and have **SELECT** access on the table. 3. have the following permissions: -
+ * **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema -
+ * be an owner of the table.
+ *
+ * Workspace assets, such as the dashboard, will be created in the workspace where this call
+ * was made.
+ */
+ Monitor createMonitor(CreateMonitorRequest createMonitorRequest);
+
+ /**
+ * Creates a refresh. Currently only supported for the `table` `object_type`.
+ *
+ * The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG**
+ * on the table's parent catalog and be an owner of the table's parent schema 3. have the
+ * following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the
+ * table's parent schema - be an owner of the table
+ */
+ Refresh createRefresh(CreateRefreshRequest createRefreshRequest);
+
+ /**
+ * Delete a data quality monitor on Unity Catalog object.
+ *
+ * For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table.
+ *
+ * Note that the metric tables and dashboard will not be deleted as part of this call; those
+ * assets must be manually cleaned up (if desired).
+ */
+ void deleteMonitor(DeleteMonitorRequest deleteMonitorRequest);
+
+ /** (Unimplemented) Delete a refresh */
+ void deleteRefresh(DeleteRefreshRequest deleteRefreshRequest);
+
+ /**
+ * Read a data quality monitor on Unity Catalog object.
+ *
+ * For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema. 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table.
+ *
+ * The returned information includes configuration values, as well as information on assets
+ * created by the monitor. Some information (e.g., dashboard) may be filtered out if the caller is
+ * in a different workspace than where the monitor was created.
+ */
+ Monitor getMonitor(GetMonitorRequest getMonitorRequest);
+
+ /**
+ * Get data quality monitor refresh.
+ *
+ * For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table.
+ */
+ Refresh getRefresh(GetRefreshRequest getRefreshRequest);
+
+ /** (Unimplemented) List data quality monitors. */
+ ListMonitorResponse listMonitor(ListMonitorRequest listMonitorRequest);
+
+ /**
+ * List data quality monitor refreshes.
+ *
+ * For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table.
+ */
+ ListRefreshResponse listRefresh(ListRefreshRequest listRefreshRequest);
+
+ /**
+ * Update a data quality monitor on Unity Catalog object.
+ *
+ * For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table.
+ */
+ Monitor updateMonitor(UpdateMonitorRequest updateMonitorRequest);
+
+ /** (Unimplemented) Update a refresh */
+ Refresh updateRefresh(UpdateRefreshRequest updateRefreshRequest);
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteMonitorRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteMonitorRequest.java
new file mode 100755
index 000000000..0479ce355
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteMonitorRequest.java
@@ -0,0 +1,56 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.Objects;
+
+@Generated
+public class DeleteMonitorRequest {
+ /** The UUID of the request object. For example, schema id. */
+ @JsonIgnore private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: `schema` or `table`. */
+ @JsonIgnore private String objectType;
+
+ public DeleteMonitorRequest setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public DeleteMonitorRequest setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ DeleteMonitorRequest that = (DeleteMonitorRequest) o;
+ return Objects.equals(objectId, that.objectId) && Objects.equals(objectType, that.objectType);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(objectId, objectType);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(DeleteMonitorRequest.class)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteRefreshRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteRefreshRequest.java
new file mode 100755
index 000000000..6ec839ce9
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteRefreshRequest.java
@@ -0,0 +1,71 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.Objects;
+
+@Generated
+public class DeleteRefreshRequest {
+ /** The UUID of the request object. For example, schema id. */
+ @JsonIgnore private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: `schema` or `table`. */
+ @JsonIgnore private String objectType;
+
+ /** Unique id of the refresh operation. */
+ @JsonIgnore private Long refreshId;
+
+ public DeleteRefreshRequest setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public DeleteRefreshRequest setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ public DeleteRefreshRequest setRefreshId(Long refreshId) {
+ this.refreshId = refreshId;
+ return this;
+ }
+
+ public Long getRefreshId() {
+ return refreshId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ DeleteRefreshRequest that = (DeleteRefreshRequest) o;
+ return Objects.equals(objectId, that.objectId)
+ && Objects.equals(objectType, that.objectType)
+ && Objects.equals(refreshId, that.refreshId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(objectId, objectType, refreshId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(DeleteRefreshRequest.class)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .add("refreshId", refreshId)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetMonitorRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetMonitorRequest.java
new file mode 100755
index 000000000..cdb1e5136
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetMonitorRequest.java
@@ -0,0 +1,56 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.Objects;
+
+@Generated
+public class GetMonitorRequest {
+ /** The UUID of the request object. For example, schema id. */
+ @JsonIgnore private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: `schema` or `table`. */
+ @JsonIgnore private String objectType;
+
+ public GetMonitorRequest setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public GetMonitorRequest setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ GetMonitorRequest that = (GetMonitorRequest) o;
+ return Objects.equals(objectId, that.objectId) && Objects.equals(objectType, that.objectType);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(objectId, objectType);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(GetMonitorRequest.class)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetRefreshRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetRefreshRequest.java
new file mode 100755
index 000000000..9280dce0f
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetRefreshRequest.java
@@ -0,0 +1,71 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.Objects;
+
+@Generated
+public class GetRefreshRequest {
+ /** The UUID of the request object. For example, schema id. */
+ @JsonIgnore private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: `schema` or `table`. */
+ @JsonIgnore private String objectType;
+
+ /** Unique id of the refresh operation. */
+ @JsonIgnore private Long refreshId;
+
+ public GetRefreshRequest setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public GetRefreshRequest setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ public GetRefreshRequest setRefreshId(Long refreshId) {
+ this.refreshId = refreshId;
+ return this;
+ }
+
+ public Long getRefreshId() {
+ return refreshId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ GetRefreshRequest that = (GetRefreshRequest) o;
+ return Objects.equals(objectId, that.objectId)
+ && Objects.equals(objectType, that.objectType)
+ && Objects.equals(refreshId, that.refreshId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(objectId, objectType, refreshId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(GetRefreshRequest.class)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .add("refreshId", refreshId)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/InferenceLogConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/InferenceLogConfig.java
new file mode 100755
index 000000000..f576433ae
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/InferenceLogConfig.java
@@ -0,0 +1,124 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+/** Inference log configuration. */
+@Generated
+public class InferenceLogConfig {
+ /**
+ * List of granularities to use when aggregating data into time windows based on their timestamp.
+ */
+ @JsonProperty("granularities")
+ private Collection<String> granularities;
- * Only used when `private_access_level` is set to `ENDPOINT`. This is an allow list of VPC
- * endpoints that in your account that can connect to your workspace over AWS PrivateLink.
- *
- * If hybrid access to your workspace is enabled by setting `public_access_enabled` to `true`,
- * this control only works for PrivateLink connections. To control how your workspace is accessed
- * via public internet, see [IP access lists].
- *
- * [IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html
+ * An array of Databricks VPC endpoint IDs. This is the Databricks ID returned when registering
+ * the VPC endpoint configuration in your Databricks account. This is not the ID of the VPC
+ * endpoint in AWS. Only used when private_access_level is set to ENDPOINT. This is an allow list
+ * of VPC endpoints registered in your Databricks account that can connect to your workspace over
+ * AWS PrivateLink. Note: If hybrid access to your workspace is enabled by setting
+ * public_access_enabled to true, this control only works for PrivateLink connections. To control
+ * how your workspace is accessed via public internet, see IP access lists.
*/
@JsonProperty("allowed_vpc_endpoint_ids")
private Collection<String> allowedVpcEndpointIds;
- * To set this value, you must have a deployment name prefix. Contact your Databricks account
- * team to add an account deployment name prefix to your account.
- *
- * Workspace deployment names follow the account prefix and a hyphen. For example, if your
- * account's deployment prefix is `acme` and the workspace deployment name is `workspace-1`, the
- * JSON response for the `deployment_name` field becomes `acme-workspace-1`. The workspace URL
- * would be `acme-workspace-1.cloud.databricks.com`.
- *
- * You can also set the `deployment_name` to the reserved keyword `EMPTY` if you want the
- * deployment name to only include the deployment prefix. For example, if your account's
- * deployment prefix is `acme` and the workspace deployment name is `EMPTY`, the `deployment_name`
- * becomes `acme` only and the workspace URL is `acme.cloud.databricks.com`.
- *
- * This value must be unique across all non-deleted deployments across all AWS regions.
- *
- * If a new workspace omits this property, the server generates a unique deployment name for
- * you with the pattern `dbc-xxxxxxxx-xxxx`.
+ * web application and REST APIs is `<deployment-name>.cloud.databricks.com`.
+ * Before configuring PrivateLink, read the [Databricks article about PrivateLink].",
*
* [AWS PrivateLink]: https://aws.amazon.com/privatelink/ [Databricks article about
* PrivateLink]:
@@ -115,19 +113,19 @@ public class CreateWorkspaceRequest {
@JsonProperty("private_access_settings_id")
private String privateAccessSettingsId;
- /** The ID of the workspace's storage configuration object. */
+ /** ID of the workspace's storage configuration object. */
@JsonProperty("storage_configuration_id")
private String storageConfigurationId;
/**
* The ID of the workspace's storage encryption key configuration object. This is used to encrypt
* the workspace's root S3 bucket (root DBFS and system data) and, optionally, cluster EBS
- * volumes. The provided key configuration object property `use_cases` must contain `STORAGE`.
+ * volumes. The provided key configuration object property use_cases must contain STORAGE.
*/
@JsonProperty("storage_customer_managed_key_id")
private String storageCustomerManagedKeyId;
- /** The workspace's human-readable name. */
+ /** The human-readable name of the workspace. */
@JsonProperty("workspace_name")
private String workspaceName;
@@ -159,6 +157,15 @@ public CloudResourceContainer getCloudResourceContainer() {
return cloudResourceContainer;
}
+ public CreateWorkspaceRequest setComputeMode(CustomerFacingComputeMode computeMode) {
+ this.computeMode = computeMode;
+ return this;
+ }
+
+ public CustomerFacingComputeMode getComputeMode() {
+ return computeMode;
+ }
+
public CreateWorkspaceRequest setCredentialsId(String credentialsId) {
this.credentialsId = credentialsId;
return this;
@@ -205,15 +212,6 @@ public GkeConfig getGkeConfig() {
return gkeConfig;
}
- public CreateWorkspaceRequest setIsNoPublicIpEnabled(Boolean isNoPublicIpEnabled) {
- this.isNoPublicIpEnabled = isNoPublicIpEnabled;
- return this;
- }
-
- public Boolean getIsNoPublicIpEnabled() {
- return isNoPublicIpEnabled;
- }
-
public CreateWorkspaceRequest setLocation(String location) {
this.location = location;
return this;
@@ -295,12 +293,12 @@ public boolean equals(Object o) {
return Objects.equals(awsRegion, that.awsRegion)
&& Objects.equals(cloud, that.cloud)
&& Objects.equals(cloudResourceContainer, that.cloudResourceContainer)
+ && Objects.equals(computeMode, that.computeMode)
&& Objects.equals(credentialsId, that.credentialsId)
&& Objects.equals(customTags, that.customTags)
&& Objects.equals(deploymentName, that.deploymentName)
&& Objects.equals(gcpManagedNetworkConfig, that.gcpManagedNetworkConfig)
&& Objects.equals(gkeConfig, that.gkeConfig)
- && Objects.equals(isNoPublicIpEnabled, that.isNoPublicIpEnabled)
&& Objects.equals(location, that.location)
&& Objects.equals(
managedServicesCustomerManagedKeyId, that.managedServicesCustomerManagedKeyId)
@@ -318,12 +316,12 @@ public int hashCode() {
awsRegion,
cloud,
cloudResourceContainer,
+ computeMode,
credentialsId,
customTags,
deploymentName,
gcpManagedNetworkConfig,
gkeConfig,
- isNoPublicIpEnabled,
location,
managedServicesCustomerManagedKeyId,
networkId,
@@ -340,12 +338,12 @@ public String toString() {
.add("awsRegion", awsRegion)
.add("cloud", cloud)
.add("cloudResourceContainer", cloudResourceContainer)
+ .add("computeMode", computeMode)
.add("credentialsId", credentialsId)
.add("customTags", customTags)
.add("deploymentName", deploymentName)
.add("gcpManagedNetworkConfig", gcpManagedNetworkConfig)
.add("gkeConfig", gkeConfig)
- .add("isNoPublicIpEnabled", isNoPublicIpEnabled)
.add("location", location)
.add("managedServicesCustomerManagedKeyId", managedServicesCustomerManagedKeyId)
.add("networkId", networkId)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsAPI.java
index 40ca82451..035ea5659 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsAPI.java
@@ -48,16 +48,16 @@ public Credential create(CreateCredentialRequest request) {
return impl.create(request);
}
- public void delete(String credentialsId) {
- delete(new DeleteCredentialRequest().setCredentialsId(credentialsId));
+ public Credential delete(String credentialsId) {
+ return delete(new DeleteCredentialRequest().setCredentialsId(credentialsId));
}
/**
* Deletes a Databricks credential configuration object for an account, both specified by ID. You
* cannot delete a credential that is associated with any workspace.
*/
- public void delete(DeleteCredentialRequest request) {
- impl.delete(request);
+ public Credential delete(DeleteCredentialRequest request) {
+ return impl.delete(request);
}
public Credential get(String credentialsId) {
@@ -69,7 +69,7 @@ public Credential get(GetCredentialRequest request) {
return impl.get(request);
}
- /** Gets all Databricks credential configurations associated with an account specified by ID. */
+ /** List Databricks credential configuration objects for an account, specified by ID. */
public Iterable<Credential> list() {
- * **Important**: Customer-managed keys are supported only for some deployment types,
- * subscription types, and AWS regions.
- *
- * This operation is available only if your account is on the E2 version of the platform.
- */
+ /** Lists Databricks customer-managed key configurations for an account. */
public Iterable<CustomerManagedKey> list() {
- * **Important**: Customer-managed keys are supported only for some deployment types,
- * subscription types, and AWS regions.
- *
- * This operation is available only if your account is on the E2 version of the platform.
- */
+ /** Lists Databricks customer-managed key configurations for an account. */
Collection<CustomerManagedKey> list();
- * [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html
- */
@Generated
public enum EndpointUseCase {
DATAPLANE_RELAY_ACCESS,
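Stepping back from the hunks above: across this diff, account-level delete methods change from void to returning the deleted configuration (seen here for credentials, and below for networks, private access settings, storage, and VPC endpoints). A minimal sketch of the new call shape, assuming an AccountClient authenticated from the environment and a hypothetical credential configuration ID:

import com.databricks.sdk.AccountClient;
import com.databricks.sdk.service.provisioning.Credential;

public class DeleteReturnsObjectSketch {
  public static void main(String[] args) {
    AccountClient account = new AccountClient(); // account-level auth from the environment

    // delete(...) previously returned void; it now returns the deleted
    // configuration, so the last known state can be logged or audited.
    Credential deleted = account.credentials().delete("ccc-111"); // hypothetical config ID
    System.out.println("Deleted credential configuration: " + deleted);
  }
}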
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ErrorType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ErrorType.java
index 38b0ddcdb..5ccb0ab61 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ErrorType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ErrorType.java
@@ -6,8 +6,8 @@
import com.fasterxml.jackson.annotation.JsonProperty;
/**
- * The AWS resource associated with this error: credentials, VPC, subnet, security group, or network
- * ACL.
+ * ErrorType and WarningType are used to represent the type of error or warning by NetworkHealth and
+ * NetworkWarning defined in central/api/accounts/accounts.proto
*/
@Generated
public enum ErrorType {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ExternalCustomerInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ExternalCustomerInfo.java
deleted file mode 100755
index 7654c68e7..000000000
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ExternalCustomerInfo.java
+++ /dev/null
@@ -1,74 +0,0 @@
-// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
-
-package com.databricks.sdk.service.provisioning;
-
-import com.databricks.sdk.support.Generated;
-import com.databricks.sdk.support.ToStringer;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import java.util.Objects;
-
-@Generated
-public class ExternalCustomerInfo {
- /** Email of the authoritative user. */
- @JsonProperty("authoritative_user_email")
- private String authoritativeUserEmail;
-
- /** The authoritative user full name. */
- @JsonProperty("authoritative_user_full_name")
- private String authoritativeUserFullName;
-
- /** The legal entity name for the external workspace */
- @JsonProperty("customer_name")
- private String customerName;
-
- public ExternalCustomerInfo setAuthoritativeUserEmail(String authoritativeUserEmail) {
- this.authoritativeUserEmail = authoritativeUserEmail;
- return this;
- }
-
- public String getAuthoritativeUserEmail() {
- return authoritativeUserEmail;
- }
-
- public ExternalCustomerInfo setAuthoritativeUserFullName(String authoritativeUserFullName) {
- this.authoritativeUserFullName = authoritativeUserFullName;
- return this;
- }
-
- public String getAuthoritativeUserFullName() {
- return authoritativeUserFullName;
- }
-
- public ExternalCustomerInfo setCustomerName(String customerName) {
- this.customerName = customerName;
- return this;
- }
-
- public String getCustomerName() {
- return customerName;
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
- ExternalCustomerInfo that = (ExternalCustomerInfo) o;
- return Objects.equals(authoritativeUserEmail, that.authoritativeUserEmail)
- && Objects.equals(authoritativeUserFullName, that.authoritativeUserFullName)
- && Objects.equals(customerName, that.customerName);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(authoritativeUserEmail, authoritativeUserFullName, customerName);
- }
-
- @Override
- public String toString() {
- return new ToStringer(ExternalCustomerInfo.class)
- .add("authoritativeUserEmail", authoritativeUserEmail)
- .add("authoritativeUserFullName", authoritativeUserFullName)
- .add("customerName", customerName)
- .toString();
- }
-}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpCommonNetworkConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpCommonNetworkConfig.java
new file mode 100755
index 000000000..21b86acd9
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpCommonNetworkConfig.java
@@ -0,0 +1,67 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.provisioning;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/**
+ * The shared network config for a GCP workspace. This object has common network configurations
+ * that are network attributes of a workspace. DEPRECATED. Use GkeConfig instead.
+ */
+@Generated
+public class GcpCommonNetworkConfig {
+ /**
+ * The IP range that will be used to allocate GKE cluster master resources from. This field must
+ * not be set if gke_cluster_type=PUBLIC_NODE_PUBLIC_MASTER.
+ */
+ @JsonProperty("gke_cluster_master_ip_range")
+ private String gkeClusterMasterIpRange;
+
+ /** The type of network connectivity of the GKE cluster. */
+ @JsonProperty("gke_connectivity_type")
+ private GkeConfigConnectivityType gkeConnectivityType;
+
+ public GcpCommonNetworkConfig setGkeClusterMasterIpRange(String gkeClusterMasterIpRange) {
+ this.gkeClusterMasterIpRange = gkeClusterMasterIpRange;
+ return this;
+ }
+
+ public String getGkeClusterMasterIpRange() {
+ return gkeClusterMasterIpRange;
+ }
+
+ public GcpCommonNetworkConfig setGkeConnectivityType(
+ GkeConfigConnectivityType gkeConnectivityType) {
+ this.gkeConnectivityType = gkeConnectivityType;
+ return this;
+ }
+
+ public GkeConfigConnectivityType getGkeConnectivityType() {
+ return gkeConnectivityType;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ GcpCommonNetworkConfig that = (GcpCommonNetworkConfig) o;
+ return Objects.equals(gkeClusterMasterIpRange, that.gkeClusterMasterIpRange)
+ && Objects.equals(gkeConnectivityType, that.gkeConnectivityType);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(gkeClusterMasterIpRange, gkeConnectivityType);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(GcpCommonNetworkConfig.class)
+ .add("gkeClusterMasterIpRange", gkeClusterMasterIpRange)
+ .add("gkeConnectivityType", gkeConnectivityType)
+ .toString();
+ }
+}
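GcpCommonNetworkConfig arrives already deprecated in favor of GkeConfig, and the two carry the same pair of settings. A sketch of the mapping between the deprecated and preferred forms; the connectivity-type constants and the /28 master range size come from the GkeConfig javadoc later in this diff, while the CIDR value itself is hypothetical:

import com.databricks.sdk.service.provisioning.GcpCommonNetworkConfig;
import com.databricks.sdk.service.provisioning.GkeConfig;
import com.databricks.sdk.service.provisioning.GkeConfigConnectivityType;

public class GkeConnectivitySketch {
  public static void main(String[] args) {
    // Deprecated form: both settings live on the shared network config.
    GcpCommonNetworkConfig deprecatedConfig =
        new GcpCommonNetworkConfig()
            .setGkeConnectivityType(GkeConfigConnectivityType.PRIVATE_NODE_PUBLIC_MASTER)
            .setGkeClusterMasterIpRange("10.3.0.0/28"); // hypothetical /28 range

    // Preferred form: the same two settings on GkeConfig.
    GkeConfig preferredConfig =
        new GkeConfig()
            .setConnectivityType(GkeConfigConnectivityType.PRIVATE_NODE_PUBLIC_MASTER)
            .setMasterIpRange("10.3.0.0/28");

    System.out.println(deprecatedConfig);
    System.out.println(preferredConfig);
  }
}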
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpKeyInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpKeyInfo.java
index be5eba082..2a6b12355 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpKeyInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpKeyInfo.java
@@ -9,7 +9,10 @@
@Generated
public class GcpKeyInfo {
- /** The GCP KMS key's resource name */
+ /**
+ * Globally unique kms key resource id of the form
+ * projects/testProjectId/locations/us-east4/keyRings/gcpCmkKeyRing/cryptoKeys/cmk-eastus4
+ */
@JsonProperty("kms_key_id")
private String kmsKeyId;
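The new javadoc pins down the expected kms_key_id shape. A small sketch assembling that resource name from its parts, reusing the example values from the javadoc above:

import com.databricks.sdk.service.provisioning.GcpKeyInfo;

public class GcpKeyInfoSketch {
  public static void main(String[] args) {
    // The kms_key_id is the globally unique KMS resource name; it is built
    // here from its parts (project, location, key ring, crypto key) for clarity.
    String project = "testProjectId"; // values taken from the javadoc example
    String location = "us-east4";
    String keyRing = "gcpCmkKeyRing";
    String cryptoKey = "cmk-eastus4";
    String kmsKeyId =
        String.format(
            "projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s",
            project, location, keyRing, cryptoKey);

    GcpKeyInfo keyInfo = new GcpKeyInfo().setKmsKeyId(kmsKeyId);
    System.out.println(keyInfo);
  }
}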
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpManagedNetworkConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpManagedNetworkConfig.java
index e27c531f4..d6e9e9efd 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpManagedNetworkConfig.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpManagedNetworkConfig.java
@@ -7,46 +7,20 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
-/**
- * The network settings for the workspace. The configurations are only for Databricks-managed VPCs.
- * It is ignored if you specify a customer-managed VPC in the `network_id` field.", All the IP range
- * configurations must be mutually exclusive. An attempt to create a workspace fails if Databricks
- * detects an IP range overlap.
- *
- * Specify custom IP ranges in CIDR format. The IP ranges for these fields must not overlap, and
- * all IP addresses must be entirely within the following ranges: `10.0.0.0/8`, `100.64.0.0/10`,
- * `172.16.0.0/12`, `192.168.0.0/16`, and `240.0.0.0/4`.
- *
- * The sizes of these IP ranges affect the maximum number of nodes for the workspace.
- *
- * **Important**: Confirm the IP ranges used by your Databricks workspace before creating the
- * workspace. You cannot change them after your workspace is deployed. If the IP address ranges for
- * your Databricks are too small, IP exhaustion can occur, causing your Databricks jobs to fail. To
- * determine the address range sizes that you need, Databricks provides a calculator as a Microsoft
- * Excel spreadsheet. See [calculate subnet sizes for a new workspace].
- *
- * [calculate subnet sizes for a new workspace]:
- * https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html
- */
+/** The network configuration for the workspace. */
@Generated
public class GcpManagedNetworkConfig {
- /**
- * The IP range from which to allocate GKE cluster pods. No bigger than `/9` and no smaller than
- * `/21`.
- */
+ /** The IP range that will be used to allocate GKE cluster Pods from. */
@JsonProperty("gke_cluster_pod_ip_range")
private String gkeClusterPodIpRange;
- /**
- * The IP range from which to allocate GKE cluster services. No bigger than `/16` and no smaller
- * than `/27`.
- */
+ /** The IP range that will be used to allocate GKE cluster Services from. */
@JsonProperty("gke_cluster_service_ip_range")
private String gkeClusterServiceIpRange;
/**
- * The IP range from which to allocate GKE cluster nodes. No bigger than `/9` and no smaller than
- * `/29`.
+ * The IP range that will be used to allocate GKE cluster nodes from. Note: the Pod, Service,
+ * and master IP ranges must be mutually exclusive.
*/
@JsonProperty("subnet_cidr")
private String subnetCidr;
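The reworded javadoc keeps the one hard rule: the node, Pod, and Service ranges must not overlap. A sketch with hypothetical, deliberately disjoint CIDR blocks:

import com.databricks.sdk.service.provisioning.GcpManagedNetworkConfig;

public class ManagedNetworkSketch {
  public static void main(String[] args) {
    // The three ranges must be mutually exclusive; the blocks below are
    // hypothetical and chosen so that none of them overlaps another.
    GcpManagedNetworkConfig network =
        new GcpManagedNetworkConfig()
            .setSubnetCidr("10.0.0.0/16")                // GKE cluster nodes
            .setGkeClusterPodIpRange("10.1.0.0/16")      // GKE cluster Pods
            .setGkeClusterServiceIpRange("10.2.0.0/20"); // GKE cluster Services
    System.out.println(network);
  }
}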
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpNetworkInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpNetworkInfo.java
index 08f497789..ee4f4e301 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpNetworkInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpNetworkInfo.java
@@ -7,42 +7,35 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
-/**
- * The Google Cloud specific information for this network (for example, the VPC ID, subnet ID, and
- * secondary IP ranges).
- */
@Generated
public class GcpNetworkInfo {
- /** The Google Cloud project ID of the VPC network. */
+ /** The GCP project ID for network resources. This project is where the VPC and subnet reside. */
@JsonProperty("network_project_id")
private String networkProjectId;
/**
- * The name of the secondary IP range for pods. A Databricks-managed GKE cluster uses this IP
- * range for its pods. This secondary IP range can be used by only one workspace.
+ * Name of the secondary range within the subnet that will be used by GKE as Pod IP range. This is
+ * BYO VPC specific. DB VPC uses network.getGcpManagedNetworkConfig.getGkeClusterPodIpRange
*/
@JsonProperty("pod_ip_range_name")
private String podIpRangeName;
- /**
- * The name of the secondary IP range for services. A Databricks-managed GKE cluster uses this IP
- * range for its services. This secondary IP range can be used by only one workspace.
- */
+ /** Name of the secondary range within the subnet that will be used by GKE as Service IP range. */
@JsonProperty("service_ip_range_name")
private String serviceIpRangeName;
- /** The ID of the subnet associated with this network. */
+ /**
+ * The customer-provided Subnet ID that will be available to Clusters in Workspaces using this
+ * Network.
+ */
@JsonProperty("subnet_id")
private String subnetId;
- /** The Google Cloud region of the workspace data plane (for example, `us-east4`). */
+ /** */
@JsonProperty("subnet_region")
private String subnetRegion;
- /**
- * The ID of the VPC associated with this network. VPC IDs can be used in multiple network
- * configurations.
- */
+ /** The customer-provided VPC ID. */
@JsonProperty("vpc_id")
private String vpcId;
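For a BYO VPC network, the updated javadoc makes the customer-provided pieces explicit: project, VPC, subnet, and the two named secondary ranges used by GKE. A sketch with hypothetical resource names (the region value echoes the removed javadoc's example):

import com.databricks.sdk.service.provisioning.GcpNetworkInfo;

public class ByoVpcNetworkSketch {
  public static void main(String[] args) {
    // Customer-provided VPC, subnet, and the two secondary ranges on that
    // subnet for GKE Pods and Services; all names here are hypothetical.
    GcpNetworkInfo info =
        new GcpNetworkInfo()
            .setNetworkProjectId("my-network-project")
            .setVpcId("my-vpc")
            .setSubnetId("my-subnet")
            .setSubnetRegion("us-east4")
            .setPodIpRangeName("pods-range")         // secondary range for Pods
            .setServiceIpRangeName("services-range"); // secondary range for Services
    System.out.println(info);
  }
}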
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpVpcEndpointInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpVpcEndpointInfo.java
index 3298b72bf..2cbbbbec5 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpVpcEndpointInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpVpcEndpointInfo.java
@@ -7,26 +7,25 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
-/** The Google Cloud specific information for this Private Service Connect endpoint. */
@Generated
public class GcpVpcEndpointInfo {
- /** Region of the PSC endpoint. */
+ /** */
@JsonProperty("endpoint_region")
private String endpointRegion;
- /** The Google Cloud project ID of the VPC network where the PSC connection resides. */
+ /** */
@JsonProperty("project_id")
private String projectId;
- /** The unique ID of this PSC connection. */
+ /** */
@JsonProperty("psc_connection_id")
private String pscConnectionId;
- /** The name of the PSC endpoint in the Google Cloud project. */
+ /** */
@JsonProperty("psc_endpoint_name")
private String pscEndpointName;
- /** The service attachment this PSC connection connects to. */
+ /** */
@JsonProperty("service_attachment_id")
private String serviceAttachmentId;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetCredentialRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetCredentialRequest.java
index f4b3add8a..0d95a3d55 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetCredentialRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetCredentialRequest.java
@@ -9,7 +9,7 @@
@Generated
public class GetCredentialRequest {
- /** Databricks Account API credential configuration ID */
+ /** Credential configuration ID */
@JsonIgnore private String credentialsId;
public GetCredentialRequest setCredentialsId(String credentialsId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetPrivateAccesRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetPrivateAccesRequest.java
index 563d2ea88..afe93b072 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetPrivateAccesRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetPrivateAccesRequest.java
@@ -9,7 +9,7 @@
@Generated
public class GetPrivateAccesRequest {
- /** Databricks Account API private access settings ID. */
+ /** */
@JsonIgnore private String privateAccessSettingsId;
public GetPrivateAccesRequest setPrivateAccessSettingsId(String privateAccessSettingsId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetStorageRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetStorageRequest.java
index f9a99b30a..15bae7e12 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetStorageRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetStorageRequest.java
@@ -9,7 +9,7 @@
@Generated
public class GetStorageRequest {
- /** Databricks Account API storage configuration ID. */
+ /** */
@JsonIgnore private String storageConfigurationId;
public GetStorageRequest setStorageConfigurationId(String storageConfigurationId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetWorkspaceRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetWorkspaceRequest.java
index 7b8b7bb44..30ece2cca 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetWorkspaceRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetWorkspaceRequest.java
@@ -9,7 +9,7 @@
@Generated
public class GetWorkspaceRequest {
- /** Workspace ID. */
+ /** */
@JsonIgnore private Long workspaceId;
public GetWorkspaceRequest setWorkspaceId(Long workspaceId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GkeConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GkeConfig.java
index 064319e4f..4435e9994 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GkeConfig.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GkeConfig.java
@@ -7,26 +7,16 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
-/** The configurations for the GKE cluster of a Databricks workspace. */
+/** The configurations of the GKE cluster used by the GCP workspace. */
@Generated
public class GkeConfig {
- /**
- * Specifies the network connectivity types for the GKE nodes and the GKE master network.
- *
- * Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the workspace. The GKE
- * nodes will not have public IPs.
- *
- * Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. The nodes of a public GKE
- * cluster have public IP addresses.
- */
+ /** The type of network connectivity of the GKE cluster. */
@JsonProperty("connectivity_type")
private GkeConfigConnectivityType connectivityType;
/**
- * The IP range from which to allocate GKE cluster master resources. This field will be ignored if
- * GKE private cluster is not enabled.
- *
- * It must be exactly as big as `/28`.
+ * The IP range that will be used to allocate GKE cluster master resources from. This field must
+ * not be set if gke_cluster_type=PUBLIC_NODE_PUBLIC_MASTER.
*/
@JsonProperty("master_ip_range")
private String masterIpRange;
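The tightened javadoc states the constraint directly: master_ip_range must not be set when the connectivity type is PUBLIC_NODE_PUBLIC_MASTER. A sketch of the public-cluster case that respects it:

import com.databricks.sdk.service.provisioning.GkeConfig;
import com.databricks.sdk.service.provisioning.GkeConfigConnectivityType;

public class PublicGkeConfigSketch {
  public static void main(String[] args) {
    // Public cluster: nodes get public IPs, and master_ip_range is left unset
    // because the javadoc forbids it for PUBLIC_NODE_PUBLIC_MASTER.
    GkeConfig publicCluster =
        new GkeConfig().setConnectivityType(GkeConfigConnectivityType.PUBLIC_NODE_PUBLIC_MASTER);
    System.out.println(publicCluster);
  }
}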
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyAccessConfiguration.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyAccessConfiguration.java
new file mode 100755
index 000000000..14dc163c3
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyAccessConfiguration.java
@@ -0,0 +1,45 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.provisioning;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/** The credential ID that is used to access the key vault. */
+@Generated
+public class KeyAccessConfiguration {
+ /** */
+ @JsonProperty("credential_id")
+ private String credentialId;
+
+ public KeyAccessConfiguration setCredentialId(String credentialId) {
+ this.credentialId = credentialId;
+ return this;
+ }
+
+ public String getCredentialId() {
+ return credentialId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ KeyAccessConfiguration that = (KeyAccessConfiguration) o;
+ return Objects.equals(credentialId, that.credentialId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(credentialId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(KeyAccessConfiguration.class)
+ .add("credentialId", credentialId)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyUseCase.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyUseCase.java
index 1c019242d..308014cf0 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyUseCase.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyUseCase.java
@@ -4,15 +4,8 @@
import com.databricks.sdk.support.Generated;
-/**
- * Possible values are: * `MANAGED_SERVICES`: Encrypts notebook and secret data in the control plane
- * * `STORAGE`: Encrypts the workspace's root S3 bucket (root DBFS and system data) and, optionally,
- * cluster EBS volumes.
- */
@Generated
public enum KeyUseCase {
- MANAGED_SERVICES, // Encrypts notebook and secret data in the control plane
- STORAGE, // Encrypts the workspace's root S3 bucket (root DBFS and system data) and,
- // optionally, cluster EBS volumes.
-
+ MANAGED_SERVICES,
+ STORAGE,
}
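With the inline comments stripped from the enum, the meaning of each constant now survives only in this diff's removed lines, so a small sketch restating them alongside a typical two-entry use-case list:

import com.databricks.sdk.service.provisioning.KeyUseCase;
import java.util.Arrays;
import java.util.Collection;

public class KeyUseCaseSketch {
  public static void main(String[] args) {
    // Per the removed javadoc: MANAGED_SERVICES encrypts notebook and secret
    // data in the control plane; STORAGE encrypts the workspace's root S3
    // bucket (root DBFS and system data) and, optionally, cluster EBS volumes.
    Collection<KeyUseCase> useCases =
        Arrays.asList(KeyUseCase.MANAGED_SERVICES, KeyUseCase.STORAGE);
    useCases.forEach(System.out::println);
  }
}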
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Network.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Network.java
index b149a4d48..39be0950e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Network.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Network.java
@@ -34,11 +34,17 @@ public class Network {
@JsonProperty("network_name")
private String networkName;
- /** */
+ /**
+ * IDs of one to five security groups associated with this network. Security group IDs **cannot**
+ * be used in multiple network configurations.
+ */
@JsonProperty("security_group_ids")
private Collection<String> securityGroupIds;
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink
- */
@Generated
public class NetworkVpcEndpoints {
/**
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksAPI.java
index 02cc7a527..628802833 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksAPI.java
@@ -34,8 +34,8 @@ public Network create(CreateNetworkRequest request) {
return impl.create(request);
}
- public void delete(String networkId) {
- delete(new DeleteNetworkRequest().setNetworkId(networkId));
+ public Network delete(String networkId) {
+ return delete(new DeleteNetworkRequest().setNetworkId(networkId));
}
/**
@@ -44,8 +44,8 @@ public void delete(String networkId) {
*
* This operation is available only if your account is on the E2 version of the platform.
*/
- public void delete(DeleteNetworkRequest request) {
- impl.delete(request);
+ public Network delete(DeleteNetworkRequest request) {
+ return impl.delete(request);
}
public Network get(String networkId) {
@@ -57,11 +57,7 @@ public Network get(GetNetworkRequest request) {
return impl.get(request);
}
- /**
- * Gets a list of all Databricks network configurations for an account, specified by ID.
- *
- * This operation is available only if your account is on the E2 version of the platform.
- */
+ /** Lists Databricks network configurations for an account. */
public Iterable<Network> list() {
* This operation is available only if your account is on the E2 version of the platform.
*/
- void delete(DeleteNetworkRequest deleteNetworkRequest);
+ Network delete(DeleteNetworkRequest deleteNetworkRequest);
/** Gets a Databricks network configuration, which represents a cloud VPC and its resources. */
Network get(GetNetworkRequest getNetworkRequest);
- /**
- * Gets a list of all Databricks network configurations for an account, specified by ID.
- *
- * This operation is available only if your account is on the E2 version of the platform.
- */
+ /** Lists Databricks network configurations for an account. */
Collection<Network> list();
- * [AWS Pricing]: https://databricks.com/product/aws-pricing
- */
@Generated
public enum PricingTier {
COMMUNITY_EDITION,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessAPI.java
index 82b70e7a6..644d702be 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessAPI.java
@@ -24,87 +24,53 @@ public PrivateAccessAPI(PrivateAccessService mock) {
}
/**
- * Creates a private access settings object, which specifies how your workspace is accessed over
- * [AWS PrivateLink]. To use AWS PrivateLink, a workspace must have a private access settings
- * object referenced by ID in the workspace's `private_access_settings_id` property.
- *
- * You can share one private access settings with multiple workspaces in a single account.
- * However, private access settings are specific to AWS regions, so only workspaces in the same
- * AWS region can use a given private access settings object.
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
+ * Creates a private access settings configuration, which represents network access restrictions
+ * for workspace resources. Private access settings configure whether workspaces can be accessed
+ * from the public internet or only from private endpoints.
*/
public PrivateAccessSettings create(CreatePrivateAccessSettingsRequest request) {
return impl.create(request);
}
- public void delete(String privateAccessSettingsId) {
- delete(new DeletePrivateAccesRequest().setPrivateAccessSettingsId(privateAccessSettingsId));
+ public PrivateAccessSettings delete(String privateAccessSettingsId) {
+ return delete(
+ new DeletePrivateAccesRequest().setPrivateAccessSettingsId(privateAccessSettingsId));
}
- /**
- * Deletes a private access settings object, which determines how your workspace is accessed over
- * [AWS PrivateLink].
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].",
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
- */
- public void delete(DeletePrivateAccesRequest request) {
- impl.delete(request);
+ /** Deletes a Databricks private access settings configuration, both specified by ID. */
+ public PrivateAccessSettings delete(DeletePrivateAccesRequest request) {
+ return impl.delete(request);
}
public PrivateAccessSettings get(String privateAccessSettingsId) {
return get(new GetPrivateAccesRequest().setPrivateAccessSettingsId(privateAccessSettingsId));
}
- /**
- * Gets a private access settings object, which specifies how your workspace is accessed over [AWS
- * PrivateLink].
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].",
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
- */
+ /** Gets a Databricks private access settings configuration, both specified by ID. */
public PrivateAccessSettings get(GetPrivateAccesRequest request) {
return impl.get(request);
}
- /** Gets a list of all private access settings objects for an account, specified by ID. */
+ /** Lists Databricks private access settings for an account. */
public Iterable<PrivateAccessSettings> list() {
- * This operation completely overwrites your existing private access settings object attached
- * to your workspaces. All workspaces attached to the private access settings are affected by any
- * change. If `public_access_enabled`, `private_access_level`, or `allowed_vpc_endpoint_ids` are
- * updated, effects of these changes might take several minutes to propagate to the workspace API.
- *
- * You can share one private access settings object with multiple workspaces in a single
- * account. However, private access settings are specific to AWS regions, so only workspaces in
- * the same AWS region can use a given private access settings object.
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
+ * accessed over AWS PrivateLink. To use AWS PrivateLink, a workspace must have a private access
+ * settings object referenced by ID in the workspace's private_access_settings_id property. This
+ * operation completely overwrites your existing private access settings object attached to your
+ * workspaces. All workspaces attached to the private access settings are affected by any change.
+ * If public_access_enabled, private_access_level, or allowed_vpc_endpoint_ids are updated,
+ * effects of these changes might take several minutes to propagate to the workspace API. You can
+ * share one private access settings object with multiple workspaces in a single account. However,
+ * private access settings are specific to AWS regions, so only workspaces in the same AWS region
+ * can use a given private access settings object. Before configuring PrivateLink, read the
+ * Databricks article about PrivateLink.
*/
- public void replace(ReplacePrivateAccessSettingsRequest request) {
- impl.replace(request);
+ public PrivateAccessSettings replace(ReplacePrivateAccessSettingsRequest request) {
+ return impl.replace(request);
}
public PrivateAccessService impl() {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessImpl.java
index dcca1eeba..1ed77901e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessImpl.java
@@ -34,7 +34,7 @@ public PrivateAccessSettings create(CreatePrivateAccessSettingsRequest request)
}
@Override
- public void delete(DeletePrivateAccesRequest request) {
+ public PrivateAccessSettings delete(DeletePrivateAccesRequest request) {
String path =
String.format(
"/api/2.0/accounts/%s/private-access-settings/%s",
@@ -43,7 +43,7 @@ public void delete(DeletePrivateAccesRequest request) {
Request req = new Request("DELETE", path);
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, PrivateAccessSettings.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
@@ -76,17 +76,19 @@ public Collection<PrivateAccessSettings> list() {
- * You can share one private access settings with multiple workspaces in a single account.
- * However, private access settings are specific to AWS regions, so only workspaces in the same
- * AWS region can use a given private access settings object.
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
+ * Creates a private access settings configuration, which represents network access restrictions
+ * for workspace resources. Private access settings configure whether workspaces can be accessed
+ * from the public internet or only from private endpoints.
*/
PrivateAccessSettings create(
CreatePrivateAccessSettingsRequest createPrivateAccessSettingsRequest);
- /**
- * Deletes a private access settings object, which determines how your workspace is accessed over
- * [AWS PrivateLink].
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].",
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
- */
- void delete(DeletePrivateAccesRequest deletePrivateAccesRequest);
+ /** Deletes a Databricks private access settings configuration, both specified by ID. */
+ PrivateAccessSettings delete(DeletePrivateAccesRequest deletePrivateAccesRequest);
- /**
- * Gets a private access settings object, which specifies how your workspace is accessed over [AWS
- * PrivateLink].
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].",
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
- */
+ /** Gets a Databricks private access settings configuration, both specified by ID. */
PrivateAccessSettings get(GetPrivateAccesRequest getPrivateAccesRequest);
- /** Gets a list of all private access settings objects for an account, specified by ID. */
+ /** Lists Databricks private access settings for an account. */
Collection<PrivateAccessSettings> list();
- * This operation completely overwrites your existing private access settings object attached
- * to your workspaces. All workspaces attached to the private access settings are affected by any
- * change. If `public_access_enabled`, `private_access_level`, or `allowed_vpc_endpoint_ids` are
- * updated, effects of these changes might take several minutes to propagate to the workspace API.
- *
- * You can share one private access settings object with multiple workspaces in a single
- * account. However, private access settings are specific to AWS regions, so only workspaces in
- * the same AWS region can use a given private access settings object.
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
+ * accessed over AWS PrivateLink. To use AWS PrivateLink, a workspace must have a private access
+ * settings object referenced by ID in the workspace's private_access_settings_id property. This
+ * operation completely overwrites your existing private access settings object attached to your
+ * workspaces. All workspaces attached to the private access settings are affected by any change.
+ * If public_access_enabled, private_access_level, or allowed_vpc_endpoint_ids are updated,
+ * effects of these changes might take several minutes to propagate to the workspace API. You can
+ * share one private access settings object with multiple workspaces in a single account. However,
+ * private access settings are specific to AWS regions, so only workspaces in the same AWS region
+ * can use a given private access settings object. Before configuring PrivateLink, read the
+ * Databricks article about PrivateLink.
*/
- void replace(ReplacePrivateAccessSettingsRequest replacePrivateAccessSettingsRequest);
+ PrivateAccessSettings replace(
+ ReplacePrivateAccessSettingsRequest replacePrivateAccessSettingsRequest);
}
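The consolidated replace javadoc is worth acting on: because the call overwrites the whole settings object and propagation can take minutes, a read-before-replace pattern is the safe default. A sketch under stated assumptions: the AccountClient is authenticated from the environment, the settings ID is hypothetical, and setPrivateAccessSettingsId is the only request setter assumed here, since the body fields of ReplacePrivateAccessSettingsRequest are outside this section:

import com.databricks.sdk.AccountClient;
import com.databricks.sdk.service.provisioning.PrivateAccessSettings;
import com.databricks.sdk.service.provisioning.ReplacePrivateAccessSettingsRequest;

public class ReplacePrivateAccessSketch {
  public static void main(String[] args) {
    AccountClient account = new AccountClient(); // account-level auth from the environment

    // `replace` overwrites the whole object, so read the current settings
    // first and carry every field over; changes may take minutes to propagate.
    String settingsId = "pas-123"; // hypothetical private access settings ID
    PrivateAccessSettings current = account.privateAccess().get(settingsId);
    System.out.println("Current settings: " + current);

    // The request body fields are not shown in this hunk; only the path
    // identifier, assumed to be setPrivateAccessSettingsId, is set here.
    PrivateAccessSettings replaced =
        account
            .privateAccess()
            .replace(
                new ReplacePrivateAccessSettingsRequest().setPrivateAccessSettingsId(settingsId));
    System.out.println("Replaced settings: " + replaced);
  }
}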
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessSettings.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessSettings.java
index 03c466d78..a36f8b90b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessSettings.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessSettings.java
@@ -8,17 +8,32 @@
import java.util.Collection;
import java.util.Objects;
+/** * */
@Generated
public class PrivateAccessSettings {
- /** The Databricks account ID that hosts the credential. */
+ /** The Databricks account ID that hosts the private access settings. */
@JsonProperty("account_id")
private String accountId;
- /** An array of Databricks VPC endpoint IDs. */
+ /**
+ * An array of Databricks VPC endpoint IDs. This is the Databricks ID that is returned when
+ * registering the VPC endpoint configuration in your Databricks account. This is not the ID of
+ * the VPC endpoint in AWS. Only used when private_access_level is set to ENDPOINT. This is an
+ * allow list of VPC endpoints in your account that can connect to your workspace over AWS
+ * PrivateLink. If hybrid access to your workspace is enabled by setting public_access_enabled to
+ * true, this control only works for PrivateLink connections. To control how your workspace is
+ * accessed via public internet, see IP access lists.
+ */
@JsonProperty("allowed_vpc_endpoint_ids")
private Collection<String> allowedVpcEndpointIds;
- * Only used when `private_access_level` is set to `ENDPOINT`. This is an allow list of VPC
- * endpoints that in your account that can connect to your workspace over AWS PrivateLink.
- *
- * If hybrid access to your workspace is enabled by setting `public_access_enabled` to `true`,
- * this control only works for PrivateLink connections. To control how your workspace is accessed
- * via public internet, see [IP access lists].
- *
- * [IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html
- */
- @JsonProperty("allowed_vpc_endpoint_ids")
- private Collection<String> allowedVpcEndpointIds;
- * For information about how to create a new workspace with this API, see [Create a new
- * workspace using the Account API]
- *
- * [Create a new workspace using the Account API]:
- * http://docs.databricks.com/administration-guide/account-api/new-workspace.html
- */
+ /** Creates a Databricks storage configuration for an account. */
public StorageConfiguration create(CreateStorageConfigurationRequest request) {
return impl.create(request);
}
- public void delete(String storageConfigurationId) {
- delete(new DeleteStorageRequest().setStorageConfigurationId(storageConfigurationId));
+ public StorageConfiguration delete(String storageConfigurationId) {
+ return delete(new DeleteStorageRequest().setStorageConfigurationId(storageConfigurationId));
}
/**
* Deletes a Databricks storage configuration. You cannot delete a storage configuration that is
* associated with any workspace.
*/
- public void delete(DeleteStorageRequest request) {
- impl.delete(request);
+ public StorageConfiguration delete(DeleteStorageRequest request) {
+ return impl.delete(request);
}
public StorageConfiguration get(String storageConfigurationId) {
@@ -66,7 +55,7 @@ public StorageConfiguration get(GetStorageRequest request) {
return impl.get(request);
}
- /** Gets a list of all Databricks storage configurations for your account, specified by ID. */
+ /** Lists Databricks storage configurations for an account, specified by ID. */
public Iterable<StorageConfiguration> list() {
- * For information about how to create a new workspace with this API, see [Create a new
- * workspace using the Account API]
- *
- * [Create a new workspace using the Account API]:
- * http://docs.databricks.com/administration-guide/account-api/new-workspace.html
- */
+ /** Creates a Databricks storage configuration for an account. */
StorageConfiguration create(CreateStorageConfigurationRequest createStorageConfigurationRequest);
/**
* Deletes a Databricks storage configuration. You cannot delete a storage configuration that is
* associated with any workspace.
*/
- void delete(DeleteStorageRequest deleteStorageRequest);
+ StorageConfiguration delete(DeleteStorageRequest deleteStorageRequest);
/** Gets a Databricks storage configuration for an account, both specified by ID. */
StorageConfiguration get(GetStorageRequest getStorageRequest);
- /** Gets a list of all Databricks storage configurations for your account, specified by ID. */
+ /** Lists Databricks storage configurations for an account, specified by ID. */
Collection<StorageConfiguration> list();
+ * A field mask of `*` indicates full replacement. It’s recommended to always explicitly list
+ * the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if
+ * the API changes in the future.
*/
- @JsonProperty("storage_customer_managed_key_id")
- private String storageCustomerManagedKeyId;
+ @JsonIgnore
+ @QueryParam("update_mask")
+ private String updateMask;
- /** Workspace ID. */
+ /** A unique integer ID for the workspace */
@JsonIgnore private Long workspaceId;
- public UpdateWorkspaceRequest setAwsRegion(String awsRegion) {
- this.awsRegion = awsRegion;
- return this;
- }
-
- public String getAwsRegion() {
- return awsRegion;
- }
-
- public UpdateWorkspaceRequest setCredentialsId(String credentialsId) {
- this.credentialsId = credentialsId;
- return this;
- }
-
- public String getCredentialsId() {
- return credentialsId;
- }
-
- public UpdateWorkspaceRequest setCustomTags(Map<String, String> customTags) {
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [AWS VPC endpoint]:
- * https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
+ * Deletes a Databricks VPC endpoint configuration. You cannot delete a VPC endpoint configuration
+ * that is associated with any workspace.
*/
- public void delete(DeleteVpcEndpointRequest request) {
- impl.delete(request);
+ public VpcEndpoint delete(DeleteVpcEndpointRequest request) {
+ return impl.delete(request);
}
public VpcEndpoint get(String vpcEndpointId) {
@@ -77,14 +70,7 @@ public VpcEndpoint get(GetVpcEndpointRequest request) {
return impl.get(request);
}
- /**
- * Gets a list of all VPC endpoints for an account, specified by ID.
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * [Databricks article about PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
- */
+ /** Lists Databricks VPC endpoint configurations for an account. */
public Iterable<VpcEndpoint> list() {
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [AWS VPC endpoint]:
- * https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
+ * Deletes a Databricks VPC endpoint configuration. You cannot delete a VPC endpoint configuration
+ * that is associated with any workspace.
*/
- void delete(DeleteVpcEndpointRequest deleteVpcEndpointRequest);
+ VpcEndpoint delete(DeleteVpcEndpointRequest deleteVpcEndpointRequest);
/**
* Gets a VPC endpoint configuration, which represents a [VPC endpoint] object in AWS used to
@@ -53,13 +46,6 @@ public interface VpcEndpointsService {
*/
VpcEndpoint get(GetVpcEndpointRequest getVpcEndpointRequest);
- /**
- * Gets a list of all VPC endpoints for an account, specified by ID.
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * [Databricks article about PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
- */
+ /** Lists Databricks VPC endpoint configurations for an account. */
Collection<VpcEndpoint> list();
- * This value must be unique across all non-deleted deployments across all AWS regions.
- */
+ /** */
@JsonProperty("deployment_name")
private String deploymentName;
/**
- * If this workspace is for a external customer, then external_customer_info is populated. If this
- * workspace is not for a external customer, then external_customer_info is empty.
+ * A client-owned field used to indicate the workspace status that the client expects the
+ * workspace to be in. For now this is only used to unblock the Temporal workflow for GCP
+ * least-privileged workspaces.
*/
- @JsonProperty("external_customer_info")
- private ExternalCustomerInfo externalCustomerInfo;
+ @JsonProperty("expected_workspace_status")
+ private WorkspaceStatus expectedWorkspaceStatus;
/** */
@JsonProperty("gcp_managed_network_config")
@@ -70,10 +69,6 @@ public class Workspace {
@JsonProperty("gke_config")
private GkeConfig gkeConfig;
- /** Whether no public IP is enabled for the workspace. */
- @JsonProperty("is_no_public_ip_enabled")
- private Boolean isNoPublicIpEnabled;
-
/**
* The Google Cloud region of the workspace data plane in your Google account (for example,
* `us-east4`).
@@ -86,8 +81,20 @@ public class Workspace {
private String managedServicesCustomerManagedKeyId;
/**
- * The network configuration ID that is attached to the workspace. This field is available only if
- * the network is a customer-managed network.
+ * The network configuration for the workspace.
+ *
+ * DEPRECATED. Use `network_id` instead.
+ */
+ @JsonProperty("network")
+ private WorkspaceNetwork network;
+
+ /** The object ID of network connectivity config. */
+ @JsonProperty("network_connectivity_config_id")
+ private String networkConnectivityConfigId;
+
+ /**
+ * If this workspace is BYO VPC, then the network_id will be populated. If this workspace is not
+ * BYO VPC, then the network_id will be empty.
*/
@JsonProperty("network_id")
private String networkId;
@@ -118,6 +125,10 @@ public class Workspace {
@JsonProperty("storage_customer_managed_key_id")
private String storageCustomerManagedKeyId;
+ /** The storage mode of the workspace. */
+ @JsonProperty("storage_mode")
+ private CustomerFacingStorageMode storageMode;
+
/** A unique integer ID for the workspace */
@JsonProperty("workspace_id")
private Long workspaceId;
@@ -126,7 +137,7 @@ public class Workspace {
@JsonProperty("workspace_name")
private String workspaceName;
- /** */
+ /** The status of a workspace */
@JsonProperty("workspace_status")
private WorkspaceStatus workspaceStatus;
@@ -179,6 +190,15 @@ public CloudResourceContainer getCloudResourceContainer() {
return cloudResourceContainer;
}
+ public Workspace setComputeMode(CustomerFacingComputeMode computeMode) {
+ this.computeMode = computeMode;
+ return this;
+ }
+
+ public CustomerFacingComputeMode getComputeMode() {
+ return computeMode;
+ }
+
public Workspace setCreationTime(Long creationTime) {
this.creationTime = creationTime;
return this;
@@ -215,13 +235,13 @@ public String getDeploymentName() {
return deploymentName;
}
- public Workspace setExternalCustomerInfo(ExternalCustomerInfo externalCustomerInfo) {
- this.externalCustomerInfo = externalCustomerInfo;
+ public Workspace setExpectedWorkspaceStatus(WorkspaceStatus expectedWorkspaceStatus) {
+ this.expectedWorkspaceStatus = expectedWorkspaceStatus;
return this;
}
- public ExternalCustomerInfo getExternalCustomerInfo() {
- return externalCustomerInfo;
+ public WorkspaceStatus getExpectedWorkspaceStatus() {
+ return expectedWorkspaceStatus;
}
public Workspace setGcpManagedNetworkConfig(GcpManagedNetworkConfig gcpManagedNetworkConfig) {
@@ -242,15 +262,6 @@ public GkeConfig getGkeConfig() {
return gkeConfig;
}
- public Workspace setIsNoPublicIpEnabled(Boolean isNoPublicIpEnabled) {
- this.isNoPublicIpEnabled = isNoPublicIpEnabled;
- return this;
- }
-
- public Boolean getIsNoPublicIpEnabled() {
- return isNoPublicIpEnabled;
- }
-
public Workspace setLocation(String location) {
this.location = location;
return this;
@@ -270,6 +281,24 @@ public String getManagedServicesCustomerManagedKeyId() {
return managedServicesCustomerManagedKeyId;
}
+ public Workspace setNetwork(WorkspaceNetwork network) {
+ this.network = network;
+ return this;
+ }
+
+ public WorkspaceNetwork getNetwork() {
+ return network;
+ }
+
+ public Workspace setNetworkConnectivityConfigId(String networkConnectivityConfigId) {
+ this.networkConnectivityConfigId = networkConnectivityConfigId;
+ return this;
+ }
+
+ public String getNetworkConnectivityConfigId() {
+ return networkConnectivityConfigId;
+ }
+
public Workspace setNetworkId(String networkId) {
this.networkId = networkId;
return this;
@@ -315,6 +344,15 @@ public String getStorageCustomerManagedKeyId() {
return storageCustomerManagedKeyId;
}
+ public Workspace setStorageMode(CustomerFacingStorageMode storageMode) {
+ this.storageMode = storageMode;
+ return this;
+ }
+
+ public CustomerFacingStorageMode getStorageMode() {
+ return storageMode;
+ }
+
public Workspace setWorkspaceId(Long workspaceId) {
this.workspaceId = workspaceId;
return this;
@@ -361,22 +399,25 @@ public boolean equals(Object o) {
&& Objects.equals(azureWorkspaceInfo, that.azureWorkspaceInfo)
&& Objects.equals(cloud, that.cloud)
&& Objects.equals(cloudResourceContainer, that.cloudResourceContainer)
+ && Objects.equals(computeMode, that.computeMode)
&& Objects.equals(creationTime, that.creationTime)
&& Objects.equals(credentialsId, that.credentialsId)
&& Objects.equals(customTags, that.customTags)
&& Objects.equals(deploymentName, that.deploymentName)
- && Objects.equals(externalCustomerInfo, that.externalCustomerInfo)
+ && Objects.equals(expectedWorkspaceStatus, that.expectedWorkspaceStatus)
&& Objects.equals(gcpManagedNetworkConfig, that.gcpManagedNetworkConfig)
&& Objects.equals(gkeConfig, that.gkeConfig)
- && Objects.equals(isNoPublicIpEnabled, that.isNoPublicIpEnabled)
&& Objects.equals(location, that.location)
&& Objects.equals(
managedServicesCustomerManagedKeyId, that.managedServicesCustomerManagedKeyId)
+ && Objects.equals(network, that.network)
+ && Objects.equals(networkConnectivityConfigId, that.networkConnectivityConfigId)
&& Objects.equals(networkId, that.networkId)
&& Objects.equals(pricingTier, that.pricingTier)
&& Objects.equals(privateAccessSettingsId, that.privateAccessSettingsId)
&& Objects.equals(storageConfigurationId, that.storageConfigurationId)
&& Objects.equals(storageCustomerManagedKeyId, that.storageCustomerManagedKeyId)
+ && Objects.equals(storageMode, that.storageMode)
&& Objects.equals(workspaceId, that.workspaceId)
&& Objects.equals(workspaceName, that.workspaceName)
&& Objects.equals(workspaceStatus, that.workspaceStatus)
@@ -391,21 +432,24 @@ public int hashCode() {
azureWorkspaceInfo,
cloud,
cloudResourceContainer,
+ computeMode,
creationTime,
credentialsId,
customTags,
deploymentName,
- externalCustomerInfo,
+ expectedWorkspaceStatus,
gcpManagedNetworkConfig,
gkeConfig,
- isNoPublicIpEnabled,
location,
managedServicesCustomerManagedKeyId,
+ network,
+ networkConnectivityConfigId,
networkId,
pricingTier,
privateAccessSettingsId,
storageConfigurationId,
storageCustomerManagedKeyId,
+ storageMode,
workspaceId,
workspaceName,
workspaceStatus,
@@ -420,21 +464,24 @@ public String toString() {
.add("azureWorkspaceInfo", azureWorkspaceInfo)
.add("cloud", cloud)
.add("cloudResourceContainer", cloudResourceContainer)
+ .add("computeMode", computeMode)
.add("creationTime", creationTime)
.add("credentialsId", credentialsId)
.add("customTags", customTags)
.add("deploymentName", deploymentName)
- .add("externalCustomerInfo", externalCustomerInfo)
+ .add("expectedWorkspaceStatus", expectedWorkspaceStatus)
.add("gcpManagedNetworkConfig", gcpManagedNetworkConfig)
.add("gkeConfig", gkeConfig)
- .add("isNoPublicIpEnabled", isNoPublicIpEnabled)
.add("location", location)
.add("managedServicesCustomerManagedKeyId", managedServicesCustomerManagedKeyId)
+ .add("network", network)
+ .add("networkConnectivityConfigId", networkConnectivityConfigId)
.add("networkId", networkId)
.add("pricingTier", pricingTier)
.add("privateAccessSettingsId", privateAccessSettingsId)
.add("storageConfigurationId", storageConfigurationId)
.add("storageCustomerManagedKeyId", storageCustomerManagedKeyId)
+ .add("storageMode", storageMode)
.add("workspaceId", workspaceId)
.add("workspaceName", workspaceName)
.add("workspaceStatus", workspaceStatus)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceNetwork.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceNetwork.java
new file mode 100755
index 000000000..478a1fd93
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceNetwork.java
@@ -0,0 +1,89 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.provisioning;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/** The network configuration for workspaces. */
+@Generated
+public class WorkspaceNetwork {
+ /**
+ * The shared network config for GCP workspace. This object has common network configurations that
+ * are network attributes of a workspace. This object is input-only.
+ */
+ @JsonProperty("gcp_common_network_config")
+ private GcpCommonNetworkConfig gcpCommonNetworkConfig;
+
+ /**
+ * The mutually exclusive network deployment modes. The option decides which network mode the
+ * workspace will use. The network config for GCP workspace with Databricks managed network. This
+ * object is input-only and will not be provided when listing workspaces. See
+ * go/gcp-byovpc-alpha-design for interface decisions.
+ */
+ @JsonProperty("gcp_managed_network_config")
+ private GcpManagedNetworkConfig gcpManagedNetworkConfig;
+
+ /**
+ * The ID of the network object, if the workspace is a BYOVPC workspace. This should apply to
+ * workspaces on all clouds in internal services. In accounts-rest-api, user will use
+ * workspace.network_id for input and output instead. Currently (2021-06-19) the network ID is
+ * only used by GCP.
+ */
+ @JsonProperty("network_id")
+ private String networkId;
+
+ public WorkspaceNetwork setGcpCommonNetworkConfig(GcpCommonNetworkConfig gcpCommonNetworkConfig) {
+ this.gcpCommonNetworkConfig = gcpCommonNetworkConfig;
+ return this;
+ }
+
+ public GcpCommonNetworkConfig getGcpCommonNetworkConfig() {
+ return gcpCommonNetworkConfig;
+ }
+
+ public WorkspaceNetwork setGcpManagedNetworkConfig(
+ GcpManagedNetworkConfig gcpManagedNetworkConfig) {
+ this.gcpManagedNetworkConfig = gcpManagedNetworkConfig;
+ return this;
+ }
+
+ public GcpManagedNetworkConfig getGcpManagedNetworkConfig() {
+ return gcpManagedNetworkConfig;
+ }
+
+ public WorkspaceNetwork setNetworkId(String networkId) {
+ this.networkId = networkId;
+ return this;
+ }
+
+ public String getNetworkId() {
+ return networkId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ WorkspaceNetwork that = (WorkspaceNetwork) o;
+ return Objects.equals(gcpCommonNetworkConfig, that.gcpCommonNetworkConfig)
+ && Objects.equals(gcpManagedNetworkConfig, that.gcpManagedNetworkConfig)
+ && Objects.equals(networkId, that.networkId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(gcpCommonNetworkConfig, gcpManagedNetworkConfig, networkId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(WorkspaceNetwork.class)
+ .add("gcpCommonNetworkConfig", gcpCommonNetworkConfig)
+ .add("gcpManagedNetworkConfig", gcpManagedNetworkConfig)
+ .add("networkId", networkId)
+ .toString();
+ }
+}
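A minimal usage sketch (not part of this diff) of how the new input-only type composes with the `Workspace.setNetwork` accessor added above; the network configuration ID is a hypothetical placeholder:

```java
import com.databricks.sdk.service.provisioning.Workspace;
import com.databricks.sdk.service.provisioning.WorkspaceNetwork;

public class WorkspaceNetworkExample {
  public static void main(String[] args) {
    // Attach a BYOVPC network configuration to a Workspace payload.
    // "<network-configuration-id>" is a placeholder, not a real ID.
    WorkspaceNetwork network =
        new WorkspaceNetwork().setNetworkId("<network-configuration-id>");
    Workspace workspace = new Workspace().setNetwork(network);
    System.out.println(workspace);
  }
}
```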
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceStatus.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceStatus.java
index c6614660a..628d49e35 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceStatus.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceStatus.java
@@ -5,8 +5,12 @@
import com.databricks.sdk.support.Generated;
/**
- * The status of the workspace. For workspace creation, usually it is set to `PROVISIONING`
- * initially. Continue to check the status until the status is `RUNNING`.
+ * The different statuses of a workspace. The following represents the current set of valid
+ * transitions from status to status:
+ *
+ * NOT_PROVISIONED -> PROVISIONING -> CANCELLED
+ * PROVISIONING -> RUNNING -> FAILED -> CANCELLED (note that this transition is disallowed in the
+ * MultiWorkspace Project)
+ * RUNNING -> PROVISIONING -> BANNED -> CANCELLED
+ * FAILED -> PROVISIONING -> CANCELLED
+ * BANNED -> RUNNING -> CANCELLED
+ *
+ * Note that a transition from any state to itself is also valid.
+ *
+ * TODO(PLAT-5867): add a transition from CANCELLED to some other value (e.g. RECOVERING)
*/
@Generated
public enum WorkspaceStatus {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesAPI.java
index 879c61231..479e91661 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesAPI.java
@@ -83,14 +83,38 @@ public Workspace waitGetWorkspaceRunning(
}
/**
- * Creates a new workspace.
+ * Creates a new workspace using a credential configuration and a storage configuration, an
+ * optional network configuration (if using a customer-managed VPC), an optional managed services
+ * key configuration (if using customer-managed keys for managed services), and an optional
+ * storage key configuration (if using customer-managed keys for storage). The key configurations
+ * used for managed services and storage encryption can be the same or different.
+ *
+ * Important: This operation is asynchronous. A response with HTTP status code 200 means the
+ * request has been accepted and is in progress, but does not mean that the workspace deployed
+ * successfully and is running. The initial workspace status is typically PROVISIONING. Use the
+ * workspace ID (workspace_id) field in the response to identify the new workspace and make
+ * repeated GET requests with the workspace ID and check its status. The workspace becomes
+ * available when the status changes to RUNNING.
+ *
+ * You can share one customer-managed VPC with multiple workspaces in a single account. It is
+ * not required to create a new VPC for each workspace. However, you cannot reuse subnets or
+ * Security Groups between workspaces. If you plan to share one VPC with multiple workspaces, make
+ * sure you size your VPC and subnets accordingly. Because a Databricks Account API network
+ * configuration encapsulates this information, you cannot reuse a Databricks Account API network
+ * configuration across workspaces.
+ *
+ * For information about how to create a new workspace with this API including error handling,
+ * see [Create a new workspace using the Account API].
+ *
+ * Important: Customer-managed VPCs, PrivateLink, and customer-managed keys are supported on a
+ * limited set of deployment and subscription types. If you have questions about availability,
+ * contact your Databricks representative.
*
- * **Important**: This operation is asynchronous. A response with HTTP status code 200 means
- * the request has been accepted and is in progress, but does not mean that the workspace deployed
- * successfully and is running. The initial workspace status is typically `PROVISIONING`. Use the
- * workspace ID (`workspace_id`) field in the response to identify the new workspace and make
- * repeated `GET` requests with the workspace ID and check its status. The workspace becomes
- * available when the status changes to `RUNNING`.
+ * This operation is available only if your account is on the E2 version of the platform or on
+ * a select custom plan that allows multiple workspaces per account.
+ *
+ * [Create a new workspace using the Account API]:
+ * http://docs.databricks.com/administration-guide/account-api/new-workspace.html
*/
public Wait<Workspace, Workspace> create(CreateWorkspaceRequest request) {
Workspace response = impl.create(request);
return new Wait<>(
(timeout, callback) -> waitGetWorkspaceRunning(response.getWorkspaceId(), timeout, callback),
response);
}
- /**
- * Terminates and deletes a Databricks workspace. From an API perspective, deletion is immediate.
- * However, it might take a few minutes for all workspaces resources to be deleted, depending on
- * the size and number of workspace resources.
- *
- * This operation is available only if your account is on the E2 version of the platform or on
- * a select custom plan that allows multiple workspaces per account.
- */
- public void delete(DeleteWorkspaceRequest request) {
- impl.delete(request);
+ /** Deletes a Databricks workspace, specified by ID. */
+ public Workspace delete(DeleteWorkspaceRequest request) {
+ return impl.delete(request);
}
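A minimal sketch of the asynchronous create-then-poll flow the javadoc above describes, using the SDK's `Wait` helper to block until the workspace reaches `RUNNING`; the credential and storage configuration IDs are hypothetical placeholders:

```java
import com.databricks.sdk.AccountClient;
import com.databricks.sdk.service.provisioning.CreateWorkspaceRequest;
import com.databricks.sdk.service.provisioning.Workspace;

public class CreateWorkspaceExample {
  public static void main(String[] args) throws Exception {
    // Account-level client; authentication is resolved from the environment.
    AccountClient account = new AccountClient();

    // create() returns immediately with a Wait handle; get() then polls the
    // workspace via repeated GET requests until workspace_status is RUNNING.
    Workspace workspace =
        account
            .workspaces()
            .create(
                new CreateWorkspaceRequest()
                    .setWorkspaceName("sdk-example")
                    .setCredentialsId("<credential-configuration-id>") // placeholder
                    .setStorageConfigurationId("<storage-configuration-id>")) // placeholder
            .get();

    System.out.println(workspace.getWorkspaceId() + " -> " + workspace.getWorkspaceStatus());
  }
}
```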
public Workspace get(long workspaceId) {
@@ -124,13 +141,9 @@ public Workspace get(long workspaceId) {
* Gets information including status for a Databricks workspace, specified by ID. In the response,
* the `workspace_status` field indicates the current status. After initial workspace creation
* (which is asynchronous), make repeated `GET` requests with the workspace ID and check its
- * status. The workspace becomes available when the status changes to `RUNNING`.
- *
- * For information about how to create a new workspace with this API **including error
- * handling**, see [Create a new workspace using the Account API].
- *
- * This operation is available only if your account is on the E2 version of the platform or on
- * a select custom plan that allows multiple workspaces per account.
+ * status. The workspace becomes available when the status changes to `RUNNING`. For information
+ * about how to create a new workspace with this API **including error handling**, see [Create a
+ * new workspace using the Account API].
*
* [Create a new workspace using the Account API]:
* http://docs.databricks.com/administration-guide/account-api/new-workspace.html
@@ -139,124 +152,18 @@ public Workspace get(GetWorkspaceRequest request) {
return impl.get(request);
}
- /**
- * Gets a list of all workspaces associated with an account, specified by ID.
- *
- * This operation is available only if your account is on the E2 version of the platform or on
- * a select custom plan that allows multiple workspaces per account.
- */
+ /** Lists Databricks workspaces for an account. */
public Iterable<Workspace> list() {
return impl.list();
}
- /**
- * Updates a workspace configuration for either a running workspace or a failed workspace. The
- * elements that can be updated varies between these two use cases.
- *
- * ### Update a failed workspace You can update a Databricks workspace configuration for failed
- * workspace deployment for some fields, but not all fields. For a failed workspace, this request
- * supports updates to the following fields only: - Credential configuration ID - Storage
- * configuration ID - Network configuration ID. Used only to add or change a network configuration
- * for a customer-managed VPC. For a failed workspace only, you can convert a workspace with
- * Databricks-managed VPC to use a customer-managed VPC by adding this ID. You cannot downgrade a
- * workspace with a customer-managed VPC to be a Databricks-managed VPC. You can update the
- * network configuration for a failed or running workspace to add PrivateLink support, though you
- * must also add a private access settings object. - Key configuration ID for managed services
- * (control plane storage, such as notebook source and Databricks SQL queries). Used only if you
- * use customer-managed keys for managed services. - Key configuration ID for workspace storage
- * (root S3 bucket and, optionally, EBS volumes). Used only if you use customer-managed keys for
- * workspace storage. **Important**: If the workspace was ever in the running state, even if
- * briefly before becoming a failed workspace, you cannot add a new key configuration ID for
- * workspace storage. - Private access settings ID to add PrivateLink support. You can add or
- * update the private access settings ID to upgrade a workspace to add support for front-end,
- * back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end
- * or back-end PrivateLink support on a workspace. - Custom tags. Given you provide an empty
- * custom tags, the update would not be applied. - Network connectivity configuration ID to add
- * serverless stable IP support. You can add or update the network connectivity configuration ID
- * to ensure the workspace uses the same set of stable IP CIDR blocks to access your resources.
- * You cannot remove a network connectivity configuration from the workspace once attached, you
- * can only switch to another one.
- *
- * After calling the `PATCH` operation to update the workspace configuration, make repeated
- * `GET` requests with the workspace ID and check the workspace status. The workspace is
- * successful if the status changes to `RUNNING`.
- *
- * For information about how to create a new workspace with this API **including error
- * handling**, see [Create a new workspace using the Account API].
- *
- * ### Update a running workspace You can update a Databricks workspace configuration for
- * running workspaces for some fields, but not all fields. For a running workspace, this request
- * supports updating the following fields only: - Credential configuration ID - Network
- * configuration ID. Used only if you already use a customer-managed VPC. You cannot convert a
- * running workspace from a Databricks-managed VPC to a customer-managed VPC. You can use a
- * network configuration update in this API for a failed or running workspace to add support for
- * PrivateLink, although you also need to add a private access settings object. - Key
- * configuration ID for managed services (control plane storage, such as notebook source and
- * Databricks SQL queries). Databricks does not directly encrypt the data with the
- * customer-managed key (CMK). Databricks uses both the CMK and the Databricks managed key (DMK)
- * that is unique to your workspace to encrypt the Data Encryption Key (DEK). Databricks uses the
- * DEK to encrypt your workspace's managed services persisted data. If the workspace does not
- * already have a CMK for managed services, adding this ID enables managed services encryption for
- * new or updated data. Existing managed services data that existed before adding the key remains
- * not encrypted with the DEK until it is modified. If the workspace already has customer-managed
- * keys for managed services, this request rotates (changes) the CMK keys and the DEK is
- * re-encrypted with the DMK and the new CMK. - Key configuration ID for workspace storage (root
- * S3 bucket and, optionally, EBS volumes). You can set this only if the workspace does not
- * already have a customer-managed key configuration for workspace storage. - Private access
- * settings ID to add PrivateLink support. You can add or update the private access settings ID to
- * upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You
- * cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a
- * workspace. - Custom tags. Given you provide an empty custom tags, the update would not be
- * applied. - Network connectivity configuration ID to add serverless stable IP support. You can
- * add or update the network connectivity configuration ID to ensure the workspace uses the same
- * set of stable IP CIDR blocks to access your resources. You cannot remove a network connectivity
- * configuration from the workspace once attached, you can only switch to another one.
- *
- * **Important**: To update a running workspace, your workspace must have no running compute
- * resources that run in your workspace's VPC in the Classic data plane. For example, stop all
- * all-purpose clusters, job clusters, pools with running clusters, and Classic SQL warehouses. If
- * you do not terminate all cluster instances in the workspace before calling this API, the
- * request will fail.
- *
- * ### Wait until changes take effect. After calling the `PATCH` operation to update the
- * workspace configuration, make repeated `GET` requests with the workspace ID and check the
- * workspace status and the status of the fields. * For workspaces with a Databricks-managed VPC,
- * the workspace status becomes `PROVISIONING` temporarily (typically under 20 minutes). If the
- * workspace update is successful, the workspace status changes to `RUNNING`. Note that you can
- * also check the workspace status in the [Account Console]. However, you cannot use or create
- * clusters for another 20 minutes after that status change. This results in a total of up to 40
- * minutes in which you cannot create clusters. If you create or use clusters before this time
- * interval elapses, clusters do not launch successfully, fail, or could cause other unexpected
- * behavior. * For workspaces with a customer-managed VPC, the workspace status stays at status
- * `RUNNING` and the VPC change happens immediately. A change to the storage customer-managed key
- * configuration ID might take a few minutes to update, so continue to check the workspace until
- * you observe that it has been updated. If the update fails, the workspace might revert silently
- * to its original configuration. After the workspace has been updated, you cannot use or create
- * clusters for another 20 minutes. If you create or use clusters before this time interval
- * elapses, clusters do not launch successfully, fail, or could cause other unexpected behavior.
- *
- * If you update the _storage_ customer-managed key configurations, it takes 20 minutes for the
- * changes to fully take effect. During the 20 minute wait, it is important that you stop all REST
- * API calls to the DBFS API. If you are modifying _only the managed services key configuration_,
- * you can omit the 20 minute wait.
- *
- * **Important**: Customer-managed keys and customer-managed VPCs are supported by only some
- * deployment types and subscription types. If you have questions about availability, contact your
- * Databricks representative.
- *
- * This operation is available only if your account is on the E2 version of the platform or on
- * a select custom plan that allows multiple workspaces per account.
- *
- * [Account Console]:
- * https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html
- * [Create a new workspace using the Account API]:
- * http://docs.databricks.com/administration-guide/account-api/new-workspace.html
- */
- public Wait<Workspace, Workspace> update(UpdateWorkspaceRequest request) {
- impl.update(request);
- return new Wait<>(
- (timeout, callback) -> waitGetWorkspaceRunning(request.getWorkspaceId(), timeout, callback));
- }
+ /** Updates a workspace. */
+ public Wait<Workspace, Workspace> update(UpdateWorkspaceRequest request) {
+ Workspace response = impl.update(request);
+ return new Wait<>(
+ (timeout, callback) -> waitGetWorkspaceRunning(response.getWorkspaceId(), timeout, callback),
+ response);
+ }
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesService.java
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesService.java
- /**
- * Creates a new workspace.
- *
- * **Important**: This operation is asynchronous. A response with HTTP status code 200 means
- * the request has been accepted and is in progress, but does not mean that the workspace deployed
- * successfully and is running. The initial workspace status is typically `PROVISIONING`. Use the
- * workspace ID (`workspace_id`) field in the response to identify the new workspace and make
- * repeated `GET` requests with the workspace ID and check its status. The workspace becomes
- * available when the status changes to `RUNNING`.
- */
- Workspace create(CreateWorkspaceRequest createWorkspaceRequest);
-
- /**
- * Terminates and deletes a Databricks workspace. From an API perspective, deletion is immediate.
- * However, it might take a few minutes for all workspaces resources to be deleted, depending on
- * the size and number of workspace resources.
+ * Creates a new workspace using a credential configuration and a storage configuration, an
+ * optional network configuration (if using a customer-managed VPC), an optional managed services
+ * key configuration (if using customer-managed keys for managed services), and an optional
+ * storage key configuration (if using customer-managed keys for storage). The key configurations
+ * used for managed services and storage encryption can be the same or different.
+ *
+ * Important: This operation is asynchronous. A response with HTTP status code 200 means the
+ * request has been accepted and is in progress, but does not mean that the workspace deployed
+ * successfully and is running. The initial workspace status is typically PROVISIONING. Use the
+ * workspace ID (workspace_id) field in the response to identify the new workspace and make
+ * repeated GET requests with the workspace ID and check its status. The workspace becomes
+ * available when the status changes to RUNNING.
+ *
+ * You can share one customer-managed VPC with multiple workspaces in a single account. It is
+ * not required to create a new VPC for each workspace. However, you cannot reuse subnets or
+ * Security Groups between workspaces. If you plan to share one VPC with multiple workspaces, make
+ * sure you size your VPC and subnets accordingly. Because a Databricks Account API network
+ * configuration encapsulates this information, you cannot reuse a Databricks Account API network
+ * configuration across workspaces.
+ *
+ * For information about how to create a new workspace with this API including error handling,
+ * see [Create a new workspace using the Account API].
+ *
+ * Important: Customer-managed VPCs, PrivateLink, and customer-managed keys are supported on a
+ * limited set of deployment and subscription types. If you have questions about availability,
+ * contact your Databricks representative.
*
* This operation is available only if your account is on the E2 version of the platform or on
* a select custom plan that allows multiple workspaces per account.
+ *
+ * [Create a new workspace using the Account API]:
+ * http://docs.databricks.com/administration-guide/account-api/new-workspace.html
*/
- void delete(DeleteWorkspaceRequest deleteWorkspaceRequest);
+ Workspace create(CreateWorkspaceRequest createWorkspaceRequest);
+
+ /** Deletes a Databricks workspace, specified by ID. */
+ Workspace delete(DeleteWorkspaceRequest deleteWorkspaceRequest);
/**
* Gets information including status for a Databricks workspace, specified by ID. In the response,
* the `workspace_status` field indicates the current status. After initial workspace creation
* (which is asynchronous), make repeated `GET` requests with the workspace ID and check its
- * status. The workspace becomes available when the status changes to `RUNNING`.
- *
- * For information about how to create a new workspace with this API **including error
- * handling**, see [Create a new workspace using the Account API].
- *
- * This operation is available only if your account is on the E2 version of the platform or on
- * a select custom plan that allows multiple workspaces per account.
+ * status. The workspace becomes available when the status changes to `RUNNING`. For information
+ * about how to create a new workspace with this API **including error handling**, see [Create a
+ * new workspace using the Account API].
*
* [Create a new workspace using the Account API]:
* http://docs.databricks.com/administration-guide/account-api/new-workspace.html
*/
Workspace get(GetWorkspaceRequest getWorkspaceRequest);
- /**
- * Gets a list of all workspaces associated with an account, specified by ID.
- *
- * This operation is available only if your account is on the E2 version of the platform or on
- * a select custom plan that allows multiple workspaces per account.
- */
+ /** Lists Databricks workspaces for an account. */
Collection<Workspace> list();
- /**
- * Updates a workspace configuration for either a running workspace or a failed workspace. The
- * elements that can be updated varies between these two use cases.
- *
- * ### Update a failed workspace You can update a Databricks workspace configuration for failed
- * workspace deployment for some fields, but not all fields. For a failed workspace, this request
- * supports updates to the following fields only: - Credential configuration ID - Storage
- * configuration ID - Network configuration ID. Used only to add or change a network configuration
- * for a customer-managed VPC. For a failed workspace only, you can convert a workspace with
- * Databricks-managed VPC to use a customer-managed VPC by adding this ID. You cannot downgrade a
- * workspace with a customer-managed VPC to be a Databricks-managed VPC. You can update the
- * network configuration for a failed or running workspace to add PrivateLink support, though you
- * must also add a private access settings object. - Key configuration ID for managed services
- * (control plane storage, such as notebook source and Databricks SQL queries). Used only if you
- * use customer-managed keys for managed services. - Key configuration ID for workspace storage
- * (root S3 bucket and, optionally, EBS volumes). Used only if you use customer-managed keys for
- * workspace storage. **Important**: If the workspace was ever in the running state, even if
- * briefly before becoming a failed workspace, you cannot add a new key configuration ID for
- * workspace storage. - Private access settings ID to add PrivateLink support. You can add or
- * update the private access settings ID to upgrade a workspace to add support for front-end,
- * back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end
- * or back-end PrivateLink support on a workspace. - Custom tags. Given you provide an empty
- * custom tags, the update would not be applied. - Network connectivity configuration ID to add
- * serverless stable IP support. You can add or update the network connectivity configuration ID
- * to ensure the workspace uses the same set of stable IP CIDR blocks to access your resources.
- * You cannot remove a network connectivity configuration from the workspace once attached, you
- * can only switch to another one.
- *
- * After calling the `PATCH` operation to update the workspace configuration, make repeated
- * `GET` requests with the workspace ID and check the workspace status. The workspace is
- * successful if the status changes to `RUNNING`.
- *
- * For information about how to create a new workspace with this API **including error
- * handling**, see [Create a new workspace using the Account API].
- *
- * ### Update a running workspace You can update a Databricks workspace configuration for
- * running workspaces for some fields, but not all fields. For a running workspace, this request
- * supports updating the following fields only: - Credential configuration ID - Network
- * configuration ID. Used only if you already use a customer-managed VPC. You cannot convert a
- * running workspace from a Databricks-managed VPC to a customer-managed VPC. You can use a
- * network configuration update in this API for a failed or running workspace to add support for
- * PrivateLink, although you also need to add a private access settings object. - Key
- * configuration ID for managed services (control plane storage, such as notebook source and
- * Databricks SQL queries). Databricks does not directly encrypt the data with the
- * customer-managed key (CMK). Databricks uses both the CMK and the Databricks managed key (DMK)
- * that is unique to your workspace to encrypt the Data Encryption Key (DEK). Databricks uses the
- * DEK to encrypt your workspace's managed services persisted data. If the workspace does not
- * already have a CMK for managed services, adding this ID enables managed services encryption for
- * new or updated data. Existing managed services data that existed before adding the key remains
- * not encrypted with the DEK until it is modified. If the workspace already has customer-managed
- * keys for managed services, this request rotates (changes) the CMK keys and the DEK is
- * re-encrypted with the DMK and the new CMK. - Key configuration ID for workspace storage (root
- * S3 bucket and, optionally, EBS volumes). You can set this only if the workspace does not
- * already have a customer-managed key configuration for workspace storage. - Private access
- * settings ID to add PrivateLink support. You can add or update the private access settings ID to
- * upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You
- * cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a
- * workspace. - Custom tags. Given you provide an empty custom tags, the update would not be
- * applied. - Network connectivity configuration ID to add serverless stable IP support. You can
- * add or update the network connectivity configuration ID to ensure the workspace uses the same
- * set of stable IP CIDR blocks to access your resources. You cannot remove a network connectivity
- * configuration from the workspace once attached, you can only switch to another one.
- *
- * **Important**: To update a running workspace, your workspace must have no running compute
- * resources that run in your workspace's VPC in the Classic data plane. For example, stop all
- * all-purpose clusters, job clusters, pools with running clusters, and Classic SQL warehouses. If
- * you do not terminate all cluster instances in the workspace before calling this API, the
- * request will fail.
- *
- * ### Wait until changes take effect. After calling the `PATCH` operation to update the
- * workspace configuration, make repeated `GET` requests with the workspace ID and check the
- * workspace status and the status of the fields. * For workspaces with a Databricks-managed VPC,
- * the workspace status becomes `PROVISIONING` temporarily (typically under 20 minutes). If the
- * workspace update is successful, the workspace status changes to `RUNNING`. Note that you can
- * also check the workspace status in the [Account Console]. However, you cannot use or create
- * clusters for another 20 minutes after that status change. This results in a total of up to 40
- * minutes in which you cannot create clusters. If you create or use clusters before this time
- * interval elapses, clusters do not launch successfully, fail, or could cause other unexpected
- * behavior. * For workspaces with a customer-managed VPC, the workspace status stays at status
- * `RUNNING` and the VPC change happens immediately. A change to the storage customer-managed key
- * configuration ID might take a few minutes to update, so continue to check the workspace until
- * you observe that it has been updated. If the update fails, the workspace might revert silently
- * to its original configuration. After the workspace has been updated, you cannot use or create
- * clusters for another 20 minutes. If you create or use clusters before this time interval
- * elapses, clusters do not launch successfully, fail, or could cause other unexpected behavior.
- *
- * If you update the _storage_ customer-managed key configurations, it takes 20 minutes for the
- * changes to fully take effect. During the 20 minute wait, it is important that you stop all REST
- * API calls to the DBFS API. If you are modifying _only the managed services key configuration_,
- * you can omit the 20 minute wait.
- *
- * **Important**: Customer-managed keys and customer-managed VPCs are supported by only some
- * deployment types and subscription types. If you have questions about availability, contact your
- * Databricks representative.
- *
- * This operation is available only if your account is on the E2 version of the platform or on
- * a select custom plan that allows multiple workspaces per account.
- *
- * [Account Console]:
- * https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html
- * [Create a new workspace using the Account API]:
- * http://docs.databricks.com/administration-guide/account-api/new-workspace.html
- */
- void update(UpdateWorkspaceRequest updateWorkspaceRequest);
+ /** Updates a workspace. */
+ Workspace update(UpdateWorkspaceRequest updateWorkspaceRequest);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsAPI.java
index ce08dd45a..509c6bbf1 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsAPI.java
@@ -252,6 +252,12 @@ public Wait<ServingEndpointDetailed, ServingEndpointDetailed>
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequest.java
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequest.java
- * Supported values: - Must be >= 0 mins for serverless warehouses - Must be == 0 or >= 10 mins
- * for non-serverless warehouses - 0 indicates no autostop.
+ * Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
*
* Defaults to 120 mins
*/
@@ -59,7 +59,7 @@ public class CreateWarehouseRequest {
/**
* Maximum number of clusters that the autoscaler will create to handle concurrent queries.
*
- * Supported values: - Must be >= min_num_clusters - Must be <= 30.
+ * Supported values: - Must be >= min_num_clusters - Must be <= 40.
*
* Defaults to min_clusters if unset.
*/
@@ -87,7 +87,7 @@ public class CreateWarehouseRequest {
@JsonProperty("name")
private String name;
- /** */
+ /** Configures whether the endpoint should use spot instances. */
@JsonProperty("spot_instance_policy")
private SpotInstancePolicy spotInstancePolicy;
@@ -100,7 +100,10 @@ public class CreateWarehouseRequest {
@JsonProperty("tags")
private EndpointTags tags;
- /** */
+ /**
+ * Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set this to
+ * `PRO` and also set the field `enable_serverless_compute` to `true`.
+ */
@JsonProperty("warehouse_type")
private CreateWarehouseRequestWarehouseType warehouseType;
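A minimal sketch (not part of this diff) of a create call that respects the constraints documented above: serverless requires `PRO` together with `enable_serverless_compute`, `auto_stop_mins` must be 0 or >= 10, and `max_num_clusters` must stay within the new <= 40 bound; the values are illustrative:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.CreateWarehouseRequest;
import com.databricks.sdk.service.sql.CreateWarehouseRequestWarehouseType;

public class CreateWarehouseExample {
  public static void main(String[] args) throws Exception {
    WorkspaceClient w = new WorkspaceClient();

    // Serverless compute needs warehouse_type=PRO plus
    // enable_serverless_compute=true; max_num_clusters must lie in
    // [min_num_clusters, 40] per the updated limits.
    w.warehouses()
        .create(
            new CreateWarehouseRequest()
                .setName("sdk-example-warehouse")
                .setClusterSize("Small")
                .setWarehouseType(CreateWarehouseRequestWarehouseType.PRO)
                .setEnableServerlessCompute(true)
                .setAutoStopMins(10L)
                .setMaxNumClusters(4L))
        .get(); // block until the warehouse reports RUNNING
  }
}
```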
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequestWarehouseType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequestWarehouseType.java
index f0b104ca6..03a31eee6 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequestWarehouseType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequestWarehouseType.java
@@ -4,10 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO`
- * and also set the field `enable_serverless_compute` to `true`.
- */
@Generated
public enum CreateWarehouseRequestWarehouseType {
CLASSIC,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequest.java
index d07e8e25b..251b1555b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequest.java
@@ -8,6 +8,11 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/**
+ * This is an incremental edit functionality, so all fields except id are optional. If a field is
+ * set, the corresponding configuration in the SQL warehouse is modified. If a field is unset, the
+ * existing configuration value in the SQL warehouse is retained. Thus, this API is not idempotent.
+ */
@Generated
public class EditWarehouseRequest {
/**
@@ -48,7 +53,7 @@ public class EditWarehouseRequest {
@JsonProperty("enable_photon")
private Boolean enablePhoton;
- /** Configures whether the warehouse should use serverless compute. */
+ /** Configures whether the warehouse should use serverless compute */
@JsonProperty("enable_serverless_compute")
private Boolean enableServerlessCompute;
@@ -62,7 +67,7 @@ public class EditWarehouseRequest {
/**
* Maximum number of clusters that the autoscaler will create to handle concurrent queries.
*
- * Supported values: - Must be >= min_num_clusters - Must be <= 30.
+ * Supported values: - Must be >= min_num_clusters - Must be <= 40.
*
* Defaults to min_clusters if unset.
*/
@@ -90,7 +95,7 @@ public class EditWarehouseRequest {
@JsonProperty("name")
private String name;
- /** */
+ /** Configures whether the endpoint should use spot instances. */
@JsonProperty("spot_instance_policy")
private SpotInstancePolicy spotInstancePolicy;
@@ -103,7 +108,10 @@ public class EditWarehouseRequest {
@JsonProperty("tags")
private EndpointTags tags;
- /** */
+ /**
+ * Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set this to
+ * `PRO` and also set the field `enable_serverless_compute` to `true`.
+ */
@JsonProperty("warehouse_type")
private EditWarehouseRequestWarehouseType warehouseType;
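Because the edit is incremental (only the fields that are set change), a targeted update can be issued without re-sending the whole warehouse configuration; a minimal sketch with a hypothetical warehouse ID:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.EditWarehouseRequest;

public class EditWarehouseExample {
  public static void main(String[] args) throws Exception {
    WorkspaceClient w = new WorkspaceClient();

    // Only id and max_num_clusters are set, so every other setting on the
    // warehouse keeps its current value (incremental, non-idempotent edit).
    w.warehouses()
        .edit(new EditWarehouseRequest().setId("<warehouse-id>").setMaxNumClusters(10L))
        .get(); // wait for the warehouse to return to RUNNING
  }
}
```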
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequestWarehouseType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequestWarehouseType.java
index 5c4337caa..b180bfd82 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequestWarehouseType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequestWarehouseType.java
@@ -4,10 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO`
- * and also set the field `enable_serverless_compute` to `true`.
- */
@Generated
public enum EditWarehouseRequestWarehouseType {
CLASSIC,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointHealth.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointHealth.java
index 243675c94..5caff457b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointHealth.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointHealth.java
@@ -24,7 +24,7 @@ public class EndpointHealth {
@JsonProperty("message")
private String message;
- /** */
+ /** Health status of the endpoint. */
@JsonProperty("status")
private Status status;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfo.java
index 92868d39b..03ae34840 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfo.java
@@ -70,7 +70,7 @@ public class EndpointInfo {
/**
* Maximum number of clusters that the autoscaler will create to handle concurrent queries.
*
- * Supported values: - Must be >= min_num_clusters - Must be <= 30.
+ * Supported values: - Must be >= min_num_clusters - Must be <= 40.
*
* Defaults to min_clusters if unset.
*/
@@ -110,11 +110,11 @@ public class EndpointInfo {
@JsonProperty("odbc_params")
private OdbcParams odbcParams;
- /** */
+ /** Configures whether the endpoint should use spot instances. */
@JsonProperty("spot_instance_policy")
private SpotInstancePolicy spotInstancePolicy;
- /** */
+ /** State of the endpoint. */
@JsonProperty("state")
private State state;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfoWarehouseType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfoWarehouseType.java
index 320369adf..498289a1d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfoWarehouseType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfoWarehouseType.java
@@ -4,10 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO`
- * and also set the field `enable_serverless_compute` to `true`.
- */
@Generated
public enum EndpointInfoWarehouseType {
CLASSIC,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExecuteStatementRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExecuteStatementRequest.java
index cc6231890..5aa72af6c 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExecuteStatementRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExecuteStatementRequest.java
@@ -15,7 +15,7 @@ public class ExecuteStatementRequest {
* data representations and might not match the final size in the requested `format`. If the
* result was truncated due to the byte limit, then `truncated` in the response is set to `true`.
* When using `EXTERNAL_LINKS` disposition, a default `byte_limit` of 100 GiB is applied if
- * `byte_limit` is not explcitly set.
+ * `byte_limit` is not explicitly set.
*/
@JsonProperty("byte_limit")
private Long byteLimit;
@@ -29,7 +29,29 @@ public class ExecuteStatementRequest {
@JsonProperty("catalog")
private String catalog;
- /** */
+ /**
+ * The fetch disposition provides two modes of fetching results: `INLINE` and `EXTERNAL_LINKS`.
+ *
+ * Statements executed with `INLINE` disposition will return result data inline, in
+ * `JSON_ARRAY` format, in a series of chunks. If a given statement produces a result set with a
+ * size larger than 25 MiB, that statement execution is aborted, and no result set will be
+ * available.
+ *
+ * **NOTE** Byte limits are computed based upon internal representations of the result set
+ * data, and might not match the sizes visible in JSON responses.
+ *
+ * Statements executed with `EXTERNAL_LINKS` disposition will return result data as external
+ * links: URLs that point to cloud storage internal to the workspace. Using `EXTERNAL_LINKS`
+ * disposition allows statements to generate arbitrarily sized result sets for fetching up to 100
+ * GiB. The resulting links have two important properties:
+ *
+ * 1. They point to resources _external_ to the Databricks compute; therefore any associated
+ * authentication information (typically a personal access token, OAuth token, or similar) _must
+ * be removed_ when fetching from these links.
+ *
+ * 2. These are URLs with a specific expiration, indicated in the response. The behavior when
+ * attempting to use an expired link is cloud specific.
+ */
@JsonProperty("disposition")
private Disposition disposition;
@@ -93,13 +115,13 @@ public class ExecuteStatementRequest {
*
* For example, the following statement contains two parameters, `my_name` and `my_date`:
*
- * SELECT * FROM my_table WHERE name = :my_name AND date = :my_date
+ * ``` SELECT * FROM my_table WHERE name = :my_name AND date = :my_date ```
*
* The parameters can be passed in the request body as follows:
*
- * { ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND date = :my_date",
+ * ` { ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND date = :my_date",
* "parameters": [ { "name": "my_name", "value": "the name" }, { "name": "my_date", "value":
- * "2020-01-01", "type": "DATE" } ] }
+ * "2020-01-01", "type": "DATE" } ] } `
*
* Currently, positional parameters denoted by a `?` marker are not supported by the Databricks
* SQL Statement Execution API.
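A minimal sketch (not part of this diff) tying these request fields together: an `INLINE` execution in synchronous mode with a named parameter; the warehouse ID is a hypothetical placeholder:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.Disposition;
import com.databricks.sdk.service.sql.ExecuteStatementRequest;
import com.databricks.sdk.service.sql.ExecuteStatementRequestOnWaitTimeout;
import com.databricks.sdk.service.sql.StatementParameterListItem;
import com.databricks.sdk.service.sql.StatementResponse;
import java.util.List;

public class ExecuteStatementExample {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();

    // Synchronous mode: wait up to 30s and cancel on timeout; INLINE keeps the
    // JSON_ARRAY result (<= 25 MiB) directly in the response.
    StatementResponse response =
        w.statementExecution()
            .executeStatement(
                new ExecuteStatementRequest()
                    .setWarehouseId("<warehouse-id>") // placeholder
                    .setDisposition(Disposition.INLINE)
                    .setWaitTimeout("30s")
                    .setOnWaitTimeout(ExecuteStatementRequestOnWaitTimeout.CANCEL)
                    .setStatement("SELECT * FROM my_table WHERE name = :my_name")
                    .setParameters(
                        List.of(
                            new StatementParameterListItem()
                                .setName("my_name")
                                .setValue("the name"))));

    System.out.println(response.getStatus().getState());
  }
}
```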
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExternalLink.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExternalLink.java
index 1b88216f2..569281981 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExternalLink.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExternalLink.java
@@ -28,7 +28,11 @@ public class ExternalLink {
@JsonProperty("expiration")
private String expiration;
- /** */
+ /**
+ * A URL pointing to a chunk of result data, hosted by an external service, with a short
+ * expiration time (<= 15 minutes). As this URL contains a temporary credential, it should be
+ * considered sensitive and the client should not expose this URL in a log.
+ */
@JsonProperty("external_link")
private String externalLink;
@@ -44,7 +48,7 @@ public class ExternalLink {
/**
* When fetching, provides the `chunk_index` for the _next_ chunk. If absent, indicates there are
* no more chunks. The next chunk can be fetched with a
- * :method:statementexecution/getStatementResultChunkN request.
+ * :method:statementexecution/getstatementresultchunkn request.
*/
@JsonProperty("next_chunk_index")
private Long nextChunkIndex;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponse.java
index e7cb13ba1..6a789a483 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponse.java
@@ -70,7 +70,7 @@ public class GetWarehouseResponse {
/**
* Maximum number of clusters that the autoscaler will create to handle concurrent queries.
*
- * Supported values: - Must be >= min_num_clusters - Must be <= 30.
+ * Supported values: - Must be >= min_num_clusters - Must be <= 40.
*
* Defaults to min_clusters if unset.
*/
@@ -110,11 +110,11 @@ public class GetWarehouseResponse {
@JsonProperty("odbc_params")
private OdbcParams odbcParams;
- /** */
+ /** Configures whether the endpoint should use spot instances. */
@JsonProperty("spot_instance_policy")
private SpotInstancePolicy spotInstancePolicy;
- /** */
+ /** State of the endpoint. */
@JsonProperty("state")
private State state;
@@ -127,7 +127,10 @@ public class GetWarehouseResponse {
@JsonProperty("tags")
private EndpointTags tags;
- /** */
+ /**
+ * Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set this to
+ * `PRO` and also set the field `enable_serverless_compute` to `true`.
+ */
@JsonProperty("warehouse_type")
private GetWarehouseResponseWarehouseType warehouseType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponseWarehouseType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponseWarehouseType.java
index 7e1ada451..ea11574db 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponseWarehouseType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponseWarehouseType.java
@@ -4,10 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO`
- * and also set the field `enable_serverless_compute` to `true`.
- */
@Generated
public enum GetWarehouseResponseWarehouseType {
CLASSIC,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWorkspaceWarehouseConfigResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWorkspaceWarehouseConfigResponse.java
index 17524f5d2..50913917d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWorkspaceWarehouseConfigResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWorkspaceWarehouseConfigResponse.java
@@ -25,6 +25,10 @@ public class GetWorkspaceWarehouseConfigResponse {
@JsonProperty("data_access_config")
private Collection<EndpointConfPair> dataAccessConfig;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesRequest.java
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesRequest.java
+ /** The max number of warehouses to return. */
+ @JsonIgnore
+ @QueryParam("page_size")
+ private Long pageSize;
+
+ /**
+ * A page token, received from a previous `ListWarehouses` call. Provide this to retrieve the
+ * subsequent page.
+ *
+ * When paginating, all other parameters provided to `ListWarehouses` must match the call that
+ * provided the page token.
+ */
+ @JsonIgnore
+ @QueryParam("page_token")
+ private String pageToken;
+
/**
- * Service Principal which will be used to fetch the list of warehouses. If not specified, the
- * user from the session header is used.
+ * Service Principal which will be used to fetch the list of endpoints. If not specified, SQL
+ * Gateway will use the user from the session header.
*/
@JsonIgnore
@QueryParam("run_as_user_id")
private Long runAsUserId;
+ public ListWarehousesRequest setPageSize(Long pageSize) {
+ this.pageSize = pageSize;
+ return this;
+ }
+
+ public Long getPageSize() {
+ return pageSize;
+ }
+
+ public ListWarehousesRequest setPageToken(String pageToken) {
+ this.pageToken = pageToken;
+ return this;
+ }
+
+ public String getPageToken() {
+ return pageToken;
+ }
+
public ListWarehousesRequest setRunAsUserId(Long runAsUserId) {
this.runAsUserId = runAsUserId;
return this;
@@ -32,16 +66,22 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ListWarehousesRequest that = (ListWarehousesRequest) o;
- return Objects.equals(runAsUserId, that.runAsUserId);
+ return Objects.equals(pageSize, that.pageSize)
+ && Objects.equals(pageToken, that.pageToken)
+ && Objects.equals(runAsUserId, that.runAsUserId);
}
@Override
public int hashCode() {
- return Objects.hash(runAsUserId);
+ return Objects.hash(pageSize, pageToken, runAsUserId);
}
@Override
public String toString() {
- return new ToStringer(ListWarehousesRequest.class).add("runAsUserId", runAsUserId).toString();
+ return new ToStringer(ListWarehousesRequest.class)
+ .add("pageSize", pageSize)
+ .add("pageToken", pageToken)
+ .add("runAsUserId", runAsUserId)
+ .toString();
}
}
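With the new `page_size`/`page_token` parameters, callers can bound each underlying page; a minimal sketch, assuming the generated `list()` iterable follows `next_page_token` transparently (the usual SDK pagination pattern):

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.EndpointInfo;
import com.databricks.sdk.service.sql.ListWarehousesRequest;

public class ListWarehousesExample {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();

    // page_size caps each HTTP page; the returned Iterable is assumed to chase
    // next_page_token internally, so iteration spans all pages.
    for (EndpointInfo warehouse :
        w.warehouses().list(new ListWarehousesRequest().setPageSize(20L))) {
      System.out.println(warehouse.getName() + " -> " + warehouse.getState());
    }
  }
}
```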
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesResponse.java
index 53eb8c282..8f2aed6f3 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesResponse.java
@@ -10,10 +10,26 @@
@Generated
public class ListWarehousesResponse {
+ /**
+ * A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted,
+ * there are no subsequent pages.
+ */
+ @JsonProperty("next_page_token")
+ private String nextPageToken;
+
/** A list of warehouses and their configurations. */
@JsonProperty("warehouses")
private Collection<EndpointInfo> warehouses;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SetWorkspaceWarehouseConfigRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SetWorkspaceWarehouseConfigRequest.java
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SetWorkspaceWarehouseConfigRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SetWorkspaceWarehouseConfigRequest.java
+/**
+ * Sets the workspace level configuration that is shared by all SQL warehouses in a workspace.
+ *
+ * This is idempotent.
+ */
@Generated
public class SetWorkspaceWarehouseConfigRequest {
/** Optional: Channel selection details */
@@ -25,6 +31,10 @@ public class SetWorkspaceWarehouseConfigRequest {
@JsonProperty("data_access_config")
private Collection<EndpointConfPair> dataAccessConfig;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SpotInstancePolicy.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SpotInstancePolicy.java
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SpotInstancePolicy.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SpotInstancePolicy.java
+/**
+ * Configures whether the endpoint should use spot instances.
+ *
+ * The breakdown of how the EndpointSpotInstancePolicy converts to per cloud configurations is:
+ *
+ * +-------+--------------------------------------+--------------------------------+
+ * | Cloud | COST_OPTIMIZED                       | RELIABILITY_OPTIMIZED          |
+ * +-------+--------------------------------------+--------------------------------+
+ * | AWS   | On Demand Driver with Spot Executors | On Demand Driver and Executors |
+ * | AZURE | On Demand Driver and Executors       | On Demand Driver and Executors |
+ * +-------+--------------------------------------+--------------------------------+
+ *
+ * While including "spot" in the enum name may limit the future extensibility of this field
+ * because it limits this enum to denoting "spot or not", this is the field that PM recommends
+ * after discussion with customers per SC-48783.
+ */
@Generated
public enum SpotInstancePolicy {
COST_OPTIMIZED,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/State.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/State.java
index 64dde2ce5..c6f05715e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/State.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/State.java
@@ -4,7 +4,7 @@
import com.databricks.sdk.support.Generated;
-/** State of the warehouse */
+/** State of a warehouse. */
@Generated
public enum State {
DELETED,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionAPI.java
index 16b6fa0b0..c47eed4aa 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionAPI.java
@@ -33,18 +33,19 @@
* yet finished. This can be set to either `CONTINUE`, to fallback to asynchronous mode, or it can
* be set to `CANCEL`, which cancels the statement.
*
- * In summary: - Synchronous mode - `wait_timeout=30s` and `on_wait_timeout=CANCEL` - The call
+ * In summary: - **Synchronous mode** (`wait_timeout=30s` and `on_wait_timeout=CANCEL`): The call
* waits up to 30 seconds; if the statement execution finishes within this time, the result data is
* returned directly in the response. If the execution takes longer than 30 seconds, the execution
- * is canceled and the call returns with a `CANCELED` state. - Asynchronous mode - `wait_timeout=0s`
- * (`on_wait_timeout` is ignored) - The call doesn't wait for the statement to finish but returns
- * directly with a statement ID. The status of the statement execution can be polled by issuing
- * :method:statementexecution/getStatement with the statement ID. Once the execution has succeeded,
- * this call also returns the result and metadata in the response. - Hybrid mode (default) -
- * `wait_timeout=10s` and `on_wait_timeout=CONTINUE` - The call waits for up to 10 seconds; if the
- * statement execution finishes within this time, the result data is returned directly in the
- * response. If the execution takes longer than 10 seconds, a statement ID is returned. The
- * statement ID can be used to fetch status and results in the same way as in the asynchronous mode.
+ * is canceled and the call returns with a `CANCELED` state. - **Asynchronous mode**
+ * (`wait_timeout=0s` and `on_wait_timeout` is ignored): The call doesn't wait for the statement to
+ * finish but returns directly with a statement ID. The status of the statement execution can be
+ * polled by issuing :method:statementexecution/getStatement with the statement ID. Once the
+ * execution has succeeded, this call also returns the result and metadata in the response. -
+ * **[Default] Hybrid mode** (`wait_timeout=10s` and `on_wait_timeout=CONTINUE`): The call waits for
+ * up to 10 seconds; if the statement execution finishes within this time, the result data is
+ * returned directly in the response. If the execution takes longer than 10 seconds, a statement ID
+ * is returned. The statement ID can be used to fetch status and results in the same way as in the
+ * asynchronous mode.
*
* Depending on the size, the result can be split into multiple chunks. If the statement
* execution is successful, the statement response contains a manifest and the first chunk of the
@@ -114,13 +115,61 @@ public StatementExecutionAPI(StatementExecutionService mock) {
/**
* Requests that an executing statement be canceled. Callers must poll for status to see the
- * terminal state.
+ * terminal state. The cancel response is empty; receiving a response indicates that the
+ * cancellation request was received.
*/
public void cancelExecution(CancelExecutionRequest request) {
impl.cancelExecution(request);
}
- /** Execute a SQL statement */
+ /**
+ * Execute a SQL statement and optionally await its results for a specified time.
+ *
+ * **Use case: small result sets with INLINE + JSON_ARRAY**
+ *
+ * For flows that generate small and predictable result sets (<= 25 MiB), `INLINE` responses of
+ * `JSON_ARRAY` result data are typically the simplest way to execute and fetch result data.
+ *
+ * **Use case: large result sets with EXTERNAL_LINKS**
+ *
+ * Using `EXTERNAL_LINKS` to fetch result data allows you to fetch large result sets
+ * efficiently. The main differences from using `INLINE` disposition are that the result data is
+ * accessed with URLs, and that there are 3 supported formats: `JSON_ARRAY`, `ARROW_STREAM` and
+ * `CSV` compared to only `JSON_ARRAY` with `INLINE`.
+ *
+ * **Presigned URLs**
+ *
+ * External links point to data stored within your workspace's internal storage, in the form of
+ * a presigned URL. The URLs are valid for only a short period, <= 15 minutes. Alongside each
+ * `external_link` is an expiration field indicating the time at which the URL is no longer valid.
+ * In `EXTERNAL_LINKS` mode, chunks can be resolved and fetched multiple times and in parallel.
+ *
+ * ----
+ *
+ * ### **Warning: Databricks strongly recommends that you protect the URLs that are returned by
+ * the `EXTERNAL_LINKS` disposition.**
+ *
+ * When you use the `EXTERNAL_LINKS` disposition, a short-lived, presigned URL is generated,
+ * which can be used to download the results directly from that storage. As a short-lived
+ * credential is embedded in this presigned URL, you should protect the URL.
+ *
+ * Because presigned URLs are already generated with embedded temporary credentials, you must
+ * not set an `Authorization` header in the download requests.
+ *
+ * The `EXTERNAL_LINKS` disposition can be disabled upon request by creating a support case.
+ *
+ * See also [Security best practices].
+ *
+ * ----
+ *
+ * StatementResponse contains `statement_id` and `status`; other fields might be absent or
+ * present depending on context. If the SQL warehouse fails to execute the provided statement, a
+ * 200 response is returned with `status.state` set to `FAILED` (in contrast to a failure when
+ * accepting the request, which results in a non-200 response). Details of the error can be found
+ * at `status.error` in case of execution failures.
+ *
+ * [Security best practices]:
+ * https://docs.databricks.com/sql/admin/sql-execution-tutorial.html#security-best-practices
+ */
public StatementResponse executeStatement(ExecuteStatementRequest request) {
return impl.executeStatement(request);
}
@@ -130,11 +179,13 @@ public StatementResponse getStatement(String statementId) {
}
/**
- * This request can be used to poll for the statement's status. When the `status.state` field is
- * `SUCCEEDED` it will also return the result manifest and the first chunk of the result data.
- * When the statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP
- * 200 with the state set. After at least 12 hours in terminal state, the statement is removed
- * from the warehouse and further calls will receive an HTTP 404 response.
+ * This request can be used to poll for the statement's status. StatementResponse contains
+ * `statement_id` and `status`; other fields might be absent or present depending on context. When
+ * the `status.state` field is `SUCCEEDED` it will also return the result manifest and the first
+ * chunk of the result data. When the statement is in the terminal states `CANCELED`, `CLOSED` or
+ * `FAILED`, it returns HTTP 200 with the state set. After at least 12 hours in terminal state,
+ * the statement is removed from the warehouse and further calls will receive an HTTP 404
+ * response.
*
* **NOTE** This call currently might take up to 5 seconds to get the latest status and result.
*/
@@ -156,7 +207,8 @@ public ResultData getStatementResultChunkN(String statementId, long chunkIndex)
* request can be used to fetch subsequent chunks. The response structure is identical to the
* nested `result` element described in the :method:statementexecution/getStatement request, and
* similarly includes the `next_chunk_index` and `next_chunk_internal_link` fields for simple
- * iteration through the result set.
+ * iteration through the result set. Depending on `disposition`, the response returns chunks of
+ * data either inline or as links.
*/
public ResultData getStatementResultChunkN(GetStatementResultChunkNRequest request) {
return impl.getStatementResultChunkN(request);
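
The three execution modes documented above are easiest to see in code. Below is a minimal, hypothetical sketch against `StatementExecutionAPI`; the `<warehouse-id>` value is a placeholder, error handling is omitted, and the `setX`/`getX` accessors follow the generated builder pattern used throughout this SDK, so verify names against the SDK version in use.

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.ExecuteStatementRequest;
import com.databricks.sdk.service.sql.ExecuteStatementRequestOnWaitTimeout;
import com.databricks.sdk.service.sql.StatementResponse;
import com.databricks.sdk.service.sql.StatementState;

public class ExecutionModes {
  public static void main(String[] args) throws InterruptedException {
    WorkspaceClient w = new WorkspaceClient(); // reads auth from the environment/.databrickscfg

    // Synchronous mode: wait up to 30s; cancel the statement on timeout.
    StatementResponse sync =
        w.statementExecution()
            .executeStatement(
                new ExecuteStatementRequest()
                    .setWarehouseId("<warehouse-id>") // placeholder
                    .setStatement("SELECT 1")
                    .setWaitTimeout("30s")
                    .setOnWaitTimeout(ExecuteStatementRequestOnWaitTimeout.CANCEL));

    // Asynchronous mode: return immediately with a statement ID, then poll.
    StatementResponse async =
        w.statementExecution()
            .executeStatement(
                new ExecuteStatementRequest()
                    .setWarehouseId("<warehouse-id>")
                    .setStatement("SELECT 1")
                    .setWaitTimeout("0s"));
    String statementId = async.getStatementId();
    StatementResponse polled = w.statementExecution().getStatement(statementId);
    while (polled.getStatus().getState() == StatementState.PENDING
        || polled.getStatus().getState() == StatementState.RUNNING) {
      Thread.sleep(2000); // per the docs, status may lag by up to 5 seconds
      polled = w.statementExecution().getStatement(statementId);
    }
  }
}
```

Hybrid mode is the default (`wait_timeout=10s`, `on_wait_timeout=CONTINUE`), so omitting both fields yields the wait-then-fall-back-to-polling behavior.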
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionImpl.java
index 4c321af78..763e09ac2 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionImpl.java
@@ -30,7 +30,7 @@ public void cancelExecution(CancelExecutionRequest request) {
@Override
public StatementResponse executeStatement(ExecuteStatementRequest request) {
- String path = "/api/2.0/sql/statements/";
+ String path = "/api/2.0/sql/statements";
try {
Request req = new Request("POST", path, apiClient.serialize(request));
ApiClient.setQuery(req, request);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionService.java
index 50fae0fc5..951af8946 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionService.java
@@ -30,18 +30,19 @@
* yet finished. This can be set to either `CONTINUE`, to fallback to asynchronous mode, or it can
* be set to `CANCEL`, which cancels the statement.
*
- * In summary: - Synchronous mode - `wait_timeout=30s` and `on_wait_timeout=CANCEL` - The call
+ * In summary: - **Synchronous mode** (`wait_timeout=30s` and `on_wait_timeout=CANCEL`): The call
* waits up to 30 seconds; if the statement execution finishes within this time, the result data is
* returned directly in the response. If the execution takes longer than 30 seconds, the execution
- * is canceled and the call returns with a `CANCELED` state. - Asynchronous mode - `wait_timeout=0s`
- * (`on_wait_timeout` is ignored) - The call doesn't wait for the statement to finish but returns
- * directly with a statement ID. The status of the statement execution can be polled by issuing
- * :method:statementexecution/getStatement with the statement ID. Once the execution has succeeded,
- * this call also returns the result and metadata in the response. - Hybrid mode (default) -
- * `wait_timeout=10s` and `on_wait_timeout=CONTINUE` - The call waits for up to 10 seconds; if the
- * statement execution finishes within this time, the result data is returned directly in the
- * response. If the execution takes longer than 10 seconds, a statement ID is returned. The
- * statement ID can be used to fetch status and results in the same way as in the asynchronous mode.
+ * is canceled and the call returns with a `CANCELED` state. - **Asynchronous mode**
+ * (`wait_timeout=0s` and `on_wait_timeout` is ignored): The call doesn't wait for the statement to
+ * finish but returns directly with a statement ID. The status of the statement execution can be
+ * polled by issuing :method:statementexecution/getStatement with the statement ID. Once the
+ * execution has succeeded, this call also returns the result and metadata in the response. -
+ * **[Default] Hybrid mode** (`wait_timeout=10s` and `on_wait_timeout=CONTINUE`): The call waits for
+ * up to 10 seconds; if the statement execution finishes within this time, the result data is
+ * returned directly in the response. If the execution takes longer than 10 seconds, a statement ID
+ * is returned. The statement ID can be used to fetch status and results in the same way as in the
+ * asynchronous mode.
*
* Depending on the size, the result can be split into multiple chunks. If the statement
* execution is successful, the statement response contains a manifest and the first chunk of the
@@ -101,19 +102,69 @@
public interface StatementExecutionService {
/**
* Requests that an executing statement be canceled. Callers must poll for status to see the
- * terminal state.
+ * terminal state. The cancel response is empty; receiving a response indicates that the
+ * cancellation request was received.
*/
void cancelExecution(CancelExecutionRequest cancelExecutionRequest);
- /** Execute a SQL statement */
+ /**
+ * Execute a SQL statement and optionally await its results for a specified time.
+ *
+ * **Use case: small result sets with INLINE + JSON_ARRAY**
+ *
+ * For flows that generate small and predictable result sets (<= 25 MiB), `INLINE` responses of
+ * `JSON_ARRAY` result data are typically the simplest way to execute and fetch result data.
+ *
+ * **Use case: large result sets with EXTERNAL_LINKS**
+ *
+ * Using `EXTERNAL_LINKS` to fetch result data allows you to fetch large result sets
+ * efficiently. The main differences from using `INLINE` disposition are that the result data is
+ * accessed with URLs, and that there are 3 supported formats: `JSON_ARRAY`, `ARROW_STREAM` and
+ * `CSV` compared to only `JSON_ARRAY` with `INLINE`.
+ *
+ * **Presigned URLs**
+ *
+ * External links point to data stored within your workspace's internal storage, in the form of
+ * a presigned URL. The URLs are valid for only a short period, <= 15 minutes. Alongside each
+ * `external_link` is an expiration field indicating the time at which the URL is no longer valid.
+ * In `EXTERNAL_LINKS` mode, chunks can be resolved and fetched multiple times and in parallel.
+ *
+ * ----
+ *
+ * ### **Warning: Databricks strongly recommends that you protect the URLs that are returned by
+ * the `EXTERNAL_LINKS` disposition.**
+ *
+ * When you use the `EXTERNAL_LINKS` disposition, a short-lived, presigned URL is generated,
+ * which can be used to download the results directly from that storage. As a short-lived
+ * credential is embedded in this presigned URL, you should protect the URL.
+ *
+ * Because presigned URLs are already generated with embedded temporary credentials, you must
+ * not set an `Authorization` header in the download requests.
+ *
+ * The `EXTERNAL_LINKS` disposition can be disabled upon request by creating a support case.
+ *
+ * See also [Security best practices].
+ *
+ * ----
+ *
+ * StatementResponse contains `statement_id` and `status`; other fields might be absent or
+ * present depending on context. If the SQL warehouse fails to execute the provided statement, a
+ * 200 response is returned with `status.state` set to `FAILED` (in contrast to a failure when
+ * accepting the request, which results in a non-200 response). Details of the error can be found
+ * at `status.error` in case of execution failures.
+ *
+ * [Security best practices]:
+ * https://docs.databricks.com/sql/admin/sql-execution-tutorial.html#security-best-practices
+ */
StatementResponse executeStatement(ExecuteStatementRequest executeStatementRequest);
/**
- * This request can be used to poll for the statement's status. When the `status.state` field is
- * `SUCCEEDED` it will also return the result manifest and the first chunk of the result data.
- * When the statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP
- * 200 with the state set. After at least 12 hours in terminal state, the statement is removed
- * from the warehouse and further calls will receive an HTTP 404 response.
+ * This request can be used to poll for the statement's status. StatementResponse contains
+ * `statement_id` and `status`; other fields might be absent or present depending on context. When
+ * the `status.state` field is `SUCCEEDED` it will also return the result manifest and the first
+ * chunk of the result data. When the statement is in the terminal states `CANCELED`, `CLOSED` or
+ * `FAILED`, it returns HTTP 200 with the state set. After at least 12 hours in terminal state,
+ * the statement is removed from the warehouse and further calls will receive an HTTP 404
+ * response.
*
* **NOTE** This call currently might take up to 5 seconds to get the latest status and result.
*/
@@ -126,7 +177,8 @@ public interface StatementExecutionService {
* request can be used to fetch subsequent chunks. The response structure is identical to the
* nested `result` element described in the :method:statementexecution/getStatement request, and
* similarly includes the `next_chunk_index` and `next_chunk_internal_link` fields for simple
- * iteration through the result set.
+ * iteration through the result set. Depending on `disposition`, the response returns chunks of
+ * data either inline or as links.
*/
ResultData getStatementResultChunkN(
GetStatementResultChunkNRequest getStatementResultChunkNRequest);
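
A sketch of the `EXTERNAL_LINKS` flow documented above: execute with the external-links disposition, then walk the chunks via `getStatementResultChunkN`. The warehouse ID and sample query are placeholders, and the `Disposition`, `Format`, `ResultData`, and `ExternalLink` accessors are assumed to follow the generated naming in this package.

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.Disposition;
import com.databricks.sdk.service.sql.ExecuteStatementRequest;
import com.databricks.sdk.service.sql.ExternalLink;
import com.databricks.sdk.service.sql.Format;
import com.databricks.sdk.service.sql.ResultData;
import com.databricks.sdk.service.sql.StatementResponse;

public class ExternalLinksFetch {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();

    StatementResponse resp =
        w.statementExecution()
            .executeStatement(
                new ExecuteStatementRequest()
                    .setWarehouseId("<warehouse-id>") // placeholder
                    .setStatement("SELECT * FROM samples.nyctaxi.trips LIMIT 100000")
                    .setDisposition(Disposition.EXTERNAL_LINKS)
                    .setFormat(Format.ARROW_STREAM)
                    .setWaitTimeout("30s"));

    // Walk the chunks; each carries presigned URLs plus the index of the next chunk.
    ResultData chunk = resp.getResult();
    while (chunk != null) {
      if (chunk.getExternalLinks() != null) {
        for (ExternalLink link : chunk.getExternalLinks()) {
          // Download link.getExternalLink() with a plain HTTP GET; do NOT attach an
          // Authorization header, since credentials are embedded in the URL itself.
          System.out.println(link.getExternalLink() + " expires " + link.getExpiration());
        }
      }
      Long next = chunk.getNextChunkIndex();
      chunk =
          next == null
              ? null
              : w.statementExecution().getStatementResultChunkN(resp.getStatementId(), next);
    }
  }
}
```

Because chunks can be resolved multiple times and in parallel, the same loop can be sharded across workers by chunk index.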
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementState.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementState.java
index 87cf79688..7d1d7a62c 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementState.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementState.java
@@ -4,21 +4,12 @@
import com.databricks.sdk.support.Generated;
-/**
- * Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: running - `SUCCEEDED`:
- * execution was successful, result data available for fetch - `FAILED`: execution failed; reason
- * for failure described in accomanying error message - `CANCELED`: user canceled; can come from
- * explicit cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`: execution successful,
- * and statement closed; result no longer available for fetch
- */
@Generated
public enum StatementState {
- CANCELED, // user canceled; can come from explicit cancel call, or timeout with
- // `on_wait_timeout=CANCEL`
- CLOSED, // execution successful, and statement closed; result no longer available for
- // fetch
- FAILED, // execution failed; reason for failure described in accomanying error message
- PENDING, // waiting for warehouse
- RUNNING, // running
- SUCCEEDED, // execution was successful, result data available for fetch
+ CANCELED,
+ CLOSED,
+ FAILED,
+ PENDING,
+ RUNNING,
+ SUCCEEDED,
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementStatus.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementStatus.java
index ddbfd8aa0..0fd5f703d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementStatus.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementStatus.java
@@ -14,7 +14,13 @@ public class StatementStatus {
@JsonProperty("error")
private ServiceError error;
- /** */
+ /**
+ * Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: running -
+ * `SUCCEEDED`: execution was successful, result data available for fetch - `FAILED`: execution
+ * failed; reason for failure described in accompanying error message - `CANCELED`: user canceled;
+ * can come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`:
+ * execution successful, and statement closed; result no longer available for fetch
+ */
@JsonProperty("state")
private StatementState state;
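
The state semantics restored in this Javadoc map directly onto client-side handling. A minimal illustrative sketch, using only the generated `StatementState` values and the `getStatus()`/`getError()` accessors:

```java
import com.databricks.sdk.service.sql.StatementResponse;
import com.databricks.sdk.service.sql.StatementState;

public class StatementStates {
  // Illustrative helper: true once the statement can no longer change state.
  static boolean isTerminal(StatementState state) {
    return state == StatementState.SUCCEEDED
        || state == StatementState.FAILED
        || state == StatementState.CANCELED
        || state == StatementState.CLOSED;
  }

  static void report(StatementResponse resp) {
    switch (resp.getStatus().getState()) {
      case SUCCEEDED: // result manifest and first chunk are on the response
        break;
      case FAILED: // reason described in the accompanying error message
        System.err.println(resp.getStatus().getError().getMessage());
        break;
      case CANCELED: // explicit cancel, or timeout with on_wait_timeout=CANCEL
      case CLOSED: // execution succeeded, but results are no longer fetchable
        break;
      default: // PENDING or RUNNING: keep polling
        break;
    }
  }
}
```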
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/Status.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/Status.java
index 6d33b75e1..9d1a89702 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/Status.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/Status.java
@@ -4,11 +4,9 @@
import com.databricks.sdk.support.Generated;
-/** Health status of the warehouse. */
@Generated
public enum Status {
DEGRADED,
FAILED,
HEALTHY,
- STATUS_UNSPECIFIED,
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/TerminationReasonCode.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/TerminationReasonCode.java
index 3ee502add..8bfe1b758 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/TerminationReasonCode.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/TerminationReasonCode.java
@@ -4,21 +4,36 @@
import com.databricks.sdk.support.Generated;
-/** status code indicating why the cluster was terminated */
+/** The status code indicating why the cluster was terminated. */
@Generated
public enum TerminationReasonCode {
ABUSE_DETECTED,
+ ACCESS_TOKEN_FAILURE,
+ ALLOCATION_TIMEOUT,
+ ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY,
+ ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS,
+ ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS,
+ ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS,
+ ALLOCATION_TIMEOUT_NO_READY_CLUSTERS,
+ ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS,
+ ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS,
ATTACH_PROJECT_FAILURE,
AWS_AUTHORIZATION_FAILURE,
+ AWS_INACCESSIBLE_KMS_KEY_FAILURE,
+ AWS_INSTANCE_PROFILE_UPDATE_FAILURE,
AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE,
AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE,
+ AWS_INVALID_KEY_PAIR,
+ AWS_INVALID_KMS_KEY_STATE,
AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE,
AWS_REQUEST_LIMIT_EXCEEDED,
+ AWS_RESOURCE_QUOTA_EXCEEDED,
AWS_UNSUPPORTED_FAILURE,
AZURE_BYOK_KEY_PERMISSION_FAILURE,
AZURE_EPHEMERAL_DISK_FAILURE,
AZURE_INVALID_DEPLOYMENT_TEMPLATE,
AZURE_OPERATION_NOT_ALLOWED_EXCEPTION,
+ AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE,
AZURE_QUOTA_EXCEEDED_EXCEPTION,
AZURE_RESOURCE_MANAGER_THROTTLING,
AZURE_RESOURCE_PROVIDER_THROTTLING,
@@ -27,63 +42,148 @@ public enum TerminationReasonCode {
AZURE_VNET_CONFIGURATION_FAILURE,
BOOTSTRAP_TIMEOUT,
BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION,
+ BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG,
+ BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED,
+ BUDGET_POLICY_RESOLUTION_FAILURE,
+ CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED,
+ CLOUD_ACCOUNT_SETUP_FAILURE,
+ CLOUD_OPERATION_CANCELLED,
CLOUD_PROVIDER_DISK_SETUP_FAILURE,
+ CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED,
CLOUD_PROVIDER_LAUNCH_FAILURE,
+ CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG,
CLOUD_PROVIDER_RESOURCE_STOCKOUT,
+ CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG,
CLOUD_PROVIDER_SHUTDOWN,
+ CLUSTER_OPERATION_THROTTLED,
+ CLUSTER_OPERATION_TIMEOUT,
COMMUNICATION_LOST,
CONTAINER_LAUNCH_FAILURE,
CONTROL_PLANE_REQUEST_FAILURE,
+ CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG,
DATABASE_CONNECTION_FAILURE,
+ DATA_ACCESS_CONFIG_CHANGED,
DBFS_COMPONENT_UNHEALTHY,
+ DISASTER_RECOVERY_REPLICATION,
+ DNS_RESOLUTION_ERROR,
+ DOCKER_CONTAINER_CREATION_EXCEPTION,
DOCKER_IMAGE_PULL_FAILURE,
+ DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION,
+ DOCKER_INVALID_OS_EXCEPTION,
+ DRIVER_DNS_RESOLUTION_FAILURE,
+ DRIVER_EVICTION,
+ DRIVER_LAUNCH_TIMEOUT,
+ DRIVER_NODE_UNREACHABLE,
+ DRIVER_OUT_OF_DISK,
+ DRIVER_OUT_OF_MEMORY,
+ DRIVER_POD_CREATION_FAILURE,
+ DRIVER_UNEXPECTED_FAILURE,
+ DRIVER_UNHEALTHY,
DRIVER_UNREACHABLE,
DRIVER_UNRESPONSIVE,
+ DYNAMIC_SPARK_CONF_SIZE_EXCEEDED,
+ EOS_SPARK_IMAGE,
EXECUTION_COMPONENT_UNHEALTHY,
+ EXECUTOR_POD_UNSCHEDULED,
+ GCP_API_RATE_QUOTA_EXCEEDED,
+ GCP_DENIED_BY_ORG_POLICY,
+ GCP_FORBIDDEN,
+ GCP_IAM_TIMEOUT,
+ GCP_INACCESSIBLE_KMS_KEY_FAILURE,
+ GCP_INSUFFICIENT_CAPACITY,
+ GCP_IP_SPACE_EXHAUSTED,
+ GCP_KMS_KEY_PERMISSION_DENIED,
+ GCP_NOT_FOUND,
GCP_QUOTA_EXCEEDED,
+ GCP_RESOURCE_QUOTA_EXCEEDED,
+ GCP_SERVICE_ACCOUNT_ACCESS_DENIED,
GCP_SERVICE_ACCOUNT_DELETED,
+ GCP_SERVICE_ACCOUNT_NOT_FOUND,
+ GCP_SUBNET_NOT_READY,
+ GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED,
+ GKE_BASED_CLUSTER_TERMINATION,
GLOBAL_INIT_SCRIPT_FAILURE,
HIVE_METASTORE_PROVISIONING_FAILURE,
IMAGE_PULL_PERMISSION_DENIED,
INACTIVITY,
+ INIT_CONTAINER_NOT_FINISHED,
INIT_SCRIPT_FAILURE,
INSTANCE_POOL_CLUSTER_FAILURE,
+ INSTANCE_POOL_MAX_CAPACITY_REACHED,
+ INSTANCE_POOL_NOT_FOUND,
INSTANCE_UNREACHABLE,
+ INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG,
+ INTERNAL_CAPACITY_FAILURE,
INTERNAL_ERROR,
INVALID_ARGUMENT,
+ INVALID_AWS_PARAMETER,
+ INVALID_INSTANCE_PLACEMENT_PROTOCOL,
INVALID_SPARK_IMAGE,
+ INVALID_WORKER_IMAGE_FAILURE,
+ IN_PENALTY_BOX,
IP_EXHAUSTION_FAILURE,
JOB_FINISHED,
+ K8S_ACTIVE_POD_QUOTA_EXCEEDED,
K8S_AUTOSCALING_FAILURE,
K8S_DBR_CLUSTER_LAUNCH_TIMEOUT,
+ LAZY_ALLOCATION_TIMEOUT,
+ MAINTENANCE_MODE,
METASTORE_COMPONENT_UNHEALTHY,
NEPHOS_RESOURCE_MANAGEMENT,
+ NETVISOR_SETUP_TIMEOUT,
+ NETWORK_CHECK_CONTROL_PLANE_FAILURE,
+ NETWORK_CHECK_DNS_SERVER_FAILURE,
+ NETWORK_CHECK_METADATA_ENDPOINT_FAILURE,
+ NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE,
+ NETWORK_CHECK_NIC_FAILURE,
+ NETWORK_CHECK_STORAGE_FAILURE,
NETWORK_CONFIGURATION_FAILURE,
NFS_MOUNT_FAILURE,
+ NO_ACTIVATED_K8S,
+ NO_ACTIVATED_K8S_TESTING_TAG,
+ NO_MATCHED_K8S,
+ NO_MATCHED_K8S_TESTING_TAG,
NPIP_TUNNEL_SETUP_FAILURE,
NPIP_TUNNEL_TOKEN_FAILURE,
+ POD_ASSIGNMENT_FAILURE,
+ POD_SCHEDULING_FAILURE,
REQUEST_REJECTED,
REQUEST_THROTTLED,
+ RESOURCE_USAGE_BLOCKED,
+ SECRET_CREATION_FAILURE,
+ SECRET_PERMISSION_DENIED,
SECRET_RESOLUTION_ERROR,
+ SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION,
SECURITY_DAEMON_REGISTRATION_EXCEPTION,
SELF_BOOTSTRAP_FAILURE,
+ SERVERLESS_LONG_RUNNING_TERMINATED,
SKIPPED_SLOW_NODES,
SLOW_IMAGE_DOWNLOAD,
SPARK_ERROR,
SPARK_IMAGE_DOWNLOAD_FAILURE,
+ SPARK_IMAGE_DOWNLOAD_THROTTLED,
+ SPARK_IMAGE_NOT_FOUND,
SPARK_STARTUP_FAILURE,
SPOT_INSTANCE_TERMINATION,
+ SSH_BOOTSTRAP_FAILURE,
STORAGE_DOWNLOAD_FAILURE,
+ STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG,
+ STORAGE_DOWNLOAD_FAILURE_SLOW,
+ STORAGE_DOWNLOAD_FAILURE_THROTTLED,
STS_CLIENT_SETUP_FAILURE,
SUBNET_EXHAUSTED_FAILURE,
TEMPORARILY_UNAVAILABLE,
TRIAL_EXPIRED,
UNEXPECTED_LAUNCH_FAILURE,
+ UNEXPECTED_POD_RECREATION,
UNKNOWN,
UNSUPPORTED_INSTANCE_TYPE,
UPDATE_INSTANCE_PROFILE_FAILURE,
+ USAGE_POLICY_ENTITLEMENT_DENIED,
+ USER_INITIATED_VM_TERMINATION,
USER_REQUEST,
WORKER_SETUP_FAILURE,
WORKSPACE_CANCELLED_ERROR,
WORKSPACE_CONFIGURATION_ERROR,
+ WORKSPACE_UPDATE,
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePair.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePair.java
index 562866b2e..bf221ba27 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePair.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePair.java
@@ -7,6 +7,10 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/**
+ * Configuration values to enable or disable access to specific warehouse types in the
+ * workspace.
+ */
@Generated
public class WarehouseTypePair {
/**
@@ -16,7 +20,7 @@ public class WarehouseTypePair {
@JsonProperty("enabled")
private Boolean enabled;
- /** Warehouse type: `PRO` or `CLASSIC`. */
+ /** */
@JsonProperty("warehouse_type")
private WarehouseTypePairWarehouseType warehouseType;
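
For context, `WarehouseTypePair` entries are used in the workspace warehouse configuration. A hypothetical sketch follows, assuming `SetWorkspaceWarehouseConfigRequest` and its `setEnabledWarehouseTypes` setter from the generated warehouses API; confirm both against the SDK version in use.

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.SetWorkspaceWarehouseConfigRequest;
import com.databricks.sdk.service.sql.WarehouseTypePair;
import com.databricks.sdk.service.sql.WarehouseTypePairWarehouseType;
import java.util.Arrays;

public class WarehouseTypes {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();
    // Enable PRO warehouses and disable CLASSIC ones for the workspace.
    w.warehouses()
        .setWorkspaceWarehouseConfig(
            new SetWorkspaceWarehouseConfigRequest()
                .setEnabledWarehouseTypes(
                    Arrays.asList(
                        new WarehouseTypePair()
                            .setWarehouseType(WarehouseTypePairWarehouseType.PRO)
                            .setEnabled(true),
                        new WarehouseTypePair()
                            .setWarehouseType(WarehouseTypePairWarehouseType.CLASSIC)
                            .setEnabled(false))));
  }
}
```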
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePairWarehouseType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePairWarehouseType.java
index a0d6f8870..563e75240 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePairWarehouseType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePairWarehouseType.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** Warehouse type: `PRO` or `CLASSIC`. */
@Generated
public enum WarehouseTypePairWarehouseType {
CLASSIC,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehousesAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehousesAPI.java
index 2382015a1..3253ff69d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehousesAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehousesAPI.java
@@ -189,7 +189,16 @@ public GetWorkspaceWarehouseConfigResponse getWorkspaceWarehouseConfig() {
/** Lists all SQL warehouses that a user has access to. */
public Iterable