From 6f63aae43ef570aa1eb1d8406e8f66434cef5b7f Mon Sep 17 00:00:00 2001 From: Ilya Kuznetsov Date: Tue, 17 Feb 2026 15:09:18 +0100 Subject: [PATCH 01/11] Test updates --- .../multiple_resources/databricks.yml.tmpl | 29 +++++ .../multiple_resources/output.txt | 74 ++++++++++- .../multiple_resources/script | 13 +- .../out.plan_.direct.json | 120 ++++++++++++++++++ .../update-and-resize-autoscale/output.txt | 11 +- .../deploy/update-and-resize-autoscale/script | 4 +- .../update-and-resize/out.plan_.direct.json | 90 +++++++++++++ .../deploy/update-and-resize/output.txt | 11 +- .../clusters/deploy/update-and-resize/script | 4 +- .../resources/sql_warehouses/output.txt | 2 +- .../bundle/resources/sql_warehouses/script | 2 +- libs/testserver/clusters.go | 28 ++++ libs/testserver/experiments.go | 4 + libs/testserver/sql_warehouses.go | 21 ++- 14 files changed, 398 insertions(+), 15 deletions(-) diff --git a/acceptance/bundle/config-remote-sync/multiple_resources/databricks.yml.tmpl b/acceptance/bundle/config-remote-sync/multiple_resources/databricks.yml.tmpl index a2eeaa90a5..0bdfbb0d3d 100644 --- a/acceptance/bundle/config-remote-sync/multiple_resources/databricks.yml.tmpl +++ b/acceptance/bundle/config-remote-sync/multiple_resources/databricks.yml.tmpl @@ -24,3 +24,32 @@ resources: spark_version: $DEFAULT_SPARK_VERSION node_type_id: $NODE_TYPE_ID num_workers: 1 + + clusters: + test_cluster: + cluster_name: cluster-$UNIQUE_NAME + spark_version: $DEFAULT_SPARK_VERSION + node_type_id: $NODE_TYPE_ID + num_workers: 1 + + experiments: + test_experiment: + name: /Users/$CURRENT_USER_NAME/experiment-$UNIQUE_NAME + + registered_models: + test_model: + name: model_$UNIQUE_NAME + catalog_name: main + schema_name: default + + volumes: + test_volume: + name: volume_$UNIQUE_NAME + catalog_name: main + schema_name: default + volume_type: MANAGED + + sql_warehouses: + test_warehouse: + name: warehouse-$UNIQUE_NAME + cluster_size: 2X-Small diff --git a/acceptance/bundle/config-remote-sync/multiple_resources/output.txt b/acceptance/bundle/config-remote-sync/multiple_resources/output.txt index 50d3707407..98c84d4f40 100644 --- a/acceptance/bundle/config-remote-sync/multiple_resources/output.txt +++ b/acceptance/bundle/config-remote-sync/multiple_resources/output.txt @@ -5,7 +5,15 @@ Deployment complete! 
=== Modify both jobs === Detect and save changes -Detected changes in 2 resource(s): +Detected changes in 7 resource(s): + +Resource: resources.clusters.test_cluster + aws_attributes: add + data_security_mode: add + driver_node_type_id: add + +Resource: resources.experiments.test_experiment + artifact_location: add Resource: resources.jobs.job_one max_concurrent_runs: replace @@ -15,6 +23,19 @@ Resource: resources.jobs.job_two max_concurrent_runs: replace tags: add +Resource: resources.registered_models.test_model + full_name: add + metastore_id: add + owner: add + +Resource: resources.sql_warehouses.test_warehouse + creator_name: add + min_num_clusters: add + warehouse_type: add + +Resource: resources.volumes.test_volume + storage_location: add + === Configuration changes @@ -39,16 +60,63 @@ Resource: resources.jobs.job_two + max_concurrent_runs: 10 tasks: - task_key: main -@@ -25,2 +27,4 @@ - node_type_id: [NODE_TYPE_ID] +@@ -26,4 +28,6 @@ num_workers: 1 + + tags: + team: ml + clusters: + test_cluster: +@@ -33,8 +37,14 @@ + num_workers: 1 + ++ aws_attributes: ++ availability: SPOT_WITH_FALLBACK ++ zone_id: us-east-1c ++ data_security_mode: SINGLE_USER ++ driver_node_type_id: [NODE_TYPE_ID] + experiments: + test_experiment: + name: /Users/[USERNAME]/experiment-[UNIQUE_NAME] + ++ artifact_location: dbfs:/databricks/mlflow-tracking/[NUMID] + registered_models: + test_model: +@@ -43,4 +53,7 @@ + schema_name: default + ++ full_name: main.default.model_[UNIQUE_NAME] ++ metastore_id: [UUID] ++ owner: [USERNAME] + volumes: + test_volume: +@@ -50,6 +63,10 @@ + volume_type: MANAGED + ++ storage_location: s3://deco-uc-prod-isolated-aws-us-east-1/metastore/[UUID]/volumes/[UUID] + sql_warehouses: + test_warehouse: + name: warehouse-[UNIQUE_NAME] + cluster_size: 2X-Small ++ creator_name: [USERNAME] ++ min_num_clusters: 1 ++ warehouse_type: CLASSIC >>> [CLI] bundle destroy --auto-approve The following resources will be deleted: + delete resources.clusters.test_cluster + delete resources.experiments.test_experiment delete resources.jobs.job_one delete resources.jobs.job_two + delete resources.registered_models.test_model + delete resources.sql_warehouses.test_warehouse + delete resources.volumes.test_volume + +This action will result in the deletion of the following volumes. +For managed volumes, the files stored in the volume are also deleted from your +cloud tenant within 30 days. For external volumes, the metadata about the volume +is removed from the catalog, but the underlying files are not deleted: + delete resources.volumes.test_volume All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/test-bundle-[UNIQUE_NAME]/default diff --git a/acceptance/bundle/config-remote-sync/multiple_resources/script b/acceptance/bundle/config-remote-sync/multiple_resources/script index 3b398261b7..ee433c116d 100755 --- a/acceptance/bundle/config-remote-sync/multiple_resources/script +++ b/acceptance/bundle/config-remote-sync/multiple_resources/script @@ -3,6 +3,9 @@ envsubst < databricks.yml.tmpl > databricks.yml cleanup() { + # Restore original config before destroy to avoid Terraform errors + # from server-side-only fields (e.g. creator_name) written by config-remote-sync. 
+ envsubst < databricks.yml.tmpl > databricks.yml trace $CLI bundle destroy --auto-approve } trap cleanup EXIT @@ -12,7 +15,15 @@ $CLI bundle deploy job_one_id="$(read_id.py job_one)" job_two_id="$(read_id.py job_two)" - +read_id.py test_cluster > /dev/null +read_id.py test_experiment > /dev/null +read_id.py test_model > /dev/null +read_id.py test_volume > /dev/null +read_id.py test_warehouse > /dev/null + +# Add replacements for dynamic values that appear in server-side defaults +add_repl.py "$($CLI current-user me | jq -r .id)" "USER_ID" +add_repl.py "$($CLI metastores current | jq -r .metastore_id)" "METASTORE_ID" title "Modify both jobs" edit_resource.py jobs $job_one_id <>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) out.requests.txt +>>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) | del(.body.aws_attributes, .body.driver_node_type_id, .body.data_security_mode, .body.enable_elastic_disk) out.requests.txt { "method": "POST", "path": "/api/2.1/clusters/edit", @@ -55,7 +55,7 @@ Deploying resources... Updating deployment state... Deployment complete! ->>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) out.requests.txt +>>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) | del(.body.aws_attributes, .body.driver_node_type_id, .body.data_security_mode, .body.enable_elastic_disk) out.requests.txt { "method": "POST", "path": "/api/2.1/clusters/edit", @@ -89,8 +89,15 @@ Deployment complete! "min_workers":3 }, "autotermination_minutes":60, + "aws_attributes": { + "availability":"SPOT_WITH_FALLBACK", + "zone_id":"us-east-1c" + }, "cluster_id":"[CLUSTER_ID]", "cluster_name":"test-cluster-[UNIQUE_NAME]", + "data_security_mode":"SINGLE_USER", + "driver_node_type_id":"[NODE_TYPE_ID]", + "enable_elastic_disk":false, "node_type_id":"[NODE_TYPE_ID]", "spark_version":"13.3.x-snapshot-scala2.12", "state":"RUNNING" diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/script b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/script index 1f740c232d..f879f963f4 100755 --- a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/script +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/script @@ -22,7 +22,7 @@ update_file.py databricks.yml " num_workers: 2" " autoscale: $CLI bundle plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt $CLI bundle plan -o json >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.json trace $CLI bundle deploy -trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit")))' out.requests.txt +trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit"))) | del(.body.aws_attributes, .body.driver_node_type_id, .body.data_security_mode, .body.enable_elastic_disk)' out.requests.txt rm out.requests.txt title "Cluster should have autoscale\n" @@ -34,7 +34,7 @@ update_file.py databricks.yml "max_workers: 4" "max_workers: 5" $CLI bundle plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt $CLI bundle plan -o json >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.json trace $CLI bundle deploy -trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit")))' out.requests.txt +trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit"))) | del(.body.aws_attributes, .body.driver_node_type_id, .body.data_security_mode, .body.enable_elastic_disk)' out.requests.txt rm out.requests.txt title "Cluster should have new autoscale\n" diff --git 
a/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct.json b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct.json index 62196ecf33..c96a75f91a 100644 --- a/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct.json +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct.json @@ -41,8 +41,15 @@ }, "remote_state": { "autotermination_minutes": 60, + "aws_attributes": { + "availability": "SPOT_WITH_FALLBACK", + "zone_id": "us-east-1c" + }, "cluster_id": "[CLUSTER_ID]", "cluster_name": "test-cluster-[UNIQUE_NAME]", + "data_security_mode": "SINGLE_USER", + "driver_node_type_id": "[NODE_TYPE_ID]", + "enable_elastic_disk": false, "node_type_id": "[NODE_TYPE_ID]", "num_workers": 2, "spark_conf": { @@ -51,6 +58,29 @@ "spark_version": "13.3.x-snapshot-scala2.12" }, "changes": { + "aws_attributes": { + "action": "skip", + "reason": "server_side_default", + "remote": { + "availability": "SPOT_WITH_FALLBACK", + "zone_id": "us-east-1c" + } + }, + "data_security_mode": { + "action": "skip", + "reason": "server_side_default", + "remote": "SINGLE_USER" + }, + "driver_node_type_id": { + "action": "skip", + "reason": "server_side_default", + "remote": "[NODE_TYPE_ID]" + }, + "enable_elastic_disk": { + "action": "skip", + "reason": "empty", + "remote": false + }, "num_workers": { "action": "update", "old": 2, @@ -83,8 +113,15 @@ }, "remote_state": { "autotermination_minutes": 60, + "aws_attributes": { + "availability": "SPOT_WITH_FALLBACK", + "zone_id": "us-east-1c" + }, "cluster_id": "[CLUSTER_ID]", "cluster_name": "test-cluster-[UNIQUE_NAME]", + "data_security_mode": "SINGLE_USER", + "driver_node_type_id": "[NODE_TYPE_ID]", + "enable_elastic_disk": false, "node_type_id": "[NODE_TYPE_ID]", "num_workers": 3, "spark_conf": { @@ -94,6 +131,29 @@ "state": "RUNNING" }, "changes": { + "aws_attributes": { + "action": "skip", + "reason": "server_side_default", + "remote": { + "availability": "SPOT_WITH_FALLBACK", + "zone_id": "us-east-1c" + } + }, + "data_security_mode": { + "action": "skip", + "reason": "server_side_default", + "remote": "SINGLE_USER" + }, + "driver_node_type_id": { + "action": "skip", + "reason": "server_side_default", + "remote": "[NODE_TYPE_ID]" + }, + "enable_elastic_disk": { + "action": "skip", + "reason": "empty", + "remote": false + }, "num_workers": { "action": "resize", "reason": "custom", @@ -127,8 +187,15 @@ }, "remote_state": { "autotermination_minutes": 60, + "aws_attributes": { + "availability": "SPOT_WITH_FALLBACK", + "zone_id": "us-east-1c" + }, "cluster_id": "[CLUSTER_ID]", "cluster_name": "test-cluster-[UNIQUE_NAME]", + "data_security_mode": "SINGLE_USER", + "driver_node_type_id": "[NODE_TYPE_ID]", + "enable_elastic_disk": false, "node_type_id": "[NODE_TYPE_ID]", "num_workers": 4, "spark_conf": { @@ -138,6 +205,29 @@ "state": "RUNNING" }, "changes": { + "aws_attributes": { + "action": "skip", + "reason": "server_side_default", + "remote": { + "availability": "SPOT_WITH_FALLBACK", + "zone_id": "us-east-1c" + } + }, + "data_security_mode": { + "action": "skip", + "reason": "server_side_default", + "remote": "SINGLE_USER" + }, + "driver_node_type_id": { + "action": "skip", + "reason": "server_side_default", + "remote": "[NODE_TYPE_ID]" + }, + "enable_elastic_disk": { + "action": "skip", + "reason": "empty", + "remote": false + }, "num_workers": { "action": "resize", "reason": "custom", diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize/output.txt 
b/acceptance/bundle/resources/clusters/deploy/update-and-resize/output.txt index 2a835f1c61..6460372627 100644 --- a/acceptance/bundle/resources/clusters/deploy/update-and-resize/output.txt +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize/output.txt @@ -19,7 +19,7 @@ Deploying resources... Updating deployment state... Deployment complete! ->>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) out.requests.txt +>>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) | del(.body.aws_attributes, .body.driver_node_type_id, .body.data_security_mode, .body.enable_elastic_disk) out.requests.txt { "method": "POST", "path": "/api/2.1/clusters/edit", @@ -45,8 +45,15 @@ Deployment complete! === Starting the cluster { "autotermination_minutes":60, + "aws_attributes": { + "availability":"SPOT_WITH_FALLBACK", + "zone_id":"us-east-1c" + }, "cluster_id":"[CLUSTER_ID]", "cluster_name":"test-cluster-[UNIQUE_NAME]", + "data_security_mode":"SINGLE_USER", + "driver_node_type_id":"[NODE_TYPE_ID]", + "enable_elastic_disk":false, "node_type_id":"[NODE_TYPE_ID]", "num_workers":3, "spark_conf": { @@ -88,7 +95,7 @@ Deploying resources... Updating deployment state... Deployment complete! ->>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) out.requests.txt +>>> jq select(.method == "POST" and (.path | contains("/clusters/edit"))) | del(.body.aws_attributes, .body.driver_node_type_id, .body.data_security_mode, .body.enable_elastic_disk) out.requests.txt { "method": "POST", "path": "/api/2.1/clusters/edit", diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize/script b/acceptance/bundle/resources/clusters/deploy/update-and-resize/script index 4820bde47e..f2d80d05de 100644 --- a/acceptance/bundle/resources/clusters/deploy/update-and-resize/script +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize/script @@ -20,7 +20,7 @@ update_file.py databricks.yml "num_workers: 2" "num_workers: 3" $CLI bundle plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt $CLI bundle plan -o json >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.json trace $CLI bundle deploy -trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit")))' out.requests.txt +trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit"))) | del(.body.aws_attributes, .body.driver_node_type_id, .body.data_security_mode, .body.enable_elastic_disk)' out.requests.txt rm out.requests.txt title "Cluster should have new num_workers\n" @@ -46,7 +46,7 @@ update_file.py databricks.yml '"spark.executor.memory": "2g"' '"spark.executor.m $CLI bundle plan >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.txt $CLI bundle plan -o json >> out.plan_.$DATABRICKS_BUNDLE_ENGINE.json trace $CLI bundle deploy -trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit")))' out.requests.txt +trace jq 'select(.method == "POST" and (.path | contains("/clusters/edit"))) | del(.body.aws_attributes, .body.driver_node_type_id, .body.data_security_mode, .body.enable_elastic_disk)' out.requests.txt rm out.requests.txt title "Cluster should have new num_workers and spark_conf\n" diff --git a/acceptance/bundle/resources/sql_warehouses/output.txt b/acceptance/bundle/resources/sql_warehouses/output.txt index d8539928f3..a39c625f72 100644 --- a/acceptance/bundle/resources/sql_warehouses/output.txt +++ b/acceptance/bundle/resources/sql_warehouses/output.txt @@ -79,7 +79,7 @@ Updating deployment state... Deployment complete! 
=== Verify the update request ->>> jq select(.method == "POST" and (.path | contains("/sql/warehouses")) and (.body."name" == "sql_warehouse_name_2")) out.requests.txt +>>> jq select(.method == "POST" and (.path | contains("/sql/warehouses")) and (.body."name" == "sql_warehouse_name_2")) | del(.body.creator_name, .body.warehouse_type) out.requests.txt { "method": "POST", "path": "/api/2.0/sql/warehouses/[UUID]/edit", diff --git a/acceptance/bundle/resources/sql_warehouses/script b/acceptance/bundle/resources/sql_warehouses/script index b8297b9377..b1d488f36d 100644 --- a/acceptance/bundle/resources/sql_warehouses/script +++ b/acceptance/bundle/resources/sql_warehouses/script @@ -20,7 +20,7 @@ trace update_file.py databricks.yml sql_warehouse_name sql_warehouse_name_2 trace $CLI bundle deploy title "Verify the update request" -trace jq 'select(.method == "POST" and (.path | contains("/sql/warehouses")) and (.body."name" == "sql_warehouse_name_2"))' out.requests.txt +trace jq 'select(.method == "POST" and (.path | contains("/sql/warehouses")) and (.body."name" == "sql_warehouse_name_2")) | del(.body.creator_name, .body.warehouse_type)' out.requests.txt trace $CLI bundle summary diff --git a/libs/testserver/clusters.go b/libs/testserver/clusters.go index 0b2d523c4a..baa7dc6d81 100644 --- a/libs/testserver/clusters.go +++ b/libs/testserver/clusters.go @@ -20,6 +20,7 @@ func (s *FakeWorkspace) ClustersCreate(req Request) any { clusterId := nextUUID() request.ClusterId = clusterId + clusterFixUps(&request) s.Clusters[clusterId] = request return Response{ @@ -66,10 +67,37 @@ func (s *FakeWorkspace) ClustersEdit(req Request) any { return Response{StatusCode: 404} } + clusterFixUps(&request) s.Clusters[request.ClusterId] = request return Response{} } +// clusterFixUps applies server-side defaults that the real API sets. 
+func clusterFixUps(cluster *compute.ClusterDetails) { + if cluster.AwsAttributes == nil { + cluster.AwsAttributes = &compute.AwsAttributes{ + Availability: compute.AwsAvailabilitySpotWithFallback, + ZoneId: "us-east-1c", + } + cluster.AwsAttributes.ForceSendFields = append( + cluster.AwsAttributes.ForceSendFields, + "Availability", + "ZoneId", + ) + } + + if cluster.DataSecurityMode == "" { + cluster.DataSecurityMode = compute.DataSecurityModeSingleUser + cluster.ForceSendFields = append(cluster.ForceSendFields, "DataSecurityMode") + } + + cluster.ForceSendFields = append(cluster.ForceSendFields, "EnableElasticDisk") + + if cluster.DriverNodeTypeId == "" && cluster.NodeTypeId != "" { + cluster.DriverNodeTypeId = cluster.NodeTypeId + } +} + func (s *FakeWorkspace) ClustersGet(req Request, clusterId string) any { defer s.LockUnlock()() diff --git a/libs/testserver/experiments.go b/libs/testserver/experiments.go index 166e4b842d..e35967deed 100644 --- a/libs/testserver/experiments.go +++ b/libs/testserver/experiments.go @@ -50,6 +50,10 @@ func (s *FakeWorkspace) ExperimentCreate(req Request) Response { experimentId := strconv.FormatInt(nextID(), 10) + if experiment.ArtifactLocation == "" { + experiment.ArtifactLocation = "dbfs:/databricks/mlflow-tracking/" + experimentId + } + // Strip /Workspace prefix from experiment name to match cloud behavior // Input: //Workspace/Users/foo -> Output: /Users/foo experimentName := experiment.Name diff --git a/libs/testserver/sql_warehouses.go b/libs/testserver/sql_warehouses.go index 385ab7a176..fa9aa562a6 100644 --- a/libs/testserver/sql_warehouses.go +++ b/libs/testserver/sql_warehouses.go @@ -8,6 +8,21 @@ import ( "github.com/databricks/databricks-sdk-go/service/sql" ) +func sqlWarehouseFixUps(warehouse *sql.GetWarehouseResponse, userName string) { + if warehouse.CreatorName == "" { + warehouse.CreatorName = userName + } + + if warehouse.MinNumClusters == 0 { + warehouse.MinNumClusters = 1 + warehouse.ForceSendFields = append(warehouse.ForceSendFields, "MinNumClusters") + } + + if warehouse.WarehouseType == "" { + warehouse.WarehouseType = sql.GetWarehouseResponseWarehouseTypeClassic + } +} + func (s *FakeWorkspace) SqlWarehousesUpsert(req Request, warehouseId string) Response { var warehouse sql.GetWarehouseResponse @@ -31,8 +46,11 @@ func (s *FakeWorkspace) SqlWarehousesUpsert(req Request, warehouseId string) Res warehouseId = nextUUID() } warehouse.Id = warehouseId - warehouse.Name = warehouseId + if warehouse.Name == "" { + warehouse.Name = warehouseId + } warehouse.State = sql.StateRunning + sqlWarehouseFixUps(&warehouse, s.CurrentUser().UserName) s.SqlWarehouses[warehouseId] = warehouse return Response{ @@ -41,6 +59,7 @@ func (s *FakeWorkspace) SqlWarehousesUpsert(req Request, warehouseId string) Res } } + func (s *FakeWorkspace) SqlWarehousesList(req Request) Response { var warehouses []sql.EndpointInfo for _, warehouse := range s.SqlWarehouses { From eaae4bbf47b1adb231a99c2c5cbcacdf4b567366 Mon Sep 17 00:00:00 2001 From: Ilya Kuznetsov Date: Tue, 17 Feb 2026 17:47:32 +0100 Subject: [PATCH 02/11] Update defaults --- .../multiple_resources/output.txt | 57 +----------- bundle/configsync/defaults.go | 91 ++++++++++++------- bundle/configsync/diff.go | 2 +- 3 files changed, 59 insertions(+), 91 deletions(-) diff --git a/acceptance/bundle/config-remote-sync/multiple_resources/output.txt b/acceptance/bundle/config-remote-sync/multiple_resources/output.txt index 98c84d4f40..d7d8709ffd 100644 --- 
a/acceptance/bundle/config-remote-sync/multiple_resources/output.txt +++ b/acceptance/bundle/config-remote-sync/multiple_resources/output.txt @@ -5,15 +5,7 @@ Deployment complete! === Modify both jobs === Detect and save changes -Detected changes in 7 resource(s): - -Resource: resources.clusters.test_cluster - aws_attributes: add - data_security_mode: add - driver_node_type_id: add - -Resource: resources.experiments.test_experiment - artifact_location: add +Detected changes in 2 resource(s): Resource: resources.jobs.job_one max_concurrent_runs: replace @@ -23,19 +15,6 @@ Resource: resources.jobs.job_two max_concurrent_runs: replace tags: add -Resource: resources.registered_models.test_model - full_name: add - metastore_id: add - owner: add - -Resource: resources.sql_warehouses.test_warehouse - creator_name: add - min_num_clusters: add - warehouse_type: add - -Resource: resources.volumes.test_volume - storage_location: add - === Configuration changes @@ -67,40 +46,6 @@ Resource: resources.volumes.test_volume + team: ml clusters: test_cluster: -@@ -33,8 +37,14 @@ - num_workers: 1 - -+ aws_attributes: -+ availability: SPOT_WITH_FALLBACK -+ zone_id: us-east-1c -+ data_security_mode: SINGLE_USER -+ driver_node_type_id: [NODE_TYPE_ID] - experiments: - test_experiment: - name: /Users/[USERNAME]/experiment-[UNIQUE_NAME] - -+ artifact_location: dbfs:/databricks/mlflow-tracking/[NUMID] - registered_models: - test_model: -@@ -43,4 +53,7 @@ - schema_name: default - -+ full_name: main.default.model_[UNIQUE_NAME] -+ metastore_id: [UUID] -+ owner: [USERNAME] - volumes: - test_volume: -@@ -50,6 +63,10 @@ - volume_type: MANAGED - -+ storage_location: s3://deco-uc-prod-isolated-aws-us-east-1/metastore/[UUID]/volumes/[UUID] - sql_warehouses: - test_warehouse: - name: warehouse-[UNIQUE_NAME] - cluster_size: 2X-Small -+ creator_name: [USERNAME] -+ min_num_clusters: 1 -+ warehouse_type: CLASSIC >>> [CLI] bundle destroy --auto-approve The following resources will be deleted: diff --git a/bundle/configsync/defaults.go b/bundle/configsync/defaults.go index 735e4349b2..40a5032eeb 100644 --- a/bundle/configsync/defaults.go +++ b/bundle/configsync/defaults.go @@ -25,49 +25,72 @@ var ( // Other fields are compared using reflect.DeepEqual. 
var serverSideDefaults = map[string]any{ // Job-level fields - "timeout_seconds": zeroOrNil, - "email_notifications": emptyEmailNotifications, - "webhook_notifications": map[string]any{}, - "edit_mode": alwaysSkip, // set by CLI - "performance_target": "PERFORMANCE_OPTIMIZED", + "resources.jobs.*.timeout_seconds": zeroOrNil, + "resources.jobs.*.email_notifications": emptyEmailNotifications, + "resources.jobs.*.webhook_notifications": map[string]any{}, + "resources.jobs.*.edit_mode": alwaysSkip, // set by CLI + "resources.jobs.*.performance_target": "PERFORMANCE_OPTIMIZED", // Task-level fields - "tasks[*].run_if": "ALL_SUCCESS", - "tasks[*].disabled": false, - "tasks[*].timeout_seconds": zeroOrNil, - "tasks[*].notebook_task.source": "WORKSPACE", - "tasks[*].email_notifications": emptyEmailNotifications, - "tasks[*].webhook_notifications": map[string]any{}, - "tasks[*].pipeline_task.full_refresh": false, - - "tasks[*].for_each_task.task.run_if": "ALL_SUCCESS", - "tasks[*].for_each_task.task.disabled": false, - "tasks[*].for_each_task.task.timeout_seconds": zeroOrNil, - "tasks[*].for_each_task.task.notebook_task.source": "WORKSPACE", - "tasks[*].for_each_task.task.email_notifications": emptyEmailNotifications, - "tasks[*].for_each_task.task.webhook_notifications": map[string]any{}, + "resources.jobs.*.tasks[*].run_if": "ALL_SUCCESS", + "resources.jobs.*.tasks[*].disabled": false, + "resources.jobs.*.tasks[*].timeout_seconds": zeroOrNil, + "resources.jobs.*.tasks[*].notebook_task.source": "WORKSPACE", + "resources.jobs.*.tasks[*].email_notifications": emptyEmailNotifications, + "resources.jobs.*.tasks[*].webhook_notifications": map[string]any{}, + "resources.jobs.*.tasks[*].pipeline_task.full_refresh": false, + + "resources.jobs.*.tasks[*].for_each_task.task.run_if": "ALL_SUCCESS", + "resources.jobs.*.tasks[*].for_each_task.task.disabled": false, + "resources.jobs.*.tasks[*].for_each_task.task.timeout_seconds": zeroOrNil, + "resources.jobs.*.tasks[*].for_each_task.task.notebook_task.source": "WORKSPACE", + "resources.jobs.*.tasks[*].for_each_task.task.email_notifications": emptyEmailNotifications, + "resources.jobs.*.tasks[*].for_each_task.task.webhook_notifications": map[string]any{}, // Cluster fields (tasks) - "tasks[*].new_cluster.aws_attributes": alwaysSkip, - "tasks[*].new_cluster.azure_attributes": alwaysSkip, - "tasks[*].new_cluster.gcp_attributes": alwaysSkip, - "tasks[*].new_cluster.data_security_mode": "SINGLE_USER", // TODO this field is computed on some workspaces in integration tests, check why and if we can skip it - "tasks[*].new_cluster.enable_elastic_disk": alwaysSkip, // deprecated field + "resources.jobs.*.tasks[*].new_cluster.aws_attributes": alwaysSkip, + "resources.jobs.*.tasks[*].new_cluster.azure_attributes": alwaysSkip, + "resources.jobs.*.tasks[*].new_cluster.gcp_attributes": alwaysSkip, + "resources.jobs.*.tasks[*].new_cluster.data_security_mode": "SINGLE_USER", // TODO this field is computed on some workspaces in integration tests, check why and if we can skip it + "resources.jobs.*.tasks[*].new_cluster.enable_elastic_disk": alwaysSkip, // deprecated field // Cluster fields (job_clusters) - "job_clusters[*].new_cluster.aws_attributes": alwaysSkip, - "job_clusters[*].new_cluster.azure_attributes": alwaysSkip, - "job_clusters[*].new_cluster.gcp_attributes": alwaysSkip, - "job_clusters[*].new_cluster.data_security_mode": "SINGLE_USER", // TODO this field is computed on some workspaces in integration tests, check why and if we can skip it - - 
"job_clusters[*].new_cluster.enable_elastic_disk": alwaysSkip, // deprecated field + "resources.jobs.*.job_clusters[*].new_cluster.aws_attributes": alwaysSkip, + "resources.jobs.*.job_clusters[*].new_cluster.azure_attributes": alwaysSkip, + "resources.jobs.*.job_clusters[*].new_cluster.gcp_attributes": alwaysSkip, + "resources.jobs.*.job_clusters[*].new_cluster.data_security_mode": "SINGLE_USER", // TODO this field is computed on some workspaces in integration tests, check why and if we can skip it + "resources.jobs.*.job_clusters[*].new_cluster.enable_elastic_disk": alwaysSkip, // deprecated field + + // Standalone cluster fields + "resources.clusters.*.aws_attributes": alwaysSkip, + "resources.clusters.*.azure_attributes": alwaysSkip, + "resources.clusters.*.gcp_attributes": alwaysSkip, + "resources.clusters.*.data_security_mode": "SINGLE_USER", + "resources.clusters.*.driver_node_type_id": alwaysSkip, + "resources.clusters.*.enable_elastic_disk": alwaysSkip, + + // Experiment fields + "resources.experiments.*.artifact_location": alwaysSkip, + + // Registered model fields + "resources.registered_models.*.full_name": alwaysSkip, + "resources.registered_models.*.metastore_id": alwaysSkip, + "resources.registered_models.*.owner": alwaysSkip, + + // Volume fields + "resources.volumes.*.storage_location": alwaysSkip, + + // SQL warehouse fields + "resources.sql_warehouses.*.creator_name": alwaysSkip, + "resources.sql_warehouses.*.min_num_clusters": int64(1), + "resources.sql_warehouses.*.warehouse_type": "CLASSIC", // Terraform defaults - "run_as": alwaysSkip, + "resources.jobs.*.run_as": alwaysSkip, // Pipeline fields - "storage": alwaysSkip, - "continuous": false, + "resources.pipelines.*.storage": alwaysSkip, + "resources.pipelines.*.continuous": false, } func shouldSkipField(path string, value any) bool { @@ -136,7 +159,7 @@ func matchParts(patternParts, pathParts []string) bool { // If CLI-defaulted field is changed on remote and should be disabled (e.g. queueing disabled -> remote field is nil) // we can't define it in the config as "null" because CLI default will be applied again. var resetValues = map[string]any{ - "queue": map[string]any{ + "resources.jobs.*.queue": map[string]any{ "enabled": false, }, } diff --git a/bundle/configsync/diff.go b/bundle/configsync/diff.go index df048597c1..481c5a4a93 100644 --- a/bundle/configsync/diff.go +++ b/bundle/configsync/diff.go @@ -151,7 +151,7 @@ func DetectChanges(ctx context.Context, b *bundle.Bundle, engine engine.EngineTy continue } - change, err := convertChangeDesc(path, changeDesc) + change, err := convertChangeDesc(resourceKey+"."+path, changeDesc) if err != nil { return nil, fmt.Errorf("failed to compute config change for path %s: %w", path, err) } From 57fded7ebe99f9202e8fca2f0215597808389e1c Mon Sep 17 00:00:00 2001 From: Ilya Kuznetsov Date: Tue, 17 Feb 2026 17:50:56 +0100 Subject: [PATCH 03/11] Update defaults --- .../config-remote-sync/multiple_resources/output.txt | 11 ++++++++++- bundle/configsync/defaults.go | 7 ++++--- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/acceptance/bundle/config-remote-sync/multiple_resources/output.txt b/acceptance/bundle/config-remote-sync/multiple_resources/output.txt index d7d8709ffd..45ed40b4de 100644 --- a/acceptance/bundle/config-remote-sync/multiple_resources/output.txt +++ b/acceptance/bundle/config-remote-sync/multiple_resources/output.txt @@ -5,7 +5,7 @@ Deployment complete! 
=== Modify both jobs === Detect and save changes -Detected changes in 2 resource(s): +Detected changes in 3 resource(s): Resource: resources.jobs.job_one max_concurrent_runs: replace @@ -15,6 +15,9 @@ Resource: resources.jobs.job_two max_concurrent_runs: replace tags: add +Resource: resources.registered_models.test_model + storage_location: add + === Configuration changes @@ -46,6 +49,12 @@ Resource: resources.jobs.job_two + team: ml clusters: test_cluster: +@@ -43,4 +47,5 @@ + schema_name: default + ++ storage_location: s3://deco-uc-prod-isolated-aws-us-east-1/metastore/[UUID]/models/[UUID] + volumes: + test_volume: >>> [CLI] bundle destroy --auto-approve The following resources will be deleted: diff --git a/bundle/configsync/defaults.go b/bundle/configsync/defaults.go index 40a5032eeb..d04c807f92 100644 --- a/bundle/configsync/defaults.go +++ b/bundle/configsync/defaults.go @@ -73,9 +73,10 @@ var serverSideDefaults = map[string]any{ "resources.experiments.*.artifact_location": alwaysSkip, // Registered model fields - "resources.registered_models.*.full_name": alwaysSkip, - "resources.registered_models.*.metastore_id": alwaysSkip, - "resources.registered_models.*.owner": alwaysSkip, + "resources.registered_models.*.full_name": alwaysSkip, + "resources.registered_models.*.metastore_id": alwaysSkip, + "resources.registered_models.*.owner": alwaysSkip, + "resources.registered_models.*.storage_location": alwaysSkip, // Volume fields "resources.volumes.*.storage_location": alwaysSkip, From 5f691e785756fa5b2b3159e01f58b7923142804b Mon Sep 17 00:00:00 2001 From: Ilya Kuznetsov Date: Tue, 17 Feb 2026 17:52:00 +0100 Subject: [PATCH 04/11] Updated output --- .../config-remote-sync/multiple_resources/output.txt | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/acceptance/bundle/config-remote-sync/multiple_resources/output.txt b/acceptance/bundle/config-remote-sync/multiple_resources/output.txt index 45ed40b4de..d7d8709ffd 100644 --- a/acceptance/bundle/config-remote-sync/multiple_resources/output.txt +++ b/acceptance/bundle/config-remote-sync/multiple_resources/output.txt @@ -5,7 +5,7 @@ Deployment complete! 
=== Modify both jobs === Detect and save changes -Detected changes in 3 resource(s): +Detected changes in 2 resource(s): Resource: resources.jobs.job_one max_concurrent_runs: replace @@ -15,9 +15,6 @@ Resource: resources.jobs.job_two max_concurrent_runs: replace tags: add -Resource: resources.registered_models.test_model - storage_location: add - === Configuration changes @@ -49,12 +46,6 @@ Resource: resources.registered_models.test_model + team: ml clusters: test_cluster: -@@ -43,4 +47,5 @@ - schema_name: default - -+ storage_location: s3://deco-uc-prod-isolated-aws-us-east-1/metastore/[UUID]/models/[UUID] - volumes: - test_volume: >>> [CLI] bundle destroy --auto-approve The following resources will be deleted: From 211901f699616ea34308807c7ce30c38f69158d3 Mon Sep 17 00:00:00 2001 From: Ilya Kuznetsov Date: Tue, 17 Feb 2026 19:22:20 +0100 Subject: [PATCH 05/11] Remove redundant operations --- .../bundle/config-remote-sync/multiple_resources/script | 5 ----- 1 file changed, 5 deletions(-) diff --git a/acceptance/bundle/config-remote-sync/multiple_resources/script b/acceptance/bundle/config-remote-sync/multiple_resources/script index ee433c116d..ea1b2d8859 100755 --- a/acceptance/bundle/config-remote-sync/multiple_resources/script +++ b/acceptance/bundle/config-remote-sync/multiple_resources/script @@ -15,11 +15,6 @@ $CLI bundle deploy job_one_id="$(read_id.py job_one)" job_two_id="$(read_id.py job_two)" -read_id.py test_cluster > /dev/null -read_id.py test_experiment > /dev/null -read_id.py test_model > /dev/null -read_id.py test_volume > /dev/null -read_id.py test_warehouse > /dev/null # Add replacements for dynamic values that appear in server-side defaults add_repl.py "$($CLI current-user me | jq -r .id)" "USER_ID" From 89be85c563ad2cda830aa7925b86538544b99ba5 Mon Sep 17 00:00:00 2001 From: Ilya Kuznetsov Date: Tue, 17 Feb 2026 19:26:45 +0100 Subject: [PATCH 06/11] Fix single user name --- bundle/configsync/defaults.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/bundle/configsync/defaults.go b/bundle/configsync/defaults.go index d04c807f92..9b0157bf03 100644 --- a/bundle/configsync/defaults.go +++ b/bundle/configsync/defaults.go @@ -53,6 +53,7 @@ var serverSideDefaults = map[string]any{ "resources.jobs.*.tasks[*].new_cluster.gcp_attributes": alwaysSkip, "resources.jobs.*.tasks[*].new_cluster.data_security_mode": "SINGLE_USER", // TODO this field is computed on some workspaces in integration tests, check why and if we can skip it "resources.jobs.*.tasks[*].new_cluster.enable_elastic_disk": alwaysSkip, // deprecated field + "resources.jobs.*.tasks[*].new_cluster.single_user_name": alwaysSkip, // Cluster fields (job_clusters) "resources.jobs.*.job_clusters[*].new_cluster.aws_attributes": alwaysSkip, @@ -60,6 +61,7 @@ var serverSideDefaults = map[string]any{ "resources.jobs.*.job_clusters[*].new_cluster.gcp_attributes": alwaysSkip, "resources.jobs.*.job_clusters[*].new_cluster.data_security_mode": "SINGLE_USER", // TODO this field is computed on some workspaces in integration tests, check why and if we can skip it "resources.jobs.*.job_clusters[*].new_cluster.enable_elastic_disk": alwaysSkip, // deprecated field + "resources.jobs.*.job_clusters[*].new_cluster.single_user_name": alwaysSkip, // Standalone cluster fields "resources.clusters.*.aws_attributes": alwaysSkip, @@ -68,15 +70,16 @@ var serverSideDefaults = map[string]any{ "resources.clusters.*.data_security_mode": "SINGLE_USER", "resources.clusters.*.driver_node_type_id": alwaysSkip, 
"resources.clusters.*.enable_elastic_disk": alwaysSkip, + "resources.clusters.*.single_user_name": alwaysSkip, // Experiment fields "resources.experiments.*.artifact_location": alwaysSkip, // Registered model fields - "resources.registered_models.*.full_name": alwaysSkip, - "resources.registered_models.*.metastore_id": alwaysSkip, - "resources.registered_models.*.owner": alwaysSkip, - "resources.registered_models.*.storage_location": alwaysSkip, + "resources.registered_models.*.full_name": alwaysSkip, + "resources.registered_models.*.metastore_id": alwaysSkip, + "resources.registered_models.*.owner": alwaysSkip, + "resources.registered_models.*.storage_location": alwaysSkip, // Volume fields "resources.volumes.*.storage_location": alwaysSkip, From f94192e1ca1ffbfab6fd934164af722b23aef30c Mon Sep 17 00:00:00 2001 From: Ilya Kuznetsov Date: Tue, 17 Feb 2026 19:47:00 +0100 Subject: [PATCH 07/11] Fix lint --- libs/testserver/sql_warehouses.go | 1 - 1 file changed, 1 deletion(-) diff --git a/libs/testserver/sql_warehouses.go b/libs/testserver/sql_warehouses.go index fa9aa562a6..6e7fd3a2f9 100644 --- a/libs/testserver/sql_warehouses.go +++ b/libs/testserver/sql_warehouses.go @@ -59,7 +59,6 @@ func (s *FakeWorkspace) SqlWarehousesUpsert(req Request, warehouseId string) Res } } - func (s *FakeWorkspace) SqlWarehousesList(req Request) Response { var warehouses []sql.EndpointInfo for _, warehouse := range s.SqlWarehouses { From f0b779549c8a10afe4b0020b23089637703fa6b1 Mon Sep 17 00:00:00 2001 From: Ilya Kuznetsov Date: Tue, 17 Feb 2026 20:29:47 +0100 Subject: [PATCH 08/11] Update cluster output --- .../update-and-resize-autoscale/out.plan_.direct.json | 6 ++++++ .../clusters/deploy/update-and-resize/out.plan_.direct.json | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.direct.json b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.direct.json index 627f4ff3de..be8b055c81 100644 --- a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.direct.json +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.direct.json @@ -49,6 +49,7 @@ "enable_elastic_disk": false, "node_type_id": "[NODE_TYPE_ID]", "num_workers": 2, + "single_user_name": "[USERNAME]", "spark_version": "13.3.x-snapshot-scala2.12" }, "changes": { @@ -86,6 +87,11 @@ "action": "update", "old": 2, "remote": 2 + }, + "single_user_name": { + "action": "skip", + "reason": "server_side_default", + "remote": "[USERNAME]" } } } diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct.json b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct.json index c96a75f91a..b3c004c9df 100644 --- a/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct.json +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct.json @@ -52,6 +52,7 @@ "enable_elastic_disk": false, "node_type_id": "[NODE_TYPE_ID]", "num_workers": 2, + "single_user_name": "[USERNAME]", "spark_conf": { "spark.executor.memory": "2g" }, @@ -86,6 +87,11 @@ "old": 2, "new": 3, "remote": 2 + }, + "single_user_name": { + "action": "skip", + "reason": "server_side_default", + "remote": "[USERNAME]" } } } From 58a45d91786e25f493ddcd8b68a91af6218c80b8 Mon Sep 17 00:00:00 2001 From: Ilya Kuznetsov Date: Tue, 17 Feb 2026 21:04:26 +0100 Subject: [PATCH 09/11] Fix tests --- libs/testserver/clusters.go | 3 ++- 1 
file changed, 2 insertions(+), 1 deletion(-) diff --git a/libs/testserver/clusters.go b/libs/testserver/clusters.go index 805868111f..9a934324f8 100644 --- a/libs/testserver/clusters.go +++ b/libs/testserver/clusters.go @@ -21,7 +21,6 @@ func (s *FakeWorkspace) ClustersCreate(req Request) any { clusterId := nextUUID() request.ClusterId = clusterId - clusterFixUps(&request) // Match cloud behavior: SINGLE_USER clusters automatically get single_user_name set // to the current user. This enables terraform drift detection when the bundle config @@ -30,6 +29,8 @@ func (s *FakeWorkspace) ClustersCreate(req Request) any { request.SingleUserName = s.CurrentUser().UserName } + clusterFixUps(&request) + s.Clusters[clusterId] = request return Response{ From be38707afb9239f8952ea2ae93c800f858c35d10 Mon Sep 17 00:00:00 2001 From: Ilya Kuznetsov Date: Tue, 17 Feb 2026 21:12:14 +0100 Subject: [PATCH 10/11] Fix tests --- .../update-and-resize-autoscale/out.plan_.direct.json | 6 ------ .../clusters/deploy/update-and-resize/out.plan_.direct.json | 6 ------ 2 files changed, 12 deletions(-) diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.direct.json b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.direct.json index be8b055c81..627f4ff3de 100644 --- a/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.direct.json +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize-autoscale/out.plan_.direct.json @@ -49,7 +49,6 @@ "enable_elastic_disk": false, "node_type_id": "[NODE_TYPE_ID]", "num_workers": 2, - "single_user_name": "[USERNAME]", "spark_version": "13.3.x-snapshot-scala2.12" }, "changes": { @@ -87,11 +86,6 @@ "action": "update", "old": 2, "remote": 2 - }, - "single_user_name": { - "action": "skip", - "reason": "server_side_default", - "remote": "[USERNAME]" } } } diff --git a/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct.json b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct.json index b3c004c9df..c96a75f91a 100644 --- a/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct.json +++ b/acceptance/bundle/resources/clusters/deploy/update-and-resize/out.plan_.direct.json @@ -52,7 +52,6 @@ "enable_elastic_disk": false, "node_type_id": "[NODE_TYPE_ID]", "num_workers": 2, - "single_user_name": "[USERNAME]", "spark_conf": { "spark.executor.memory": "2g" }, @@ -87,11 +86,6 @@ "old": 2, "new": 3, "remote": 2 - }, - "single_user_name": { - "action": "skip", - "reason": "server_side_default", - "remote": "[USERNAME]" } } } From 6484d9a86301f04bd0a0ace1a796ec25734c8b3c Mon Sep 17 00:00:00 2001 From: Ilya Kuznetsov Date: Wed, 18 Feb 2026 00:01:52 +0100 Subject: [PATCH 11/11] Fix tests --- .../bundle/resources/permissions/output.txt | 23 ++++++++++++++++++- .../out.requests.destroy.terraform.json | 16 +++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/acceptance/bundle/resources/permissions/output.txt b/acceptance/bundle/resources/permissions/output.txt index ed3f0ac83e..7e3d1bdf4a 100644 --- a/acceptance/bundle/resources/permissions/output.txt +++ b/acceptance/bundle/resources/permissions/output.txt @@ -332,7 +332,28 @@ EXACT pipelines/other_can_manage/out.requests.destroy.direct.json EXACT pipelines/other_is_owner/out.requests.deploy.direct.json EXACT pipelines/other_is_owner/out.requests.destroy.direct.json MATCH sql_warehouses/current_can_manage/out.requests.deploy.direct.json -EXACT 
sql_warehouses/current_can_manage/out.requests.destroy.direct.json +DIFF sql_warehouses/current_can_manage/out.requests.destroy.direct.json +--- sql_warehouses/current_can_manage/out.requests.destroy.direct.json ++++ sql_warehouses/current_can_manage/out.requests.destroy.terraform.json +@@ -1 +1,18 @@ +-[]+[ ++ { ++ "body": { ++ "access_control_list": [ ++ { ++ "permission_level": "CAN_MANAGE", ++ "user_name": "[USERNAME]" ++ }, ++ { ++ "permission_level": "IS_OWNER", ++ "user_name": "[USERNAME]" ++ } ++ ] ++ }, ++ "method": "PUT", ++ "path": "/api/2.0/permissions/sql/warehouses/[UUID]" ++ } ++] EXACT target_permissions/out.requests_create.direct.json DIFF target_permissions/out.requests_delete.direct.json --- target_permissions/out.requests_delete.direct.json diff --git a/acceptance/bundle/resources/permissions/sql_warehouses/current_can_manage/out.requests.destroy.terraform.json b/acceptance/bundle/resources/permissions/sql_warehouses/current_can_manage/out.requests.destroy.terraform.json index e69de29bb2..2abb177cb6 100644 --- a/acceptance/bundle/resources/permissions/sql_warehouses/current_can_manage/out.requests.destroy.terraform.json +++ b/acceptance/bundle/resources/permissions/sql_warehouses/current_can_manage/out.requests.destroy.terraform.json @@ -0,0 +1,16 @@ +{ + "method": "PUT", + "path": "/api/2.0/permissions/sql/warehouses/[UUID]", + "body": { + "access_control_list": [ + { + "permission_level": "CAN_MANAGE", + "user_name": "[USERNAME]" + }, + { + "permission_level": "IS_OWNER", + "user_name": "[USERNAME]" + } + ] + } +}
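
Illustrative sketch (hedged, not taken from the patches above): patch 02 rewrites the serverSideDefaults keys from relative field paths to fully qualified patterns such as resources.jobs.*.tasks[*].run_if, and diff.go now prefixes each change path with its resource key before the lookup. The Go snippet below shows one way such a pattern could be matched segment by segment; the helper names, the rule that "*" matches exactly one dot-separated segment, and the "[*]" index handling are assumptions for illustration only and may differ from the CLI's actual matchParts implementation.

package main

import (
	"fmt"
	"strings"
)

// matchDefaultPath reports whether a concrete change path such as
// "resources.jobs.job_one.tasks[0].run_if" matches a defaults pattern such as
// "resources.jobs.*.tasks[*].run_if". Sketch only: "*" matches exactly one
// dot-separated segment and "name[*]" matches "name[<any index>]".
func matchDefaultPath(pattern, path string) bool {
	pp := strings.Split(pattern, ".")
	cp := strings.Split(path, ".")
	if len(pp) != len(cp) {
		return false
	}
	for i := range pp {
		if !segmentMatches(pp[i], cp[i]) {
			return false
		}
	}
	return true
}

func segmentMatches(pat, seg string) bool {
	if pat == "*" {
		return true
	}
	// "tasks[*]" should match "tasks[3]" for any index.
	if base, ok := strings.CutSuffix(pat, "[*]"); ok {
		return strings.HasPrefix(seg, base+"[") && strings.HasSuffix(seg, "]")
	}
	return pat == seg
}

func main() {
	fmt.Println(matchDefaultPath("resources.jobs.*.tasks[*].run_if", "resources.jobs.job_one.tasks[2].run_if"))  // true
	fmt.Println(matchDefaultPath("resources.clusters.*.data_security_mode", "resources.jobs.job_one.edit_mode")) // false
}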