Add Hybrid Cluster Workspace Authorization Resource (#77)
Co-authored-by: Vandy Liu <[email protected]>
ichung08 and vandyliu authored Jun 4, 2024
1 parent b91c51f commit d87be6d
Showing 12 changed files with 635 additions and 48 deletions.
4 changes: 4 additions & 0 deletions .github/workflows/testacc.yml
@@ -70,6 +70,7 @@ jobs:
"internal/provider/resources/resource_cluster.go"
"internal/provider/resources/resource_cluster_test.go"
"internal/provider/resources/resource_deployment.go"
"internal/provider/resources/common_cluster.go"
)
for file in "${FILES_TO_CHECK[@]}"; do
if git diff --name-only remotes/origin/${{ github.base_ref }} remotes/origin/${{ github.head_ref }} | grep -q "$file"; then
@@ -90,6 +91,7 @@ jobs:
ASTRO_API_HOST: https://api.astronomer-dev.io
SKIP_CLUSTER_RESOURCE_TESTS: ${{ env.SKIP_CLUSTER_RESOURCE_TESTS }}
HOSTED_TEAM_ID: clwbclrc100bl01ozjj5s4jmq
HYBRID_WORKSPACE_IDS: cl70oe7cu445571iynrkthtybl,cl8wpve4993871i37qe1k152c
TESTARGS: "-failfast"
run: make testacc

@@ -131,6 +133,7 @@ jobs:
HYBRID_NODE_POOL_ID: clqqongl40fmu01m94pwp4kct
ASTRO_API_HOST: https://api.astronomer-stage.io
HOSTED_TEAM_ID: clwv0r0x7091n01l0t1fm4vxy
HYBRID_WORKSPACE_IDS: clwv06sva08vg01hovu1j7znw
TESTARGS: "-failfast"
run: make testacc

@@ -172,5 +175,6 @@ jobs:
HYBRID_NODE_POOL_ID: clnp86ly5000301ndzfxz895w
ASTRO_API_HOST: https://api.astronomer-dev.io
HOSTED_TEAM_ID: clwbclrc100bl01ozjj5s4jmq
HYBRID_WORKSPACE_IDS: cl70oe7cu445571iynrkthtybl,cl8wpve4993871i37qe1k152c
TESTARGS: "-failfast"
run: make testacc
31 changes: 31 additions & 0 deletions docs/resources/hybrid_cluster_workspace_authorization.md
@@ -0,0 +1,31 @@
---
# generated by https://github.com/hashicorp/terraform-plugin-docs
page_title: "astro_hybrid_cluster_workspace_authorization Resource - astro"
subcategory: ""
description: |-
Hybrid cluster workspace authorization resource
---

# astro_hybrid_cluster_workspace_authorization (Resource)

Hybrid cluster workspace authorization resource

## Example Usage

```terraform
resource "astro_hybrid_cluster_workspace_authorization" "example" {
cluster_id = "clk8h0fv1006801j8yysfybbt"
workspace_ids = ["cl70oe7cu445571iynrkthtybl", "cl70oe7cu445571iynrkthacsd"]
}
```

<!-- schema generated by tfplugindocs -->
## Schema

### Required

- `cluster_id` (String) The ID of the hybrid cluster to set authorizations for

### Optional

- `workspace_ids` (Set of String) The IDs of the workspaces to authorize for the hybrid cluster
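
Because `workspace_ids` accepts any set of workspace IDs, the authorization can also point at workspaces managed in the same Terraform configuration. A minimal sketch (not part of this diff); the `astro_workspace` resource names and the cluster ID are illustrative:

```terraform
resource "astro_workspace" "team_a" {
  name                  = "team-a"
  description           = "Workspace for team A"
  cicd_enforced_default = true
}

resource "astro_workspace" "team_b" {
  name                  = "team-b"
  description           = "Workspace for team B"
  cicd_enforced_default = true
}

# Authorize both workspaces on the hybrid cluster.
resource "astro_hybrid_cluster_workspace_authorization" "example" {
  cluster_id    = "clk8h0fv1006801j8yysfybbt"
  workspace_ids = [astro_workspace.team_a.id, astro_workspace.team_b.id]
}
```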
@@ -0,0 +1,4 @@
resource "astro_hybrid_cluster_workspace_authorization" "example" {
cluster_id = "clk8h0fv1006801j8yysfybbt"
workspace_ids = ["cl70oe7cu445571iynrkthtybl", "cl70oe7cu445571iynrkthacsd"]
}
30 changes: 30 additions & 0 deletions internal/provider/models/hybrid_cluster_workspace_authorization.go
@@ -0,0 +1,30 @@
package models

import (
	"github.com/astronomer/terraform-provider-astro/internal/clients/platform"
	"github.com/astronomer/terraform-provider-astro/internal/utils"
	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

type HybridClusterWorkspaceAuthorizationResource struct {
	ClusterId    types.String `tfsdk:"cluster_id"`
	WorkspaceIds types.Set    `tfsdk:"workspace_ids"`
}

func (data *HybridClusterWorkspaceAuthorizationResource) ReadFromResponse(
	cluster *platform.Cluster,
) diag.Diagnostics {
	var diags diag.Diagnostics
	data.ClusterId = types.StringValue(cluster.Id)
	if cluster.WorkspaceIds == nil || len(*cluster.WorkspaceIds) == 0 {
		data.WorkspaceIds = types.SetNull(types.StringType)
	} else {
		data.WorkspaceIds, diags = utils.StringSet(cluster.WorkspaceIds)
		if diags.HasError() {
			return diags
		}
	}

	return nil
}
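
The null-versus-empty handling above is the subtle part of `ReadFromResponse`: when the API returns no workspace IDs, the model stores a null set rather than an empty one. A minimal unit-test sketch (not part of this commit) exercising that branch, assuming only the `platform.Cluster` fields the code above already uses and the testify assertions used elsewhere in this repository:

```go
package models_test

import (
	"testing"

	"github.com/astronomer/terraform-provider-astro/internal/clients/platform"
	"github.com/astronomer/terraform-provider-astro/internal/provider/models"
	"github.com/stretchr/testify/assert"
)

func TestReadFromResponseEmptyWorkspaceIds(t *testing.T) {
	// An absent or empty workspace list should be stored as a null set.
	empty := []string{}
	cluster := &platform.Cluster{Id: "clk8h0fv1006801j8yysfybbt", WorkspaceIds: &empty}

	var data models.HybridClusterWorkspaceAuthorizationResource
	diags := data.ReadFromResponse(cluster)

	assert.False(t, diags.HasError())
	assert.Equal(t, "clk8h0fv1006801j8yysfybbt", data.ClusterId.ValueString())
	assert.True(t, data.WorkspaceIds.IsNull())
}
```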
1 change: 1 addition & 0 deletions internal/provider/provider.go
@@ -124,6 +124,7 @@ func (p *AstroProvider) Resources(ctx context.Context) []func() resource.Resourc
resources.NewDeploymentResource,
resources.NewClusterResource,
resources.NewTeamRolesResource,
resources.NewHybridClusterWorkspaceAuthorizationResource,
}
}

3 changes: 3 additions & 0 deletions internal/provider/provider_test_utils.go
@@ -35,6 +35,9 @@ func TestAccPreCheck(t *testing.T) {
if hybridOrgId := os.Getenv("HYBRID_ORGANIZATION_ID"); len(hybridOrgId) == 0 {
missingEnvVars = append(missingEnvVars, "HYBRID_ORGANIZATION_ID")
}
if hybridWorkspaceIds := os.Getenv("HYBRID_WORKSPACE_IDS"); len(hybridWorkspaceIds) == 0 {
missingEnvVars = append(missingEnvVars, "HYBRID_WORKSPACE_IDS")
}
if host := os.Getenv("ASTRO_API_HOST"); len(host) == 0 {
missingEnvVars = append(missingEnvVars, "ASTRO_API_HOST")
}
47 changes: 47 additions & 0 deletions internal/provider/resources/common_cluster.go
@@ -0,0 +1,47 @@
package resources

import (
	"context"
	"fmt"
	"net/http"

	"github.com/astronomer/terraform-provider-astro/internal/clients"
	"github.com/astronomer/terraform-provider-astro/internal/clients/platform"
	"github.com/hashicorp/terraform-plugin-log/tflog"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
)

// ClusterResourceRefreshFunc returns a retry.StateRefreshFunc that polls the platform API for the cluster status.
// If the cluster is not found, it returns "DELETED" status.
// If the cluster is found, it returns the cluster status.
// If there is an error, it returns the error.
// WaitForStateContext will keep polling until the target status is reached, the timeout is reached, or an error is returned.
func ClusterResourceRefreshFunc(ctx context.Context, platformClient *platform.ClientWithResponses, organizationId string, clusterId string) retry.StateRefreshFunc {
	return func() (any, string, error) {
		cluster, err := platformClient.GetClusterWithResponse(ctx, organizationId, clusterId)
		if err != nil {
			tflog.Error(ctx, "failed to get cluster while polling for cluster 'CREATED' status", map[string]interface{}{"error": err})
			return nil, "", err
		}
		statusCode, diagnostic := clients.NormalizeAPIError(ctx, cluster.HTTPResponse, cluster.Body)
		if statusCode == http.StatusNotFound {
			return &platform.Cluster{}, "DELETED", nil
		}
		if diagnostic != nil {
			return nil, "", fmt.Errorf("error getting cluster %s", diagnostic.Detail())
		}
		if cluster != nil && cluster.JSON200 != nil {
			switch cluster.JSON200.Status {
			case platform.ClusterStatusCREATED:
				return cluster.JSON200, string(cluster.JSON200.Status), nil
			case platform.ClusterStatusUPDATEFAILED, platform.ClusterStatusCREATEFAILED:
				return cluster.JSON200, string(cluster.JSON200.Status), fmt.Errorf("cluster mutation failed for cluster '%v'", cluster.JSON200.Id)
			case platform.ClusterStatusCREATING, platform.ClusterStatusUPDATING:
				return cluster.JSON200, string(cluster.JSON200.Status), nil
			default:
				return cluster.JSON200, string(cluster.JSON200.Status), fmt.Errorf("unexpected cluster status '%v' for cluster '%v'", cluster.JSON200.Status, cluster.JSON200.Id)
			}
		}
		return nil, "", fmt.Errorf("error getting cluster %s", clusterId)
	}
}
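
For context, the extracted function is consumed through the plugin SDK's retry helpers, as the callers in `resource_cluster.go` below show. A condensed sketch (not part of this commit) of that polling pattern; the helper name is illustrative, and the real Create/Update paths also list the failure statuses in `Target` so polling stops on them:

```go
package resources

import (
	"context"
	"time"

	"github.com/astronomer/terraform-provider-astro/internal/clients/platform"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
)

// waitForClusterCreated blocks until the cluster reaches CREATED, polling fails, or the timeout elapses.
func waitForClusterCreated(ctx context.Context, client *platform.ClientWithResponses, organizationId, clusterId string) error {
	stateConf := &retry.StateChangeConf{
		Pending:    []string{string(platform.ClusterStatusCREATING), string(platform.ClusterStatusUPDATING)},
		Target:     []string{string(platform.ClusterStatusCREATED)},
		Refresh:    ClusterResourceRefreshFunc(ctx, client, organizationId, clusterId),
		Timeout:    3 * time.Hour,
		MinTimeout: 1 * time.Minute,
	}
	// WaitForStateContext keeps calling Refresh until a Target status is returned,
	// Refresh reports an error, or the timeout is hit.
	_, err := stateConf.WaitForStateContext(ctx)
	return err
}
```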
41 changes: 3 additions & 38 deletions internal/provider/resources/resource_cluster.go
@@ -217,7 +217,7 @@ func (r *ClusterResource) Create(
stateConf := &retry.StateChangeConf{
Pending: []string{string(platform.ClusterStatusCREATING), string(platform.ClusterStatusUPDATING)},
Target: []string{string(platform.ClusterStatusCREATED), string(platform.ClusterStatusUPDATEFAILED), string(platform.ClusterStatusCREATEFAILED)},
Refresh: r.resourceRefreshFunc(ctx, cluster.JSON200.Id),
Refresh: ClusterResourceRefreshFunc(ctx, r.platformClient, r.organizationId, cluster.JSON200.Id),
Timeout: 3 * time.Hour,
MinTimeout: 1 * time.Minute,
}
@@ -370,7 +370,7 @@ func (r *ClusterResource) Update(
stateConf := &retry.StateChangeConf{
Pending: []string{string(platform.ClusterStatusCREATING), string(platform.ClusterStatusUPDATING)},
Target: []string{string(platform.ClusterStatusCREATED), string(platform.ClusterStatusUPDATEFAILED), string(platform.ClusterStatusCREATEFAILED)},
Refresh: r.resourceRefreshFunc(ctx, cluster.JSON200.Id),
Refresh: ClusterResourceRefreshFunc(ctx, r.platformClient, r.organizationId, cluster.JSON200.Id),
Timeout: 3 * time.Hour,
MinTimeout: 1 * time.Minute,
}
@@ -441,7 +441,7 @@ func (r *ClusterResource) Delete(
stateConf := &retry.StateChangeConf{
Pending: []string{string(platform.ClusterStatusCREATING), string(platform.ClusterStatusUPDATING), string(platform.ClusterStatusCREATED), string(platform.ClusterStatusUPDATEFAILED), string(platform.ClusterStatusCREATEFAILED)},
Target: []string{"DELETED"},
Refresh: r.resourceRefreshFunc(ctx, data.Id.ValueString()),
Refresh: ClusterResourceRefreshFunc(ctx, r.platformClient, r.organizationId, data.Id.ValueString()),
Timeout: 1 * time.Hour,
MinTimeout: 30 * time.Second,
}
@@ -576,38 +576,3 @@ func validateGcpConfig(ctx context.Context, data *models.ClusterResource) diag.D
}
return diags
}

// resourceRefreshFunc returns a retry.StateRefreshFunc that polls the platform API for the cluster status
// If the cluster is not found, it returns "DELETED" status
// If the cluster is found, it returns the cluster status
// If there is an error, it returns the error
// WaitForStateContext will keep polling until the target status is reached, the timeout is reached or an err is returned
func (r *ClusterResource) resourceRefreshFunc(ctx context.Context, clusterId string) retry.StateRefreshFunc {
return func() (any, string, error) {
cluster, err := r.platformClient.GetClusterWithResponse(ctx, r.organizationId, clusterId)
if err != nil {
tflog.Error(ctx, "failed to get cluster while polling for cluster 'CREATED' status", map[string]interface{}{"error": err})
return nil, "", err
}
statusCode, diagnostic := clients.NormalizeAPIError(ctx, cluster.HTTPResponse, cluster.Body)
if statusCode == http.StatusNotFound {
return &platform.Cluster{}, "DELETED", nil
}
if diagnostic != nil {
return nil, "", fmt.Errorf("error getting cluster %s", diagnostic.Detail())
}
if cluster != nil && cluster.JSON200 != nil {
switch cluster.JSON200.Status {
case platform.ClusterStatusCREATED:
return cluster.JSON200, string(cluster.JSON200.Status), nil
case platform.ClusterStatusUPDATEFAILED, platform.ClusterStatusCREATEFAILED:
return cluster.JSON200, string(cluster.JSON200.Status), fmt.Errorf("cluster mutation failed for cluster '%v'", cluster.JSON200.Id)
case platform.ClusterStatusCREATING, platform.ClusterStatusUPDATING:
return cluster.JSON200, string(cluster.JSON200.Status), nil
default:
return cluster.JSON200, string(cluster.JSON200.Status), fmt.Errorf("unexpected cluster status '%v' for cluster '%v'", cluster.JSON200.Status, cluster.JSON200.Id)
}
}
return nil, "", fmt.Errorf("error getting cluster %s", clusterId)
}
}
14 changes: 4 additions & 10 deletions internal/provider/resources/resource_deployment_test.go
@@ -20,8 +20,6 @@ import (
"github.com/stretchr/testify/assert"
)

// We will test dedicated deployment resources once dedicated_cluster_resource is implemented

func TestAcc_ResourceDeploymentHybrid(t *testing.T) {
namePrefix := utils.GenerateTestResourceName(10)

@@ -576,13 +574,9 @@ func hybridDeployment(input hybridDeploymentInput) string {
} else {
taskPodNodePoolIdStr = fmt.Sprintf(`task_pod_node_pool_id = "%v"`, input.NodePoolId)
}
return fmt.Sprintf(`
resource "astro_workspace" "%v_workspace" {
name = "%s"
description = "%s"
cicd_enforced_default = true
}

workspaceId := strings.Split(os.Getenv("HYBRID_WORKSPACE_IDS"), ",")[0]
return fmt.Sprintf(`
resource "astro_deployment" "%v" {
name = "%s"
description = "%s"
@@ -594,13 +588,13 @@ resource "astro_deployment" "%v" {
is_dag_deploy_enabled = true
scheduler_au = %v
scheduler_replicas = 1
workspace_id = astro_workspace.%v_workspace.id
workspace_id = "%v"
%v
%v
%v
}
`,
input.Name, input.Name, utils.TestResourceDescription, input.Name, input.Name, input.Description, input.ClusterId, input.Executor, input.SchedulerAu, input.Name,
input.Name, input.Name, utils.TestResourceDescription, input.ClusterId, input.Executor, input.SchedulerAu, workspaceId,
envVarsStr(input.IncludeEnvironmentVariables), wqStr, taskPodNodePoolIdStr)
}
