diff --git a/examples/data-sources/biganimal_cluster/data-source.tf b/examples/data-sources/biganimal_cluster/data-source.tf
index d7c4c47d..67630358 100644
--- a/examples/data-sources/biganimal_cluster/data-source.tf
+++ b/examples/data-sources/biganimal_cluster/data-source.tf
@@ -93,6 +93,10 @@ output "storage" {
   value = data.biganimal_cluster.this.storage
 }
 
+output "wal_storage" {
+  value = data.biganimal_cluster.this.wal_storage
+}
+
 output "superuser_access" {
   value = coalesce(data.biganimal_cluster.this.superuser_access, false)
 }
diff --git a/examples/data-sources/biganimal_faraway_replica/data-source.tf b/examples/data-sources/biganimal_faraway_replica/data-source.tf
index d3ece514..27a4d80a 100644
--- a/examples/data-sources/biganimal_faraway_replica/data-source.tf
+++ b/examples/data-sources/biganimal_faraway_replica/data-source.tf
@@ -93,6 +93,10 @@ output "storage" {
   value = data.biganimal_faraway_replica.this.storage
 }
 
+output "wal_storage" {
+  value = data.biganimal_faraway_replica.this.wal_storage
+}
+
 output "volume_snapshot_backup" {
   value = data.biganimal_faraway_replica.this.volume_snapshot_backup
 }
diff --git a/examples/resources/biganimal_cluster/ha/resource.tf b/examples/resources/biganimal_cluster/ha/resource.tf
index 988e52e2..78bf6e16 100644
--- a/examples/resources/biganimal_cluster/ha/resource.tf
+++ b/examples/resources/biganimal_cluster/ha/resource.tf
@@ -69,6 +69,12 @@ resource "biganimal_cluster" "ha_cluster" {
     size              = "4 Gi"
   }
 
+  # wal_storage = {
+  #   volume_type       = "gp3"
+  #   volume_properties = "gp3"
+  #   size              = "4 Gi"
+  # }
+
   maintenance_window = {
     is_enabled = true
     start_day  = 6
diff --git a/examples/resources/biganimal_cluster/single_node/aws/resource.tf b/examples/resources/biganimal_cluster/single_node/aws/resource.tf
index 06fc06a1..0e570320 100644
--- a/examples/resources/biganimal_cluster/single_node/aws/resource.tf
+++ b/examples/resources/biganimal_cluster/single_node/aws/resource.tf
@@ -70,6 +70,12 @@ resource "biganimal_cluster" "single_node_cluster" {
     size              = "4 Gi"
   }
 
+  # wal_storage = {
+  #   volume_type       = "gp3"
+  #   volume_properties = "gp3"
+  #   size              = "4 Gi"
+  # }
+
   maintenance_window = {
     is_enabled = true
     start_day  = 6
diff --git a/examples/resources/biganimal_cluster/single_node/azure/resource.tf b/examples/resources/biganimal_cluster/single_node/azure/resource.tf
index 9bcf0f43..866e8057 100644
--- a/examples/resources/biganimal_cluster/single_node/azure/resource.tf
+++ b/examples/resources/biganimal_cluster/single_node/azure/resource.tf
@@ -67,9 +67,15 @@ resource "biganimal_cluster" "single_node_cluster" {
   storage = {
     volume_type       = "azurepremiumstorage"
     volume_properties = "P1"
-    size              = "4 Gi"
+    size              = "4 Gi" # for azurepremiumstorage, see Premium storage disk sizes: https://learn.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance
   }
 
+  # wal_storage = {
+  #   volume_type       = "azurepremiumstorage"
+  #   volume_properties = "P1"
+  #   size              = "4 Gi" # for azurepremiumstorage, see Premium storage disk sizes: https://learn.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance
+  # }
+
   maintenance_window = {
     is_enabled = true
     start_day  = 6
"single_node_cluster" { size = "10 Gi" } + # wal_storage = { + # volume_type = "pd-ssd" + # volume_properties = "pd-ssd" + # size = "10 Gi" + # } + maintenance_window = { is_enabled = true start_day = 6 diff --git a/examples/resources/biganimal_faraway_replica/aws/resource.tf b/examples/resources/biganimal_faraway_replica/aws/resource.tf index 6eea61bd..90aabd58 100644 --- a/examples/resources/biganimal_faraway_replica/aws/resource.tf +++ b/examples/resources/biganimal_faraway_replica/aws/resource.tf @@ -67,6 +67,11 @@ resource "biganimal_faraway_replica" "faraway_replica" { volume_properties = "gp3" size = "4 Gi" } + # wal_storage = { + # volume_type = "gp3" + # volume_properties = "gp3" + # size = "4 Gi" + # } private_networking = false region = "ap-south-1" diff --git a/examples/resources/biganimal_faraway_replica/azure/resource.tf b/examples/resources/biganimal_faraway_replica/azure/resource.tf index cd9a6543..f0d30d82 100644 --- a/examples/resources/biganimal_faraway_replica/azure/resource.tf +++ b/examples/resources/biganimal_faraway_replica/azure/resource.tf @@ -65,8 +65,13 @@ resource "biganimal_faraway_replica" "faraway_replica" { storage = { volume_type = "azurepremiumstorage" volume_properties = "P1" - size = "4 Gi" + size = "4 Gi" # for azurepremiumstorage please check Premium storage disk sizes here: https://learn.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance } + # wal_storage = { + # volume_type = "azurepremiumstorage" + # volume_properties = "P1" + # size = "4 Gi" # for azurepremiumstorage please check Premium storage disk sizes here: https://learn.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance + # } private_networking = false region = "australiaeast" diff --git a/examples/resources/biganimal_faraway_replica/cluster_and_faraway_replica/resource.tf b/examples/resources/biganimal_faraway_replica/cluster_and_faraway_replica/resource.tf index 74db7bf2..326fdd0c 100644 --- a/examples/resources/biganimal_faraway_replica/cluster_and_faraway_replica/resource.tf +++ b/examples/resources/biganimal_faraway_replica/cluster_and_faraway_replica/resource.tf @@ -42,9 +42,15 @@ resource "biganimal_cluster" "single_node_cluster" { storage = { volume_type = "azurepremiumstorage" volume_properties = "P1" - size = "4 Gi" + size = "4 Gi" # for azurepremiumstorage please check Premium storage disk sizes here: https://learn.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance } + # wal_storage = { + # volume_type = "azurepremiumstorage" + # volume_properties = "P1" + # size = "4 Gi" # for azurepremiumstorage please check Premium storage disk sizes here: https://learn.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance + # } + pg_type = "epas" #valid values ["epas", "pgextended", "postgres]" pg_version = "15" cloud_provider = "azure" @@ -101,8 +107,13 @@ resource "biganimal_faraway_replica" "faraway_replica" { storage = { volume_type = "azurepremiumstorage" volume_properties = "P1" - size = "4 Gi" + size = "4 Gi" # for azurepremiumstorage please check Premium storage disk sizes here: https://learn.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance } + # wal_storage = { + # volume_type = "azurepremiumstorage" + # volume_properties = "P1" + # size = "4 Gi" # for azurepremiumstorage please check Premium storage disk sizes here: https://learn.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance + # } private_networking = false region = "centralindia" diff --git 
diff --git a/examples/resources/biganimal_faraway_replica/gcp/resource.tf b/examples/resources/biganimal_faraway_replica/gcp/resource.tf
index e2ac9242..b3be1eaa 100644
--- a/examples/resources/biganimal_faraway_replica/gcp/resource.tf
+++ b/examples/resources/biganimal_faraway_replica/gcp/resource.tf
@@ -67,6 +67,11 @@ resource "biganimal_faraway_replica" "faraway_replica" {
     volume_properties = "pd-ssd"
     size              = "4 Gi"
   }
+  # wal_storage = {
+  #   volume_type       = "pd-ssd"
+  #   volume_properties = "pd-ssd"
+  #   size              = "4 Gi"
+  # }
 
   private_networking = false
   region             = "us-east1"
diff --git a/examples/resources/biganimal_pgd/aws/data_group/resource.tf b/examples/resources/biganimal_pgd/aws/data_group/resource.tf
index f029b334..0ce24dce 100644
--- a/examples/resources/biganimal_pgd/aws/data_group/resource.tf
+++ b/examples/resources/biganimal_pgd/aws/data_group/resource.tf
@@ -78,6 +78,11 @@ resource "biganimal_pgd" "pgd_cluster" {
     volume_properties = "gp3"
     size              = "32 Gi"
   }
+  # wal_storage = {
+  #   volume_type       = "gp3"
+  #   volume_properties = "gp3"
+  #   size              = "32 Gi"
+  # }
   pg_type = {
     pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]"
   }
diff --git a/examples/resources/biganimal_pgd/aws/data_groups_with_witness_group/resource.tf b/examples/resources/biganimal_pgd/aws/data_groups_with_witness_group/resource.tf
index 09ef74f7..15f3f4dd 100644
--- a/examples/resources/biganimal_pgd/aws/data_groups_with_witness_group/resource.tf
+++ b/examples/resources/biganimal_pgd/aws/data_groups_with_witness_group/resource.tf
@@ -78,6 +78,11 @@ resource "biganimal_pgd" "pgd_cluster" {
     volume_properties = "gp3"
     size              = "32 Gi"
   }
+  # wal_storage = {
+  #   volume_type       = "gp3"
+  #   volume_properties = "gp3"
+  #   size              = "32 Gi"
+  # }
   pg_type = {
     pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]"
   }
@@ -136,6 +141,11 @@ resource "biganimal_pgd" "pgd_cluster" {
     volume_properties = "gp3"
     size              = "32 Gi"
   }
+  # wal_storage = {
+  #   volume_type       = "gp3"
+  #   volume_properties = "gp3"
+  #   size              = "32 Gi"
+  # }
   pg_type = {
     pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]"
   }
diff --git a/examples/resources/biganimal_pgd/azure/data_group/resource.tf b/examples/resources/biganimal_pgd/azure/data_group/resource.tf
index 3a66f61f..9867c055 100644
--- a/examples/resources/biganimal_pgd/azure/data_group/resource.tf
+++ b/examples/resources/biganimal_pgd/azure/data_group/resource.tf
@@ -78,6 +78,11 @@ resource "biganimal_pgd" "pgd_cluster" {
     volume_properties = "P2"
     size              = "32 Gi"
   }
+  # wal_storage = {
+  #   volume_type       = "azurepremiumstorage"
+  #   volume_properties = "P2"
+  #   size              = "32 Gi"
+  # }
   pg_type = {
     pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]"
   }
volume_properties = "P2" size = "32 Gi" } + # wal_storage = { + # volume_type = "azurepremiumstorage" + # volume_properties = "P2" + # size = "32 Gi" + # } pg_type = { pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]" } diff --git a/examples/resources/biganimal_pgd/gcp/data_group/resource.tf b/examples/resources/biganimal_pgd/gcp/data_group/resource.tf index 4fa2cb2f..6f5553d3 100644 --- a/examples/resources/biganimal_pgd/gcp/data_group/resource.tf +++ b/examples/resources/biganimal_pgd/gcp/data_group/resource.tf @@ -78,6 +78,11 @@ resource "biganimal_pgd" "pgd_cluster" { volume_properties = "pd-ssd" size = "32 Gi" } + # wal_storage = { + # volume_type = "pd-ssd" + # volume_properties = "pd-ssd" + # size = "32 Gi" + # } pg_type = { pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]" } diff --git a/examples/resources/biganimal_pgd/gcp/data_groups_with_witness_group/resource.tf b/examples/resources/biganimal_pgd/gcp/data_groups_with_witness_group/resource.tf index 81884dad..5ec94639 100644 --- a/examples/resources/biganimal_pgd/gcp/data_groups_with_witness_group/resource.tf +++ b/examples/resources/biganimal_pgd/gcp/data_groups_with_witness_group/resource.tf @@ -78,6 +78,11 @@ resource "biganimal_pgd" "pgd_cluster" { volume_properties = "pd-ssd" size = "32 Gi" } + # wal_storage = { + # volume_type = "pd-ssd" + # volume_properties = "pd-ssd" + # size = "32 Gi" + # } pg_type = { pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]" } @@ -140,6 +145,11 @@ resource "biganimal_pgd" "pgd_cluster" { volume_properties = "pd-ssd" size = "32 Gi" } + # wal_storage = { + # volume_type = "pd-ssd" + # volume_properties = "pd-ssd" + # size = "32 Gi" + # } pg_type = { pg_type_id = "epas" #valid values ["epas", "pgextended", "postgres]" } diff --git a/pkg/models/cluster.go b/pkg/models/cluster.go index 6682fa9c..a2a7cf04 100644 --- a/pkg/models/cluster.go +++ b/pkg/models/cluster.go @@ -187,6 +187,7 @@ type Cluster struct { EncryptionKeyResp *EncryptionKey `json:"encryptionKey,omitempty"` PgIdentity *string `json:"pgIdentity,omitempty"` BackupScheduleTime *string `json:"scheduleBackup,omitempty"` + WalStorage *Storage `json:"walStorage,omitempty"` } // IsHealthy checks to see if the cluster has the right condition 'biganimal.com/deployed' diff --git a/pkg/models/pgd/api/data_group.go b/pkg/models/pgd/api/data_group.go index b1857af7..2a36aa73 100644 --- a/pkg/models/pgd/api/data_group.go +++ b/pkg/models/pgd/api/data_group.go @@ -33,4 +33,5 @@ type DataGroup struct { RoConnectionUri *string `json:"roConnectionUri,omitempty"` ReadOnlyConnections *bool `json:"readOnlyConnections,omitempty"` BackupScheduleTime *string `json:"scheduleBackup,omitempty"` + WalStorage *models.Storage `json:"walStorage,omitempty"` } diff --git a/pkg/models/pgd/terraform/data_group.go b/pkg/models/pgd/terraform/data_group.go index 23c6e4bc..3f7393ed 100644 --- a/pkg/models/pgd/terraform/data_group.go +++ b/pkg/models/pgd/terraform/data_group.go @@ -34,4 +34,5 @@ type DataGroup struct { RoConnectionUri types.String `tfsdk:"ro_connection_uri"` ReadOnlyConnections *bool `tfsdk:"read_only_connections"` BackupScheduleTime types.String `tfsdk:"backup_schedule_time"` + WalStorage *Storage `tfsdk:"wal_storage"` } diff --git a/pkg/plan_modifier/cloud_provider.go b/pkg/plan_modifier/cloud_provider.go new file mode 100644 index 00000000..b3bad7fa --- /dev/null +++ b/pkg/plan_modifier/cloud_provider.go @@ -0,0 +1,59 @@ +package plan_modifier + +import ( + "context" + "strings" + + 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +func CustomClusterCloudProvider() planmodifier.String { + return customCloudProviderModifier{} +} + +type customCloudProviderModifier struct{} + +func (m customCloudProviderModifier) Description(_ context.Context) string { + return "Once set, the value of this attribute in state will not change." +} + +func (m customCloudProviderModifier) MarkdownDescription(_ context.Context) string { + return "Once set, the value of this attribute in state will not change." +} + +func (m customCloudProviderModifier) PlanModifyString(ctx context.Context, req planmodifier.StringRequest, resp *planmodifier.StringResponse) { + cloudProviderConfig := req.ConfigValue.ValueString() + var configObject map[string]tftypes.Value + + err := req.Config.Raw.As(&configObject) + if err != nil { + resp.Diagnostics.AddError("Mapping config object in custom cloud provider modifier error", err.Error()) + return + } + + if !strings.Contains(cloudProviderConfig, "bah") { + peIds, ok := configObject["pe_allowed_principal_ids"] + if ok && !peIds.IsNull() { + resp.Diagnostics.AddError("your cloud account 'pe_allowed_principal_ids' field not allowed error", + "field 'pe_allowed_principal_ids' should only be set if you are using BigAnimal's cloud account e.g. 'bah:aws', please remove 'pe_allowed_principal_ids'") + return + } + + saIds, ok := configObject["service_account_ids"] + if ok && !saIds.IsNull() { + resp.Diagnostics.AddError("your cloud account 'service_account_ids' field not allowed error", + "field 'service_account_ids' should only be set if you are using BigAnimal's cloud account 'bah:gcp', please remove 'service_account_ids'") + return + } + } + + if strings.Contains(cloudProviderConfig, "bah") && !strings.Contains(cloudProviderConfig, "bah:gcp") { + saIds, ok := configObject["service_account_ids"] + if ok && !saIds.IsNull() { + resp.Diagnostics.AddError("your cloud account 'service_account_ids' field not allowed error", + "you are not using cloud provider 'bah:gcp', field 'service_account_ids' should only be set if you are using cloud provider 'bah:gcp', please remove 'service_account_ids'") + return + } + } +} diff --git a/pkg/plan_modifier/data_group_custom_diff.go b/pkg/plan_modifier/data_group_custom_diff.go index 0df65780..db207562 100644 --- a/pkg/plan_modifier/data_group_custom_diff.go +++ b/pkg/plan_modifier/data_group_custom_diff.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "reflect" + "strings" "github.com/EnterpriseDB/terraform-provider-biganimal/pkg/models/pgd/terraform" "github.com/hashicorp/terraform-plugin-framework/attr" @@ -32,6 +33,44 @@ func (m CustomDataGroupDiffModifier) MarkdownDescription(_ context.Context) stri // PlanModifyList implements the plan modification logic. func (m CustomDataGroupDiffModifier) PlanModifyList(ctx context.Context, req planmodifier.ListRequest, resp *planmodifier.ListResponse) { + var stateDgsObs []terraform.DataGroup + diag := req.StateValue.ElementsAs(ctx, &stateDgsObs, false) + if diag.ErrorsCount() > 0 { + resp.Diagnostics.Append(diag...) + return + } + + var planDgsObs []terraform.DataGroup + diag = resp.PlanValue.ElementsAs(ctx, &planDgsObs, false) + if diag.ErrorsCount() > 0 { + resp.Diagnostics.Append(diag...) 
diff --git a/pkg/plan_modifier/data_group_custom_diff.go b/pkg/plan_modifier/data_group_custom_diff.go
index 0df65780..db207562 100644
--- a/pkg/plan_modifier/data_group_custom_diff.go
+++ b/pkg/plan_modifier/data_group_custom_diff.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"reflect"
+	"strings"
 
 	"github.com/EnterpriseDB/terraform-provider-biganimal/pkg/models/pgd/terraform"
 	"github.com/hashicorp/terraform-plugin-framework/attr"
@@ -32,6 +33,44 @@ func (m CustomDataGroupDiffModifier) MarkdownDescription(_ context.Context) stri
 
 // PlanModifyList implements the plan modification logic.
 func (m CustomDataGroupDiffModifier) PlanModifyList(ctx context.Context, req planmodifier.ListRequest, resp *planmodifier.ListResponse) {
+	var stateDgsObs []terraform.DataGroup
+	diag := req.StateValue.ElementsAs(ctx, &stateDgsObs, false)
+	if diag.ErrorsCount() > 0 {
+		resp.Diagnostics.Append(diag...)
+		return
+	}
+
+	var planDgsObs []terraform.DataGroup
+	diag = resp.PlanValue.ElementsAs(ctx, &planDgsObs, false)
+	if diag.ErrorsCount() > 0 {
+		resp.Diagnostics.Append(diag...)
+		return
+	}
+
+	// validations
+	for _, pDg := range planDgsObs {
+		// 'pe_allowed_principal_ids' and 'service_account_ids' are only valid with BigAnimal cloud accounts ('bah:...')
+		if !strings.Contains(*pDg.Provider.CloudProviderId, "bah") {
+			if !pDg.PeAllowedPrincipalIds.IsNull() && len(pDg.PeAllowedPrincipalIds.Elements()) > 0 {
+				resp.Diagnostics.AddError("'pe_allowed_principal_ids' not allowed for your cloud account",
+					fmt.Sprintf("field 'pe_allowed_principal_ids' for region %v should only be set if you are using BigAnimal's cloud account e.g. 'bah:aws', please remove 'pe_allowed_principal_ids'\n", pDg.Region.RegionId))
+				return
+			}
+
+			if !pDg.ServiceAccountIds.IsNull() && len(pDg.ServiceAccountIds.Elements()) > 0 {
+				resp.Diagnostics.AddError("'service_account_ids' not allowed for your cloud account",
+					fmt.Sprintf("field 'service_account_ids' for region %v should only be set if you are using BigAnimal's cloud account 'bah:gcp', please remove 'service_account_ids'\n", pDg.Region.RegionId))
+				return
+			}
+		} else if !strings.Contains(*pDg.Provider.CloudProviderId, "bah:gcp") {
+			if !pDg.ServiceAccountIds.IsNull() && len(pDg.ServiceAccountIds.Elements()) > 0 {
+				resp.Diagnostics.AddError("'service_account_ids' not allowed for your cloud account",
+					fmt.Sprintf("you are not using BigAnimal's cloud account 'bah:gcp' for region %v, field 'service_account_ids' should only be set if you are using BigAnimal's cloud account 'bah:gcp', please remove 'service_account_ids'", pDg.Region.RegionId))
+				return
+			}
+		}
+	}
+
 	if req.StateValue.IsNull() { // private networking case when doing create
 		var planDgsObs []terraform.DataGroup
@@ -87,20 +126,6 @@ func (m CustomDataGroupDiffModifier) PlanModifyList(ctx context.Context, req pla
 
 	newDgPlan := []terraform.DataGroup{}
 
-	var stateDgsObs []terraform.DataGroup
-	diag := req.StateValue.ElementsAs(ctx, &stateDgsObs, false)
-	if diag.ErrorsCount() > 0 {
-		resp.Diagnostics.Append(diag...)
-		return
-	}
-
-	var planDgsObs []terraform.DataGroup
-	diag = resp.PlanValue.ElementsAs(ctx, &planDgsObs, false)
-	if diag.ErrorsCount() > 0 {
-		resp.Diagnostics.Append(diag...)
-		return
-	}
-
 	// Need to sort the plan according to the state this is so the compare and setting unknowns are correct
 	// https://developer.hashicorp.com/terraform/plugin/framework/resources/plan-modification#caveats
 	// sort the order of the plan the same as the state, state is from the read and plan is from the config
@@ -126,6 +151,11 @@ func (m CustomDataGroupDiffModifier) PlanModifyList(ctx context.Context, req pla
 			pDg.Storage.Iops = sDg.Storage.Iops
 			pDg.Storage.Throughput = sDg.Storage.Throughput
 
+			// carry over server-computed WAL storage values; guard both sides against a missing wal_storage block
+			if sDg.WalStorage != nil && pDg.WalStorage != nil {
+				pDg.WalStorage.Iops = sDg.WalStorage.Iops
+				pDg.WalStorage.Throughput = sDg.WalStorage.Throughput
+			}
+
 			// fix to set the correct allowed ip ranges to allow all if a PGD data group has private networking set as true
 			if pDg.PrivateNetworking != nil && *pDg.PrivateNetworking {
 				pDg.AllowedIpRanges = types.SetValueMust(pDg.AllowedIpRanges.ElementType(ctx), []attr.Value{
For Google Cloud: only \"pd-ssd\".", + Required: true, + }, + }, +} diff --git a/pkg/provider/data_source_cluster.go b/pkg/provider/data_source_cluster.go index c9c33329..56e55014 100644 --- a/pkg/provider/data_source_cluster.go +++ b/pkg/provider/data_source_cluster.go @@ -389,6 +389,7 @@ func (c *clusterDataSource) Schema(ctx context.Context, req datasource.SchemaReq Computed: true, }, "backup_schedule_time": ResourceBackupScheduleTime, + "wal_storage": resourceWal, }, } } diff --git a/pkg/provider/data_source_fareplica.go b/pkg/provider/data_source_fareplica.go index 6ffe2620..4d6e9cf4 100644 --- a/pkg/provider/data_source_fareplica.go +++ b/pkg/provider/data_source_fareplica.go @@ -272,6 +272,7 @@ func (c *FAReplicaData) Schema(ctx context.Context, req datasource.SchemaRequest }, }, "backup_schedule_time": ResourceBackupScheduleTime, + "wal_storage": resourceWal, }, } } diff --git a/pkg/provider/data_source_pgd.go b/pkg/provider/data_source_pgd.go index 374cabd5..31e75f8c 100644 --- a/pkg/provider/data_source_pgd.go +++ b/pkg/provider/data_source_pgd.go @@ -285,6 +285,7 @@ func (p pgdDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, Computed: true, }, "backup_schedule_time": ResourceBackupScheduleTime, + "wal_storage": resourceWal, }, }, }, diff --git a/pkg/provider/resource_analytics_cluster.go b/pkg/provider/resource_analytics_cluster.go index f85353c3..163e96b3 100644 --- a/pkg/provider/resource_analytics_cluster.go +++ b/pkg/provider/resource_analytics_cluster.go @@ -170,8 +170,9 @@ func (r *analyticsClusterResource) Schema(ctx context.Context, req resource.Sche PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()}, }, "cloud_provider": schema.StringAttribute{ - Description: "Cloud provider. For example, \"aws\" or \"bah:aws\".", - Required: true, + Description: "Cloud provider. For example, \"aws\" or \"bah:aws\".", + Required: true, + PlanModifiers: []planmodifier.String{plan_modifier.CustomClusterCloudProvider()}, }, "pg_type": schema.StringAttribute{ MarkdownDescription: "Postgres type. For example, \"epas\" or \"pgextended\".", diff --git a/pkg/provider/resource_cluster.go b/pkg/provider/resource_cluster.go index e0fcc5b4..e748a7ae 100644 --- a/pkg/provider/resource_cluster.go +++ b/pkg/provider/resource_cluster.go @@ -83,6 +83,7 @@ type ClusterResourceModel struct { Tags []commonTerraform.Tag `tfsdk:"tags"` ServiceName types.String `tfsdk:"service_name"` BackupScheduleTime types.String `tfsdk:"backup_schedule_time"` + WalStorage *StorageResourceModel `tfsdk:"wal_storage"` Timeouts timeouts.Value `tfsdk:"timeouts"` } @@ -273,7 +274,7 @@ func (c *clusterResource) Schema(ctx context.Context, req resource.SchemaRequest Required: true, }, "volume_type": schema.StringAttribute{ - Description: "Volume type. For Azure: \"azurepremiumstorage\" or \"ultradisk\". For AWS: \"gp3\", \"io2\", org s \"io2-block-express\". For Google Cloud: only \"pd-ssd\".", + Description: "Volume type. For Azure: \"azurepremiumstorage\" or \"ultradisk\". For AWS: \"gp3\", \"io2\", or \"io2-block-express\". For Google Cloud: only \"pd-ssd\".", Required: true, }, }, @@ -326,8 +327,9 @@ func (c *clusterResource) Schema(ctx context.Context, req resource.SchemaRequest PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()}, }, "cloud_provider": schema.StringAttribute{ - Description: "Cloud provider. For example, \"aws\", \"azure\", \"gcp\" or \"bah:aws\", \"bah:gcp\".", - Required: true, + Description: "Cloud provider. 
For example, \"aws\", \"azure\", \"gcp\" or \"bah:aws\", \"bah:gcp\".", + Required: true, + PlanModifiers: []planmodifier.String{plan_modifier.CustomClusterCloudProvider()}, }, "pg_type": schema.StringAttribute{ MarkdownDescription: "Postgres type. For example, \"epas\", \"pgextended\", or \"postgres\".", @@ -577,6 +579,7 @@ func (c *clusterResource) Schema(ctx context.Context, req resource.SchemaRequest PlanModifiers: []planmodifier.String{stringplanmodifier.UseStateForUnknown()}, }, "backup_schedule_time": ResourceBackupScheduleTime, + "wal_storage": resourceWal, }, } } @@ -866,6 +869,13 @@ func readCluster(ctx context.Context, client *api.ClusterClient, tfClusterResour tfClusterResource.SuperuserAccess = types.BoolPointerValue(responseCluster.SuperuserAccess) tfClusterResource.PgIdentity = types.StringPointerValue(responseCluster.PgIdentity) tfClusterResource.VolumeSnapshot = types.BoolPointerValue(responseCluster.VolumeSnapshot) + tfClusterResource.WalStorage = &StorageResourceModel{ + VolumeType: types.StringPointerValue(responseCluster.WalStorage.VolumeTypeId), + VolumeProperties: types.StringPointerValue(responseCluster.WalStorage.VolumePropertiesId), + Size: types.StringPointerValue(responseCluster.WalStorage.Size), + Iops: types.StringPointerValue(responseCluster.WalStorage.Iops), + Throughput: types.StringPointerValue(responseCluster.WalStorage.Throughput), + } if responseCluster.EncryptionKeyResp != nil && *responseCluster.Phase != constants.PHASE_HEALTHY { if !tfClusterResource.PgIdentity.IsNull() && tfClusterResource.PgIdentity.ValueString() != "" { @@ -1106,6 +1116,16 @@ func (c *clusterResource) generateGenericClusterModel(ctx context.Context, clust VolumeSnapshot: clusterResource.VolumeSnapshot.ValueBoolPointer(), } + if clusterResource.WalStorage != nil { + cluster.WalStorage = &models.Storage{ + VolumePropertiesId: clusterResource.WalStorage.VolumeProperties.ValueStringPointer(), + VolumeTypeId: clusterResource.WalStorage.VolumeType.ValueStringPointer(), + Iops: clusterResource.WalStorage.Iops.ValueStringPointer(), + Size: clusterResource.WalStorage.Size.ValueStringPointer(), + Throughput: clusterResource.WalStorage.Throughput.ValueStringPointer(), + } + } + cluster.Extensions = &[]models.ClusterExtension{} if clusterResource.Pgvector.ValueBool() { *cluster.Extensions = append(*cluster.Extensions, models.ClusterExtension{Enabled: true, ExtensionId: "pgvector"}) diff --git a/pkg/provider/resource_fareplica.go b/pkg/provider/resource_fareplica.go index 9f95e826..74c2b532 100644 --- a/pkg/provider/resource_fareplica.go +++ b/pkg/provider/resource_fareplica.go @@ -66,6 +66,7 @@ type FAReplicaResourceModel struct { VolumeSnapshot types.Bool `tfsdk:"volume_snapshot_backup"` Tags []commonTerraform.Tag `tfsdk:"tags"` BackupScheduleTime types.String `tfsdk:"backup_schedule_time"` + WalStorage *StorageResourceModel `tfsdk:"wal_storage"` Timeouts timeouts.Value `tfsdk:"timeouts"` } @@ -424,6 +425,7 @@ func (r *FAReplicaResource) Schema(ctx context.Context, req resource.SchemaReque }, }, "backup_schedule_time": ResourceBackupScheduleTime, + "wal_storage": resourceWal, }, } } @@ -639,6 +641,13 @@ func readFAReplica(ctx context.Context, client *api.ClusterClient, fAReplicaReso fAReplicaResourceModel.PgVersion = types.StringValue(responseCluster.PgVersion.PgVersionId) fAReplicaResourceModel.PgType = types.StringValue(responseCluster.PgType.PgTypeId) fAReplicaResourceModel.VolumeSnapshot = types.BoolPointerValue(responseCluster.VolumeSnapshot) + fAReplicaResourceModel.WalStorage = 
&StorageResourceModel{ + VolumeType: types.StringPointerValue(responseCluster.WalStorage.VolumeTypeId), + VolumeProperties: types.StringPointerValue(responseCluster.WalStorage.VolumePropertiesId), + Size: types.StringPointerValue(responseCluster.WalStorage.Size), + Iops: types.StringPointerValue(responseCluster.WalStorage.Iops), + Throughput: types.StringPointerValue(responseCluster.WalStorage.Throughput), + } // pgConfig. If tf resource pg config elem matches with api response pg config elem then add the elem to tf resource pg config newPgConfig := []PgConfigResourceModel{} @@ -774,6 +783,16 @@ func (r *FAReplicaResource) generateGenericFAReplicaModel(ctx context.Context, f BackupScheduleTime: fAReplicaResourceModel.BackupScheduleTime.ValueStringPointer(), } + if fAReplicaResourceModel.WalStorage != nil { + cluster.WalStorage = &models.Storage{ + VolumePropertiesId: fAReplicaResourceModel.WalStorage.VolumeProperties.ValueStringPointer(), + VolumeTypeId: fAReplicaResourceModel.WalStorage.VolumeType.ValueStringPointer(), + Iops: fAReplicaResourceModel.WalStorage.Iops.ValueStringPointer(), + Size: fAReplicaResourceModel.WalStorage.Size.ValueStringPointer(), + Throughput: fAReplicaResourceModel.WalStorage.Throughput.ValueStringPointer(), + } + } + allowedIpRanges := []models.AllowedIpRange{} for _, ipRange := range fAReplicaResourceModel.AllowedIpRanges { allowedIpRanges = append(allowedIpRanges, models.AllowedIpRange{ diff --git a/pkg/provider/resource_pgd.go b/pkg/provider/resource_pgd.go index 608e8e23..b148eab6 100644 --- a/pkg/provider/resource_pgd.go +++ b/pkg/provider/resource_pgd.go @@ -415,9 +415,9 @@ func PgdSchema(ctx context.Context) schema.Schema { "read_only_connections": schema.BoolAttribute{ Description: "Is read-only connections enabled.", Optional: true, - Computed: true, }, "backup_schedule_time": ResourceBackupScheduleTime, + "wal_storage": resourceWal, }, }, }, @@ -667,6 +667,11 @@ func (p pgdResource) Create(ctx context.Context, req resource.CreateRequest, res storage := buildRequestStorage(*v.Storage) + var walStorage *models.Storage + if v.WalStorage != nil { + walStorage = buildRequestStorage(*v.WalStorage) + } + if v.PgConfig == nil { v.PgConfig = &[]models.KeyValue{} } @@ -706,6 +711,7 @@ func (p pgdResource) Create(ctx context.Context, req resource.CreateRequest, res ServiceAccountIds: svAccIds, PeAllowedPrincipalIds: principalIds, ReadOnlyConnections: v.ReadOnlyConnections, + WalStorage: walStorage, } *clusterReqBody.Groups = append(*clusterReqBody.Groups, apiDGModel) @@ -956,6 +962,11 @@ func (p pgdResource) Update(ctx context.Context, req resource.UpdateRequest, res for _, v := range plan.DataGroups { storage := buildRequestStorage(*v.Storage) + var walStorage *models.Storage + if v.WalStorage != nil { + walStorage = buildRequestStorage(*v.WalStorage) + } + groupId := v.GroupId.ValueStringPointer() if v.GroupId.IsUnknown() { groupId = nil @@ -981,6 +992,7 @@ func (p pgdResource) Update(ctx context.Context, req resource.UpdateRequest, res MaintenanceWindow: v.MaintenanceWindow, ServiceAccountIds: svAccIds, PeAllowedPrincipalIds: principalIds, + WalStorage: walStorage, } // signals that it doesn't have an existing group id so this is a new group to add and needs extra fields @@ -1341,6 +1353,18 @@ func buildTFGroupsAs(ctx context.Context, diags *diag.Diagnostics, state tfsdk.S Throughput: types.StringPointerValue(apiRespDgModel.Storage.Throughput), } + // wal storage + var walStorage *terraform.Storage + if apiRespDgModel.WalStorage != nil { + walStorage = 
&terraform.Storage{ + Size: types.StringPointerValue(apiRespDgModel.WalStorage.Size), + VolumePropertiesId: types.StringPointerValue(apiRespDgModel.WalStorage.VolumePropertiesId), + VolumeTypeId: types.StringPointerValue(apiRespDgModel.WalStorage.VolumeTypeId), + Iops: types.StringPointerValue(apiRespDgModel.WalStorage.Iops), + Throughput: types.StringPointerValue(apiRespDgModel.WalStorage.Throughput), + } + } + // service account ids serviceAccIds := []attr.Value{} if apiRespDgModel.ServiceAccountIds != nil && len(*apiRespDgModel.ServiceAccountIds) != 0 { @@ -1426,6 +1450,7 @@ func buildTFGroupsAs(ctx context.Context, diags *diag.Diagnostics, state tfsdk.S PeAllowedPrincipalIds: types.SetValueMust(types.StringType, principalIds), RoConnectionUri: types.StringPointerValue(apiRespDgModel.RoConnectionUri), ReadOnlyConnections: apiRespDgModel.ReadOnlyConnections, + WalStorage: walStorage, } outPgdTFResource.DataGroups = append(outPgdTFResource.DataGroups, tfDGModel)