From fe8419b96e8aaf3705d35f463a6606a40a4873b2 Mon Sep 17 00:00:00 2001 From: Ludwig Patte <108891859+lpatte@users.noreply.github.com> Date: Mon, 18 Mar 2024 09:01:46 +0100 Subject: [PATCH] clean deprecated function of cloud_project_database resources (#582) * clean deprecated function of cloud_project_database resources --- ...data_cloud_project_database_integration.go | 2 +- ...ata_cloud_project_database_integrations.go | 2 +- ovh/helpers/hashcode/hashcode.go | 14 +- ...t_database_kafka_schemaregistryacl_test.go | 10 +- ovh/resource_cloud_project_database.go | 12 +- ...esource_cloud_project_database_database.go | 2 +- ...urce_cloud_project_database_integration.go | 4 +- ...e_cloud_project_database_ip_restriction.go | 40 +-- ...source_cloud_project_database_kafka_acl.go | 2 +- ...roject_database_kafka_schemaregistryacl.go | 2 +- ...urce_cloud_project_database_kafka_topic.go | 26 +- ...e_cloud_project_database_m3db_namespace.go | 2 +- ...rce_cloud_project_database_mongodb_user.go | 40 +-- ...oud_project_database_opensearch_pattern.go | 2 +- ovh/resource_cloud_project_database_test.go | 6 +- ovh/resource_cloud_project_database_user.go | 2 +- ovh/types_cloud_project_database.go | 72 ++--- .../v2/helper/retry/error.go | 94 ++++++ .../v2/helper/retry/state.go | 283 ++++++++++++++++++ .../v2/helper/retry/wait.go | 116 +++++++ vendor/modules.txt | 1 + 21 files changed, 612 insertions(+), 122 deletions(-) create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/error.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/state.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/wait.go diff --git a/ovh/data_cloud_project_database_integration.go b/ovh/data_cloud_project_database_integration.go index 5892472ca..0e48d81b9 100644 --- a/ovh/data_cloud_project_database_integration.go +++ b/ovh/data_cloud_project_database_integration.go @@ -27,7 +27,7 @@ func 
dataSourceCloudProjectDatabaseIntegration() *schema.Resource { ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) if value == "mongodb" { - errors = append(errors, fmt.Errorf("Value %s is not a valid engine for integration", value)) + errors = append(errors, fmt.Errorf("value %s is not a valid engine for integration", value)) } return }, diff --git a/ovh/data_cloud_project_database_integrations.go b/ovh/data_cloud_project_database_integrations.go index d3013c6f1..5c2cba57f 100644 --- a/ovh/data_cloud_project_database_integrations.go +++ b/ovh/data_cloud_project_database_integrations.go @@ -28,7 +28,7 @@ func dataSourceCloudProjectDatabaseIntegrations() *schema.Resource { ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) if value == "mongodb" { - errors = append(errors, fmt.Errorf("Value %s is not a valid engine for integration", value)) + errors = append(errors, fmt.Errorf("value %s is not a valid engine for integration", value)) } return }, diff --git a/ovh/helpers/hashcode/hashcode.go b/ovh/helpers/hashcode/hashcode.go index f0c022dd2..c91a0b8ad 100644 --- a/ovh/helpers/hashcode/hashcode.go +++ b/ovh/helpers/hashcode/hashcode.go @@ -8,13 +8,8 @@ import ( // String hashes a string to a unique hashcode. // -// Deprecated: This will be removed in v2 without replacement. If you need -// its functionality, you can copy it, import crc32 directly, or reference the -// v1 package. -// -// crc32 returns a uint32, but for our use we need -// and non negative integer. Here we cast to an integer -// and invert it if the result is negative. +// Copy from https://github.com/hashicorp/terraform-plugin-sdk/blob/v1.17.2/helper/hashcode/hashcode.go +// following deprecation comment func String(s string) int { v := int(crc32.ChecksumIEEE([]byte(s))) if v >= 0 { @@ -29,9 +24,8 @@ func String(s string) int { // Strings hashes a list of strings to a unique hashcode. 
// -// Deprecated: This will be removed in v2 without replacement. If you need -// its functionality, you can copy it, import crc32 directly, or reference the -// v1 package. +// Copy from https://github.com/hashicorp/terraform-plugin-sdk/blob/v1.17.2/helper/hashcode/hashcode.go +// following deprecation comment func Strings(strings []string) string { var buf bytes.Buffer diff --git a/ovh/import_cloud_project_database_kafka_schemaregistryacl_test.go b/ovh/import_cloud_project_database_kafka_schemaregistryacl_test.go index 1c31c9523..c405c2563 100644 --- a/ovh/import_cloud_project_database_kafka_schemaregistryacl_test.go +++ b/ovh/import_cloud_project_database_kafka_schemaregistryacl_test.go @@ -48,7 +48,7 @@ func TestAccCloudProjectDatabaseKafkaSchemaRegistryAcl_importBasic(t *testing.T) ResourceName: "ovh_cloud_project_database_kafka_schemaregistryacl.schemaRegistryAcl", ImportState: true, ImportStateVerify: true, - ImportStateIdFunc: testAccCloudProjectDatabaseKafkaAclImportId("ovh_cloud_project_database_kafka_schemaregistryacl.schemaRegistryAcl"), + ImportStateIdFunc: testAccCloudProjectDatabaseKafkaSchemaRegistryAclImportId("ovh_cloud_project_database_kafka_schemaregistryacl.schemaRegistryAcl"), }, }, }) @@ -56,15 +56,15 @@ func TestAccCloudProjectDatabaseKafkaSchemaRegistryAcl_importBasic(t *testing.T) func testAccCloudProjectDatabaseKafkaSchemaRegistryAclImportId(resourceName string) resource.ImportStateIdFunc { return func(s *terraform.State) (string, error) { - testKafkaAcl, ok := s.RootModule().Resources[resourceName] + testKafkaSchemaRegistryAcl, ok := s.RootModule().Resources[resourceName] if !ok { return "", fmt.Errorf("ovh_cloud_project_database_kafka_schemaregistryacl not found: %s", resourceName) } return fmt.Sprintf( "%s/%s/%s", - testKafkaAcl.Primary.Attributes["service_name"], - testKafkaAcl.Primary.Attributes["cluster_id"], - testKafkaAcl.Primary.Attributes["id"], + testKafkaSchemaRegistryAcl.Primary.Attributes["service_name"], + 
testKafkaSchemaRegistryAcl.Primary.Attributes["cluster_id"], + testKafkaSchemaRegistryAcl.Primary.Attributes["id"], ), nil } } diff --git a/ovh/resource_cloud_project_database.go b/ovh/resource_cloud_project_database.go index 310b4a53b..45742f5a0 100644 --- a/ovh/resource_cloud_project_database.go +++ b/ovh/resource_cloud_project_database.go @@ -225,7 +225,7 @@ func resourceCloudProjectDatabaseImportState(d *schema.ResourceData, meta interf n := 3 splitId := strings.SplitN(givenId, "/", n) if len(splitId) != n { - return nil, fmt.Errorf("Import Id is not service_name/engine/databaseId formatted") + return nil, fmt.Errorf("import Id is not service_name/engine/databaseId formatted") } serviceName := splitId[0] engine := splitId[1] @@ -248,7 +248,7 @@ func resourceCloudProjectDatabaseCreate(ctx context.Context, d *schema.ResourceD url.PathEscape(serviceName), url.PathEscape(engine), ) - err, params := (&CloudProjectDatabaseCreateOpts{}).FromResource(d) + params, err := (&CloudProjectDatabaseCreateOpts{}).FromResource(d) if err != nil { return diag.Errorf("service creation failed : %q", err) } @@ -261,7 +261,7 @@ func resourceCloudProjectDatabaseCreate(ctx context.Context, d *schema.ResourceD } log.Printf("[DEBUG] Waiting for database %s to be READY", res.Id) - err = waitForCloudProjectDatabaseReady(config.OVHClient, serviceName, engine, res.Id, d.Timeout(schema.TimeoutCreate)) + err = waitForCloudProjectDatabaseReady(ctx, config.OVHClient, serviceName, engine, res.Id, d.Timeout(schema.TimeoutCreate)) if err != nil { return diag.Errorf("timeout while waiting database %s to be READY: %s", res.Id, err.Error()) } @@ -342,7 +342,7 @@ func resourceCloudProjectDatabaseUpdate(ctx context.Context, d *schema.ResourceD url.PathEscape(engine), url.PathEscape(d.Id()), ) - err, params := (&CloudProjectDatabaseUpdateOpts{}).FromResource(d) + params, err := (&CloudProjectDatabaseUpdateOpts{}).FromResource(d) if err != nil { return diag.Errorf("service update failed : %q", err) } @@ 
-353,7 +353,7 @@ func resourceCloudProjectDatabaseUpdate(ctx context.Context, d *schema.ResourceD } log.Printf("[DEBUG] Waiting for database %s to be READY", d.Id()) - err = waitForCloudProjectDatabaseReady(config.OVHClient, serviceName, engine, d.Id(), d.Timeout(schema.TimeoutUpdate)) + err = waitForCloudProjectDatabaseReady(ctx, config.OVHClient, serviceName, engine, d.Id(), d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("timeout while waiting database %s to be READY: %s", d.Id(), err.Error()) } @@ -392,7 +392,7 @@ func resourceCloudProjectDatabaseDelete(ctx context.Context, d *schema.ResourceD } log.Printf("[DEBUG] Waiting for database %s to be DELETED", d.Id()) - err = waitForCloudProjectDatabaseDeleted(config.OVHClient, serviceName, engine, d.Id(), d.Timeout(schema.TimeoutDelete)) + err = waitForCloudProjectDatabaseDeleted(ctx, config.OVHClient, serviceName, engine, d.Id(), d.Timeout(schema.TimeoutDelete)) if err != nil { return diag.Errorf("timeout while waiting database %s to be DELETED: %v", d.Id(), err) } diff --git a/ovh/resource_cloud_project_database_database.go b/ovh/resource_cloud_project_database_database.go index a25099600..32ae0ca71 100644 --- a/ovh/resource_cloud_project_database_database.go +++ b/ovh/resource_cloud_project_database_database.go @@ -70,7 +70,7 @@ func resourceCloudProjectDatabaseDatabaseImportState(d *schema.ResourceData, met n := 4 splitId := strings.SplitN(givenId, "/", n) if len(splitId) != n { - return nil, fmt.Errorf("Import Id is not service_name/engine/cluster_id/id formatted") + return nil, fmt.Errorf("import Id is not service_name/engine/cluster_id/id formatted") } serviceName := splitId[0] engine := splitId[1] diff --git a/ovh/resource_cloud_project_database_integration.go b/ovh/resource_cloud_project_database_integration.go index aacd974a1..3d6ff1bff 100644 --- a/ovh/resource_cloud_project_database_integration.go +++ b/ovh/resource_cloud_project_database_integration.go @@ -43,7 +43,7 @@ func 
resourceCloudProjectDatabaseIntegration() *schema.Resource { ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) if value == "mongodb" { - errors = append(errors, fmt.Errorf("Value %s is not a valid engine for integration", value)) + errors = append(errors, fmt.Errorf("value %s is not a valid engine for integration", value)) } return }, @@ -101,7 +101,7 @@ func resourceCloudProjectDatabaseIntegrationImportState(d *schema.ResourceData, n := 4 splitId := strings.SplitN(givenId, "/", n) if len(splitId) != n { - return nil, fmt.Errorf("Import Id is not service_name/engine/cluster_id/id formatted") + return nil, fmt.Errorf("import Id is not service_name/engine/cluster_id/id formatted") } serviceName := splitId[0] engine := splitId[1] diff --git a/ovh/resource_cloud_project_database_ip_restriction.go b/ovh/resource_cloud_project_database_ip_restriction.go index 0cfe85a36..edb6a34f8 100644 --- a/ovh/resource_cloud_project_database_ip_restriction.go +++ b/ovh/resource_cloud_project_database_ip_restriction.go @@ -9,8 +9,8 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/ovh/go-ovh/ovh" "github.com/ovh/terraform-provider-ovh/ovh/helpers" "github.com/ovh/terraform-provider-ovh/ovh/helpers/hashcode" @@ -79,7 +79,7 @@ func resourceCloudProjectDatabaseIpRestrictionImportState(d *schema.ResourceData n := 4 splitId := strings.SplitN(givenId, "/", n) if len(splitId) != n { - return nil, fmt.Errorf("Import Id is not service_name/engine/cluster_id/ip formatted") + return nil, fmt.Errorf("import Id is not service_name/engine/cluster_id/ip formatted") } serviceName := splitId[0] engine := splitId[1] @@ -111,24 +111,24 @@ func resourceCloudProjectDatabaseIpRestrictionCreate(ctx context.Context, d *sch res := 
&CloudProjectDatabaseIpRestrictionResponse{} return diag.FromErr( - resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), - func() *resource.RetryError { + retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), + func() *retry.RetryError { log.Printf("[DEBUG] Will create IP restriction: %+v for cluster %s from project %s", params, clusterId, serviceName) rErr := config.OVHClient.Post(endpoint, params, res) if rErr != nil { if errOvh, ok := rErr.(*ovh.APIError); ok && (errOvh.Code == 409) { if resourceCloudProjectDatabaseIpRestrictionRead(ctx, d, meta) != nil || d.Id() != "" { - return resource.NonRetryableError(fmt.Errorf("calling Post %s with params %+v:\n\t %q", endpoint, params, rErr)) + return retry.NonRetryableError(fmt.Errorf("calling Post %s with params %+v:\n\t %q", endpoint, params, rErr)) } - return resource.RetryableError(rErr) + return retry.RetryableError(rErr) } - return resource.NonRetryableError(fmt.Errorf("calling Post %s with params %+v:\n\t %q", endpoint, params, rErr)) + return retry.NonRetryableError(fmt.Errorf("calling Post %s with params %+v:\n\t %q", endpoint, params, rErr)) } log.Printf("[DEBUG] Waiting for IP restriction %s to be READY", res.Ip) rErr = waitForCloudProjectDatabaseIpRestrictionReady(ctx, config.OVHClient, serviceName, engine, clusterId, res.Ip, d.Timeout(schema.TimeoutCreate)) if rErr != nil { - return resource.NonRetryableError(fmt.Errorf("timeout while waiting IP restriction %s to be READY: %w", res.Ip, rErr)) + return retry.NonRetryableError(fmt.Errorf("timeout while waiting IP restriction %s to be READY: %w", res.Ip, rErr)) } log.Printf("[DEBUG] IP restriction %s is READY", res.Ip) @@ -137,7 +137,7 @@ func resourceCloudProjectDatabaseIpRestrictionCreate(ctx context.Context, d *sch readDiags := resourceCloudProjectDatabaseIpRestrictionRead(ctx, d, meta) rErr = diagnosticsToError(readDiags) if rErr != nil { - return resource.NonRetryableError(rErr) + return retry.NonRetryableError(rErr) } return nil }, @@ -190,28 
+190,28 @@ func resourceCloudProjectDatabaseIpRestrictionUpdate(ctx context.Context, d *sch params := (&CloudProjectDatabaseIpRestrictionUpdateOpts{}).FromResource(d) return diag.FromErr( - resource.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate), - func() *resource.RetryError { + retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate), + func() *retry.RetryError { log.Printf("[DEBUG] Will update IP restriction: %+v from cluster %s from project %s", params, clusterId, serviceName) rErr := config.OVHClient.Put(endpoint, params, nil) if rErr != nil { if errOvh, ok := rErr.(*ovh.APIError); ok && (errOvh.Code == 409) { - return resource.RetryableError(rErr) + return retry.RetryableError(rErr) } - return resource.NonRetryableError(fmt.Errorf("calling Put %s with params %+v:\n\t %q", endpoint, params, rErr)) + return retry.NonRetryableError(fmt.Errorf("calling Put %s with params %+v:\n\t %q", endpoint, params, rErr)) } log.Printf("[DEBUG] Waiting for IP restriction %s to be READY", ip) rErr = waitForCloudProjectDatabaseIpRestrictionReady(ctx, config.OVHClient, serviceName, engine, clusterId, ip, d.Timeout(schema.TimeoutUpdate)) if rErr != nil { - return resource.NonRetryableError(fmt.Errorf("timeout while waiting IP restriction %s to be READY: %w", ip, rErr)) + return retry.NonRetryableError(fmt.Errorf("timeout while waiting IP restriction %s to be READY: %w", ip, rErr)) } log.Printf("[DEBUG] IP restriction %s is READY", ip) readDiags := resourceCloudProjectDatabaseIpRestrictionRead(ctx, d, meta) rErr = diagnosticsToError(readDiags) if rErr != nil { - return resource.NonRetryableError(rErr) + return retry.NonRetryableError(rErr) } return nil }, @@ -234,17 +234,17 @@ func resourceCloudProjectDatabaseIpRestrictionDelete(ctx context.Context, d *sch ) return diag.FromErr( - resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), - func() *resource.RetryError { + retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), + func() *retry.RetryError { log.Printf("[DEBUG] 
Will delete IP restriction %s from cluster %s from project %s", ip, clusterId, serviceName) rErr := config.OVHClient.Delete(endpoint, nil) if rErr != nil { if errOvh, ok := rErr.(*ovh.APIError); ok && (errOvh.Code == 409) { - return resource.RetryableError(rErr) + return retry.RetryableError(rErr) } rErr = helpers.CheckDeleted(d, rErr, endpoint) if rErr != nil { - return resource.NonRetryableError(rErr) + return retry.NonRetryableError(rErr) } return nil } @@ -252,7 +252,7 @@ func resourceCloudProjectDatabaseIpRestrictionDelete(ctx context.Context, d *sch log.Printf("[DEBUG] Waiting for IP restriction %s to be DELETED", clusterId) rErr = waitForCloudProjectDatabaseIpRestrictionDeleted(ctx, config.OVHClient, serviceName, engine, clusterId, ip, d.Timeout(schema.TimeoutDelete)) if rErr != nil { - return resource.NonRetryableError(fmt.Errorf("timeout while waiting IP restriction %s to be DELETED: %w", clusterId, rErr)) + return retry.NonRetryableError(fmt.Errorf("timeout while waiting IP restriction %s to be DELETED: %w", clusterId, rErr)) } log.Printf("[DEBUG] IP restriction %s is DELETED", clusterId) diff --git a/ovh/resource_cloud_project_database_kafka_acl.go b/ovh/resource_cloud_project_database_kafka_acl.go index fa097a0f8..2f5c69ba8 100644 --- a/ovh/resource_cloud_project_database_kafka_acl.go +++ b/ovh/resource_cloud_project_database_kafka_acl.go @@ -68,7 +68,7 @@ func resourceCloudProjectDatabaseKafkaAclImportState(d *schema.ResourceData, met n := 3 splitId := strings.SplitN(givenId, "/", n) if len(splitId) != n { - return nil, fmt.Errorf("Import Id is not service_name/cluster_id/id formatted") + return nil, fmt.Errorf("import Id is not service_name/cluster_id/id formatted") } serviceName := splitId[0] clusterId := splitId[1] diff --git a/ovh/resource_cloud_project_database_kafka_schemaregistryacl.go b/ovh/resource_cloud_project_database_kafka_schemaregistryacl.go index 9264039d9..cb91dc34e 100644 --- 
a/ovh/resource_cloud_project_database_kafka_schemaregistryacl.go +++ b/ovh/resource_cloud_project_database_kafka_schemaregistryacl.go @@ -68,7 +68,7 @@ func resourceCloudProjectDatabaseKafkaSchemaRegistryAclImportState(d *schema.Res n := 3 splitId := strings.SplitN(givenId, "/", n) if len(splitId) != n { - return nil, fmt.Errorf("Import Id is not service_name/cluster_id/id formatted") + return nil, fmt.Errorf("import Id is not service_name/cluster_id/id formatted") } serviceName := splitId[0] clusterId := splitId[1] diff --git a/ovh/resource_cloud_project_database_kafka_topic.go b/ovh/resource_cloud_project_database_kafka_topic.go index 12132c9d1..8e2d4d728 100644 --- a/ovh/resource_cloud_project_database_kafka_topic.go +++ b/ovh/resource_cloud_project_database_kafka_topic.go @@ -9,8 +9,8 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/ovh/go-ovh/ovh" "github.com/ovh/terraform-provider-ovh/ovh/helpers" ) @@ -99,7 +99,7 @@ func resourceCloudProjectDatabaseKafkaTopicImportState(d *schema.ResourceData, m n := 3 splitId := strings.SplitN(givenId, "/", n) if len(splitId) != n { - return nil, fmt.Errorf("Import Id is not service_name/cluster_id/id formatted") + return nil, fmt.Errorf("import Id is not service_name/cluster_id/id formatted") } serviceName := splitId[0] clusterId := splitId[1] @@ -126,21 +126,21 @@ func resourceCloudProjectDatabaseKafkaTopicCreate(ctx context.Context, d *schema res := &CloudProjectDatabaseKafkaTopicResponse{} return diag.FromErr( - resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), - func() *resource.RetryError { + retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), + func() *retry.RetryError { log.Printf("[DEBUG] Will create topic: %+v for cluster %s from project %s", params, clusterId, serviceName) err := 
config.OVHClient.Post(endpoint, params, res) if err != nil { if errOvh, ok := err.(*ovh.APIError); ok && (errOvh.Code == 409) { - return resource.RetryableError(err) + return retry.RetryableError(err) } - return resource.NonRetryableError(fmt.Errorf("calling Post %s with params %+v:\n\t %q", endpoint, params, err)) + return retry.NonRetryableError(fmt.Errorf("calling Post %s with params %+v:\n\t %q", endpoint, params, err)) } log.Printf("[DEBUG] Waiting for topic %s to be READY", res.Id) err = waitForCloudProjectDatabaseKafkaTopicReady(ctx, config.OVHClient, serviceName, clusterId, res.Id, d.Timeout(schema.TimeoutCreate)) if err != nil { - return resource.NonRetryableError(fmt.Errorf("timeout while waiting topic %s to be READY: %s", res.Id, err.Error())) + return retry.NonRetryableError(fmt.Errorf("timeout while waiting topic %s to be READY: %s", res.Id, err.Error())) } log.Printf("[DEBUG] topic %s is READY", res.Id) @@ -148,7 +148,7 @@ func resourceCloudProjectDatabaseKafkaTopicCreate(ctx context.Context, d *schema readDiags := resourceCloudProjectDatabaseKafkaTopicRead(ctx, d, meta) err = diagnosticsToError(readDiags) if err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } return nil }, @@ -198,17 +198,17 @@ func resourceCloudProjectDatabaseKafkaTopicDelete(ctx context.Context, d *schema ) return diag.FromErr( - resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), - func() *resource.RetryError { + retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), + func() *retry.RetryError { log.Printf("[DEBUG] Will delete topic %s from cluster %s from project %s", id, clusterId, serviceName) err := config.OVHClient.Delete(endpoint, nil) if err != nil { if errOvh, ok := err.(*ovh.APIError); ok && (errOvh.Code == 409) { - return resource.RetryableError(err) + return retry.RetryableError(err) } err = helpers.CheckDeleted(d, err, endpoint) if err != nil { - return resource.NonRetryableError(err) + return 
retry.NonRetryableError(err) } return nil } @@ -216,7 +216,7 @@ func resourceCloudProjectDatabaseKafkaTopicDelete(ctx context.Context, d *schema log.Printf("[DEBUG] Waiting for topic %s to be DELETED", id) err = waitForCloudProjectDatabaseKafkaTopicDeleted(ctx, config.OVHClient, serviceName, clusterId, id, d.Timeout(schema.TimeoutDelete)) if err != nil { - return resource.NonRetryableError(fmt.Errorf("timeout while waiting topic %s to be DELETED: %s", id, err.Error())) + return retry.NonRetryableError(fmt.Errorf("timeout while waiting topic %s to be DELETED: %s", id, err.Error())) } log.Printf("[DEBUG] topic %s is DELETED", id) diff --git a/ovh/resource_cloud_project_database_m3db_namespace.go b/ovh/resource_cloud_project_database_m3db_namespace.go index 1c6fa6be3..e3db3fb1f 100644 --- a/ovh/resource_cloud_project_database_m3db_namespace.go +++ b/ovh/resource_cloud_project_database_m3db_namespace.go @@ -114,7 +114,7 @@ func resourceCloudProjectDatabaseM3dbNamespaceImportState(d *schema.ResourceData n := 3 splitId := strings.SplitN(givenId, "/", n) if len(splitId) != n { - return nil, fmt.Errorf("Import Id is not service_name/cluster_id/id formatted") + return nil, fmt.Errorf("import Id is not service_name/cluster_id/id formatted") } serviceName := splitId[0] clusterId := splitId[1] diff --git a/ovh/resource_cloud_project_database_mongodb_user.go b/ovh/resource_cloud_project_database_mongodb_user.go index e9ad97e4a..fc36bfc30 100644 --- a/ovh/resource_cloud_project_database_mongodb_user.go +++ b/ovh/resource_cloud_project_database_mongodb_user.go @@ -10,8 +10,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/ovh/go-ovh/ovh" "github.com/ovh/terraform-provider-ovh/ovh/helpers" ) @@ -72,7 
+72,7 @@ func resourceCloudProjectDatabaseMongodbUser() *schema.Resource { ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) if !strings.Contains(value, "@") { - errors = append(errors, fmt.Errorf("Value %s do not have authentication database", value)) + errors = append(errors, fmt.Errorf("value %s do not have authentication database", value)) } return }, @@ -123,22 +123,22 @@ func resourceCloudProjectDatabaseMongodbUserCreate(ctx context.Context, d *schem res := &CloudProjectDatabaseUserResponse{} return diag.FromErr( - resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), - func() *resource.RetryError { + retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), + func() *retry.RetryError { log.Printf("[DEBUG] Will create user: %+v for cluster %s from project %s", params, clusterId, serviceName) rErr := postFuncCloudProjectDatabaseUser(ctx, d, meta, "mongodb", endpoint, params, res, schema.TimeoutCreate) if rErr != nil { if errOvh, ok := rErr.(*ovh.APIError); ok && (errOvh.Code == 409) { - return resource.RetryableError(rErr) + return retry.RetryableError(rErr) } - return resource.NonRetryableError(rErr) + return retry.NonRetryableError(rErr) } d.SetId(res.Id) readDiags := resourceCloudProjectDatabaseMongodbUserRead(ctx, d, meta) rErr = diagnosticsToError(readDiags) if rErr != nil { - return resource.NonRetryableError(rErr) + return retry.NonRetryableError(rErr) } return nil }, @@ -191,21 +191,21 @@ func resourceCloudProjectDatabaseMongodbUserUpdate(ctx context.Context, d *schem params := (&CloudProjectDatabaseMongodbUserUpdateOpts{}).FromResource(d) return diag.FromErr( - resource.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate), - func() *resource.RetryError { + retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate), + func() *retry.RetryError { log.Printf("[DEBUG] Will update user: %+v from cluster %s from project %s", params, clusterId, serviceName) rErr := config.OVHClient.Put(endpoint, params, nil) if 
rErr != nil { if errOvh, ok := rErr.(*ovh.APIError); ok && (errOvh.Code == 409) { - return resource.RetryableError(rErr) + return retry.RetryableError(rErr) } - return resource.NonRetryableError(fmt.Errorf("calling Put %s with params %s:\n\t %q", endpoint, params, rErr)) + return retry.NonRetryableError(fmt.Errorf("calling Put %s with params %s:\n\t %q", endpoint, params, rErr)) } log.Printf("[DEBUG] Waiting for user %s to be READY", id) rErr = waitForCloudProjectDatabaseUserReady(ctx, config.OVHClient, serviceName, "mongodb", clusterId, id, d.Timeout(schema.TimeoutUpdate)) if rErr != nil { - return resource.NonRetryableError(fmt.Errorf("timeout while waiting user %s to be READY: %w", id, rErr)) + return retry.NonRetryableError(fmt.Errorf("timeout while waiting user %s to be READY: %w", id, rErr)) } log.Printf("[DEBUG] user %s is READY", id) @@ -216,16 +216,16 @@ func resourceCloudProjectDatabaseMongodbUserUpdate(ctx context.Context, d *schem err := postFuncCloudProjectDatabaseUser(ctx, d, meta, "mongodb", pwdResetEndpoint, nil, res, schema.TimeoutUpdate) if err != nil { if errOvh, ok := err.(*ovh.APIError); ok && (errOvh.Code == 409) { - return resource.RetryableError(err) + return retry.RetryableError(err) } - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } } readDiags := resourceCloudProjectDatabaseMongodbUserRead(ctx, d, meta) rErr = diagnosticsToError(readDiags) if rErr != nil { - return resource.NonRetryableError(rErr) + return retry.NonRetryableError(rErr) } return nil }, @@ -246,17 +246,17 @@ func resourceCloudProjectDatabaseMongodbUserDelete(ctx context.Context, d *schem ) return diag.FromErr( - resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), - func() *resource.RetryError { + retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), + func() *retry.RetryError { log.Printf("[DEBUG] Will delete user %s from cluster %s from project %s", id, clusterId, serviceName) err := config.OVHClient.Delete(endpoint, nil) if err 
!= nil { if errOvh, ok := err.(*ovh.APIError); ok && (errOvh.Code == 409) { - return resource.RetryableError(err) + return retry.RetryableError(err) } err = helpers.CheckDeleted(d, err, endpoint) if err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } return nil } @@ -264,7 +264,7 @@ func resourceCloudProjectDatabaseMongodbUserDelete(ctx context.Context, d *schem log.Printf("[DEBUG] Waiting for user %s to be DELETED", id) err = waitForCloudProjectDatabaseUserDeleted(ctx, config.OVHClient, serviceName, "mongodb", clusterId, id, d.Timeout(schema.TimeoutDelete)) if err != nil { - return resource.NonRetryableError(fmt.Errorf("timeout while waiting user %s to be DELETED: %w", id, err)) + return retry.NonRetryableError(fmt.Errorf("timeout while waiting user %s to be DELETED: %w", id, err)) } log.Printf("[DEBUG] user %s is DELETED", id) diff --git a/ovh/resource_cloud_project_database_opensearch_pattern.go b/ovh/resource_cloud_project_database_opensearch_pattern.go index 1eed38e71..966672e28 100644 --- a/ovh/resource_cloud_project_database_opensearch_pattern.go +++ b/ovh/resource_cloud_project_database_opensearch_pattern.go @@ -62,7 +62,7 @@ func resourceCloudProjectDatabaseOpensearchPatternImportState(d *schema.Resource n := 3 splitId := strings.SplitN(givenId, "/", n) if len(splitId) != n { - return nil, fmt.Errorf("Import Id is not service_name/cluster_id/id formatted") + return nil, fmt.Errorf("import Id is not service_name/cluster_id/id formatted") } serviceName := splitId[0] clusterId := splitId[1] diff --git a/ovh/resource_cloud_project_database_test.go b/ovh/resource_cloud_project_database_test.go index 70733e1b1..b35840fa0 100644 --- a/ovh/resource_cloud_project_database_test.go +++ b/ovh/resource_cloud_project_database_test.go @@ -1,6 +1,7 @@ package ovh import ( + "context" "fmt" "log" "os" @@ -8,6 +9,7 @@ import ( "testing" "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" ) @@ -52,9 +54,9 @@ func testSweepCloudProjectDatabase(region string) error { continue } - err = resource.Retry(5*time.Minute, func() *resource.RetryError { + err = retry.RetryContext(context.Background(), 5*time.Minute, func() *retry.RetryError { if err := client.Delete(fmt.Sprintf("/cloud/project/%s/database/%s/%s", serviceName, engineName, databaseId), nil); err != nil { - return resource.RetryableError(err) + return retry.RetryableError(err) } // Successful delete return nil diff --git a/ovh/resource_cloud_project_database_user.go b/ovh/resource_cloud_project_database_user.go index b26e08128..30d15833f 100644 --- a/ovh/resource_cloud_project_database_user.go +++ b/ovh/resource_cloud_project_database_user.go @@ -96,7 +96,7 @@ func resourceCloudProjectDatabaseUserImportState(d *schema.ResourceData, meta in n := 4 splitId := strings.SplitN(givenId, "/", n) if len(splitId) != n { - return nil, fmt.Errorf("Import Id is not service_name/engine/cluster_id/id formatted") + return nil, fmt.Errorf("import Id is not service_name/engine/cluster_id/id formatted") } serviceName := splitId[0] engine := splitId[1] diff --git a/ovh/types_cloud_project_database.go b/ovh/types_cloud_project_database.go index 33541df4e..24d28f98d 100644 --- a/ovh/types_cloud_project_database.go +++ b/ovh/types_cloud_project_database.go @@ -10,8 +10,8 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/ovh/go-ovh/ovh" "github.com/ovh/terraform-provider-ovh/ovh/helpers" "github.com/ybriffa/rfc3339" @@ -176,7 +176,7 @@ type CloudProjectDatabaseNodesPattern struct { Region string `json:"region"` } -func (opts *CloudProjectDatabaseCreateOpts) FromResource(d 
*schema.ResourceData) (error, *CloudProjectDatabaseCreateOpts) { +func (opts *CloudProjectDatabaseCreateOpts) FromResource(d *schema.ResourceData) (*CloudProjectDatabaseCreateOpts, error) { opts.Description = d.Get("description").(string) opts.Plan = d.Get("plan").(string) @@ -187,7 +187,7 @@ func (opts *CloudProjectDatabaseCreateOpts) FromResource(d *schema.ResourceData) } if err := checkNodesEquality(nodes); err != nil { - return err, nil + return nil, err } opts.NodesPattern = CloudProjectDatabaseNodesPattern{ @@ -203,14 +203,14 @@ func (opts *CloudProjectDatabaseCreateOpts) FromResource(d *schema.ResourceData) regions, err := helpers.StringsFromSchema(d, "backup_regions") if err != nil { - return err, nil + return nil, err } opts.Backups = CloudProjectDatabaseBackups{ Regions: regions, Time: d.Get("backup_time").(string), } - return nil, opts + return opts, nil } type CloudProjectDatabaseUpdateOpts struct { @@ -224,7 +224,7 @@ type CloudProjectDatabaseUpdateOpts struct { Backups CloudProjectDatabaseBackups `json:"backups,omitempty"` } -func (opts *CloudProjectDatabaseUpdateOpts) FromResource(d *schema.ResourceData) (error, *CloudProjectDatabaseUpdateOpts) { +func (opts *CloudProjectDatabaseUpdateOpts) FromResource(d *schema.ResourceData) (*CloudProjectDatabaseUpdateOpts, error) { engine := d.Get("engine").(string) if engine == "opensearch" { opts.AclsEnabled = d.Get("opensearch_acls_enabled").(bool) @@ -241,7 +241,7 @@ func (opts *CloudProjectDatabaseUpdateOpts) FromResource(d *schema.ResourceData) regions, err := helpers.StringsFromSchema(d, "backup_regions") if err != nil { - return err, nil + return nil, err } opts.Backups = CloudProjectDatabaseBackups{ @@ -249,7 +249,7 @@ func (opts *CloudProjectDatabaseUpdateOpts) FromResource(d *schema.ResourceData) Time: d.Get("backup_time").(string), } - return nil, opts + return opts, nil } // This make sure Nodes are homogenous. 
@@ -282,8 +282,8 @@ func checkNodesEquality(nodes []CloudProjectDatabaseNodes) error { return nil } -func waitForCloudProjectDatabaseReady(client *ovh.Client, serviceName, engine string, databaseId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ +func waitForCloudProjectDatabaseReady(ctx context.Context, client *ovh.Client, serviceName, engine string, databaseId string, timeOut time.Duration) error { + stateConf := &retry.StateChangeConf{ Pending: []string{"PENDING", "CREATING", "UPDATING"}, Target: []string{"READY"}, Refresh: func() (interface{}, string, error) { @@ -305,12 +305,12 @@ func waitForCloudProjectDatabaseReady(client *ovh.Client, serviceName, engine st MinTimeout: 10 * time.Second, } - _, err := stateConf.WaitForState() + _, err := stateConf.WaitForStateContext(ctx) return err } -func waitForCloudProjectDatabaseDeleted(client *ovh.Client, serviceName, engine string, databaseId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ +func waitForCloudProjectDatabaseDeleted(ctx context.Context, client *ovh.Client, serviceName, engine string, databaseId string, timeOut time.Duration) error { + stateConf := &retry.StateChangeConf{ Pending: []string{"DELETING"}, Target: []string{"DELETED"}, Refresh: func() (interface{}, string, error) { @@ -335,7 +335,7 @@ func waitForCloudProjectDatabaseDeleted(client *ovh.Client, serviceName, engine PollInterval: 20 * time.Second, } - _, err := stateConf.WaitForState() + _, err := stateConf.WaitForStateContext(ctx) return err } @@ -485,7 +485,7 @@ func (opts *CloudProjectDatabaseIpRestrictionUpdateOpts) FromResource(d *schema. 
} func waitForCloudProjectDatabaseIpRestrictionReady(ctx context.Context, client *ovh.Client, serviceName, engine string, databaseId string, ip string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"PENDING", "CREATING", "UPDATING"}, Target: []string{"READY"}, Refresh: func() (interface{}, string, error) { @@ -513,7 +513,7 @@ func waitForCloudProjectDatabaseIpRestrictionReady(ctx context.Context, client * } func waitForCloudProjectDatabaseIpRestrictionDeleted(ctx context.Context, client *ovh.Client, serviceName, engine string, databaseId string, ip string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"DELETING"}, Target: []string{"DELETED"}, Refresh: func() (interface{}, string, error) { @@ -587,7 +587,7 @@ func importCloudProjectDatabaseUser(d *schema.ResourceData, meta interface{}) ([ n := 3 splitId := strings.SplitN(givenId, "/", n) if len(splitId) != n { - return nil, fmt.Errorf("Import Id is not service_name/cluster_id/id formatted") + return nil, fmt.Errorf("import Id is not service_name/cluster_id/id formatted") } serviceName := splitId[0] clusterId := splitId[1] @@ -611,7 +611,7 @@ func postCloudProjectDatabaseUser(ctx context.Context, d *schema.ResourceData, m return updateFunc(ctx, d, meta) } if engine == "grafana" && name != "avnadmin" { - return diag.FromErr(fmt.Errorf("The Grafana engine does not allow to create a user resource other than avnadmin")) + return diag.FromErr(fmt.Errorf("the Grafana engine does not allow to create a user resource other than avnadmin")) } serviceName := d.Get("service_name").(string) @@ -743,7 +743,7 @@ func deleteCloudProjectDatabaseUser(ctx context.Context, d *schema.ResourceData, } func waitForCloudProjectDatabaseUserReady(ctx context.Context, client *ovh.Client, serviceName, engine string, databaseId string, userId string, timeOut time.Duration) error { 
- stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"PENDING", "CREATING", "UPDATING"}, Target: []string{"READY"}, Refresh: func() (interface{}, string, error) { @@ -771,7 +771,7 @@ func waitForCloudProjectDatabaseUserReady(ctx context.Context, client *ovh.Clien } func waitForCloudProjectDatabaseUserDeleted(ctx context.Context, client *ovh.Client, serviceName, engine string, databaseId string, userId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"DELETING"}, Target: []string{"DELETED"}, Refresh: func() (interface{}, string, error) { @@ -838,7 +838,7 @@ func (opts *CloudProjectDatabaseDatabaseCreateOpts) FromResource(d *schema.Resou } func waitForCloudProjectDatabaseDatabaseReady(ctx context.Context, client *ovh.Client, serviceName, engine string, serviceId string, databaseId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"PENDING"}, Target: []string{"READY"}, Refresh: func() (interface{}, string, error) { @@ -868,7 +868,7 @@ func waitForCloudProjectDatabaseDatabaseReady(ctx context.Context, client *ovh.C } func waitForCloudProjectDatabaseDatabaseDeleted(ctx context.Context, client *ovh.Client, serviceName, engine string, serviceId string, databaseId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"DELETING"}, Target: []string{"DELETED"}, Refresh: func() (interface{}, string, error) { @@ -952,7 +952,7 @@ func (opts *CloudProjectDatabaseIntegrationCreateOpts) FromResource(d *schema.Re } func waitForCloudProjectDatabaseIntegrationReady(ctx context.Context, client *ovh.Client, serviceName, engine string, serviceId string, integrationId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ 
Pending: []string{"PENDING"}, Target: []string{"READY"}, Refresh: func() (interface{}, string, error) { @@ -982,7 +982,7 @@ func waitForCloudProjectDatabaseIntegrationReady(ctx context.Context, client *ov } func waitForCloudProjectDatabaseIntegrationDeleted(ctx context.Context, client *ovh.Client, serviceName, engine string, serviceId string, integrationId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"DELETING"}, Target: []string{"DELETED"}, Refresh: func() (interface{}, string, error) { @@ -1406,7 +1406,7 @@ func (opts *CloudProjectDatabaseM3dbNamespaceUpdateOpts) FromResource(d *schema. } func waitForCloudProjectDatabaseM3dbNamespaceReady(ctx context.Context, client *ovh.Client, serviceName, databaseId string, namespaceId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"PENDING"}, Target: []string{"READY"}, Refresh: func() (interface{}, string, error) { @@ -1435,7 +1435,7 @@ func waitForCloudProjectDatabaseM3dbNamespaceReady(ctx context.Context, client * } func waitForCloudProjectDatabaseM3dbNamespaceDeleted(ctx context.Context, client *ovh.Client, serviceName, databaseId string, namespaceId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"DELETING"}, Target: []string{"DELETED"}, Refresh: func() (interface{}, string, error) { @@ -1593,7 +1593,7 @@ func (opts *CloudProjectDatabaseOpensearchPatternCreateOpts) FromResource(d *sch } func waitForCloudProjectDatabaseOpensearchPatternReady(ctx context.Context, client *ovh.Client, serviceName, databaseId string, patternId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"PENDING"}, Target: []string{"READY"}, Refresh: func() (interface{}, string, error) { @@ -1622,7 
+1622,7 @@ func waitForCloudProjectDatabaseOpensearchPatternReady(ctx context.Context, clie } func waitForCloudProjectDatabaseOpensearchPatternDeleted(ctx context.Context, client *ovh.Client, serviceName, databaseId string, patternId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"DELETING"}, Target: []string{"DELETED"}, Refresh: func() (interface{}, string, error) { @@ -1709,7 +1709,7 @@ func (opts *CloudProjectDatabaseKafkaTopicCreateOpts) FromResource(d *schema.Res func validateIsSupEqual(v, min int) (errors []error) { if v < min { - errors = append(errors, fmt.Errorf("Value %d is inferior of min value %d", v, min)) + errors = append(errors, fmt.Errorf("value %d is inferior of min value %d", v, min)) } return } @@ -1735,7 +1735,7 @@ func validateCloudProjectDatabaseKafkaTopicRetentionHoursFunc(v interface{}, k s } func waitForCloudProjectDatabaseKafkaTopicReady(ctx context.Context, client *ovh.Client, serviceName, databaseId string, topicId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"PENDING"}, Target: []string{"READY"}, Refresh: func() (interface{}, string, error) { @@ -1764,7 +1764,7 @@ func waitForCloudProjectDatabaseKafkaTopicReady(ctx context.Context, client *ovh } func waitForCloudProjectDatabaseKafkaTopicDeleted(ctx context.Context, client *ovh.Client, serviceName, databaseId string, topicId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"DELETING"}, Target: []string{"DELETED"}, Refresh: func() (interface{}, string, error) { @@ -1838,7 +1838,7 @@ func (opts *CloudProjectDatabaseKafkaAclCreateOpts) FromResource(d *schema.Resou } func waitForCloudProjectDatabaseKafkaAclReady(ctx context.Context, client *ovh.Client, serviceName, databaseId string, aclId string, timeOut time.Duration) 
error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"PENDING"}, Target: []string{"READY"}, Refresh: func() (interface{}, string, error) { @@ -1867,7 +1867,7 @@ func waitForCloudProjectDatabaseKafkaAclReady(ctx context.Context, client *ovh.C } func waitForCloudProjectDatabaseKafkaAclDeleted(ctx context.Context, client *ovh.Client, serviceName, databaseId string, aclId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"DELETING"}, Target: []string{"DELETED"}, Refresh: func() (interface{}, string, error) { @@ -1941,7 +1941,7 @@ func (opts *CloudProjectDatabaseKafkaSchemaRegistryAclCreateOpts) FromResource(d } func waitForCloudProjectDatabaseKafkaSchemaRegistryAclReady(ctx context.Context, client *ovh.Client, serviceName, databaseId string, schemaRegistryAclId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"PENDING"}, Target: []string{"READY"}, Refresh: func() (interface{}, string, error) { @@ -1970,7 +1970,7 @@ func waitForCloudProjectDatabaseKafkaSchemaRegistryAclReady(ctx context.Context, } func waitForCloudProjectDatabaseKafkaSchemaRegistryAclDeleted(ctx context.Context, client *ovh.Client, serviceName, databaseId string, schemaRegistryAclId string, timeOut time.Duration) error { - stateConf := &resource.StateChangeConf{ + stateConf := &retry.StateChangeConf{ Pending: []string{"DELETING"}, Target: []string{"DELETED"}, Refresh: func() (interface{}, string, error) { @@ -2105,7 +2105,7 @@ func importCloudProjectDatabasePostgresqlConnectionPool(d *schema.ResourceData, n := 3 splitId := strings.SplitN(givenId, "/", n) if len(splitId) != n { - return nil, fmt.Errorf("Import Id is not service_name/cluster_id/id formatted") + return nil, fmt.Errorf("import Id is not service_name/cluster_id/id formatted") } serviceName := splitId[0] clusterId 
:= splitId[1] diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/error.go new file mode 100644 index 000000000..789c712f5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/error.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package retry + +import ( + "fmt" + "strings" + "time" +) + +type NotFoundError struct { + LastError error + LastRequest interface{} + LastResponse interface{} + Message string + Retries int +} + +func (e *NotFoundError) Error() string { + if e.Message != "" { + return e.Message + } + + if e.Retries > 0 { + return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries) + } + + return "couldn't find resource" +} + +func (e *NotFoundError) Unwrap() error { + return e.LastError +} + +// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending +type UnexpectedStateError struct { + LastError error + State string + ExpectedState []string +} + +func (e *UnexpectedStateError) Error() string { + return fmt.Sprintf( + "unexpected state '%s', wanted target '%s'. 
last error: %s", + e.State, + strings.Join(e.ExpectedState, ", "), + e.LastError, + ) +} + +func (e *UnexpectedStateError) Unwrap() error { + return e.LastError +} + +// TimeoutError is returned when WaitForState times out +type TimeoutError struct { + LastError error + LastState string + Timeout time.Duration + ExpectedState []string +} + +func (e *TimeoutError) Error() string { + expectedState := "resource to be gone" + if len(e.ExpectedState) > 0 { + expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", ")) + } + + extraInfo := make([]string, 0) + if e.LastState != "" { + extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState)) + } + if e.Timeout > 0 { + extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String())) + } + + suffix := "" + if len(extraInfo) > 0 { + suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", ")) + } + + if e.LastError != nil { + return fmt.Sprintf("timeout while waiting for %s%s: %s", + expectedState, suffix, e.LastError) + } + + return fmt.Sprintf("timeout while waiting for %s%s", + expectedState, suffix) +} + +func (e *TimeoutError) Unwrap() error { + return e.LastError +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/state.go new file mode 100644 index 000000000..4780090d9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/state.go @@ -0,0 +1,283 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package retry + +import ( + "context" + "log" + "time" +) + +var refreshGracePeriod = 30 * time.Second + +// StateRefreshFunc is a function type used for StateChangeConf that is +// responsible for refreshing the item being watched for a state change. +// +// It returns three results. `result` is any object that will be returned +// as the final object after waiting for state change. 
This allows you to +// return the final updated object, for example an EC2 instance after refreshing +// it. A nil result represents not found. +// +// `state` is the latest state of that object. And `err` is any error that +// may have happened while refreshing the state. +type StateRefreshFunc func() (result interface{}, state string, err error) + +// StateChangeConf is the configuration struct used for `WaitForState`. +type StateChangeConf struct { + Delay time.Duration // Wait this time before starting checks + Pending []string // States that are "allowed" and will continue trying + Refresh StateRefreshFunc // Refreshes the current state + Target []string // Target state + Timeout time.Duration // The amount of time to wait before timeout + MinTimeout time.Duration // Smallest time to wait before refreshes + PollInterval time.Duration // Override MinTimeout/backoff and only poll this often + NotFoundChecks int // Number of times to allow not found (nil result from Refresh) + + // This is to work around inconsistent APIs + ContinuousTargetOccurence int // Number of times the Target state has to occur continuously +} + +// WaitForStateContext watches an object and waits for it to achieve the state +// specified in the configuration using the specified Refresh() func, +// waiting the number of seconds specified in the timeout configuration. +// +// If the Refresh function returns an error, exit immediately with that error. +// +// If the Refresh function returns a state other than the Target state or one +// listed in Pending, return immediately with an error. +// +// If the Timeout is exceeded before reaching the Target state, return an +// error. +// +// Otherwise, the result is the result of the first call to the Refresh function to +// reach the target state. 
+// +// Cancellation from the passed in context will cancel the refresh loop +func (conf *StateChangeConf) WaitForStateContext(ctx context.Context) (interface{}, error) { + log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) + + notfoundTick := 0 + targetOccurence := 0 + + // Set a default for times to check for not found + if conf.NotFoundChecks == 0 { + conf.NotFoundChecks = 20 + } + + if conf.ContinuousTargetOccurence == 0 { + conf.ContinuousTargetOccurence = 1 + } + + type Result struct { + Result interface{} + State string + Error error + Done bool + } + + // Read every result from the refresh loop, waiting for a positive result.Done. + resCh := make(chan Result, 1) + // cancellation channel for the refresh loop + cancelCh := make(chan struct{}) + + result := Result{} + + go func() { + defer close(resCh) + + select { + case <-time.After(conf.Delay): + case <-cancelCh: + return + } + + // start with 0 delay for the first loop + var wait time.Duration + + for { + // store the last result + resCh <- result + + // wait and watch for cancellation + select { + case <-cancelCh: + return + case <-time.After(wait): + // first round had no wait + if wait == 0 { + wait = 100 * time.Millisecond + } + } + + res, currentState, err := conf.Refresh() + result = Result{ + Result: res, + State: currentState, + Error: err, + } + + if err != nil { + resCh <- result + return + } + + // If we're waiting for the absence of a thing, then return + if res == nil && len(conf.Target) == 0 { + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + + if res == nil { + // If we didn't find the resource, check if we have been + // not finding it for awhile, and if so, report an error. 
+ notfoundTick++ + if notfoundTick > conf.NotFoundChecks { + result.Error = &NotFoundError{ + LastError: err, + Retries: notfoundTick, + } + resCh <- result + return + } + } else { + // Reset the counter for when a resource isn't found + notfoundTick = 0 + found := false + + for _, allowed := range conf.Target { + if currentState == allowed { + found = true + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + } + + for _, allowed := range conf.Pending { + if currentState == allowed { + found = true + targetOccurence = 0 + break + } + } + + if !found && len(conf.Pending) > 0 { + result.Error = &UnexpectedStateError{ + LastError: err, + State: result.State, + ExpectedState: conf.Target, + } + resCh <- result + return + } + } + + // Wait between refreshes using exponential backoff, except when + // waiting for the target state to reoccur. + if targetOccurence == 0 { + wait *= 2 + } + + // If a poll interval has been specified, choose that interval. + // Otherwise bound the default value. 
+ if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { + wait = conf.PollInterval + } else { + if wait < conf.MinTimeout { + wait = conf.MinTimeout + } else if wait > 10*time.Second { + wait = 10 * time.Second + } + } + + log.Printf("[TRACE] Waiting %s before next try", wait) + } + }() + + // store the last value result from the refresh loop + lastResult := Result{} + + timeout := time.After(conf.Timeout) + for { + select { + case r, ok := <-resCh: + // channel closed, so return the last result + if !ok { + return lastResult.Result, lastResult.Error + } + + // we reached the intended state + if r.Done { + return r.Result, r.Error + } + + // still waiting, store the last result + lastResult = r + case <-ctx.Done(): + close(cancelCh) + return nil, ctx.Err() + case <-timeout: + log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) + log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) + + // cancel the goroutine and start our grace period timer + close(cancelCh) + timeout := time.After(refreshGracePeriod) + + // we need a for loop and a label to break on, because we may have + // an extra response value to read, but still want to wait for the + // channel to close. 
+ forSelect: + for { + select { + case r, ok := <-resCh: + if r.Done { + // the last refresh loop reached the desired state + return r.Result, r.Error + } + + if !ok { + // the goroutine returned + break forSelect + } + + // target state not reached, save the result for the + // TimeoutError and wait for the channel to close + lastResult = r + case <-ctx.Done(): + log.Println("[ERROR] Context cancelation detected, abandoning grace period") + break forSelect + case <-timeout: + log.Println("[ERROR] WaitForState exceeded refresh grace period") + break forSelect + } + } + + return nil, &TimeoutError{ + LastError: lastResult.Error, + LastState: lastResult.State, + Timeout: conf.Timeout, + ExpectedState: conf.Target, + } + } + } +} + +// WaitForState watches an object and waits for it to achieve the state +// specified in the configuration using the specified Refresh() func, +// waiting the number of seconds specified in the timeout configuration. +// +// Deprecated: Please use WaitForStateContext to ensure proper plugin shutdown +func (conf *StateChangeConf) WaitForState() (interface{}, error) { + return conf.WaitForStateContext(context.Background()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/wait.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/wait.go new file mode 100644 index 000000000..c8d2de143 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry/wait.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package retry + +import ( + "context" + "errors" + "sync" + "time" +) + +// RetryContext is a basic wrapper around StateChangeConf that will just retry +// a function until it no longer returns an error. 
+// +// Cancellation from the passed in context will propagate through to the +// underlying StateChangeConf +func RetryContext(ctx context.Context, timeout time.Duration, f RetryFunc) error { + // These are used to pull the error out of the function; need a mutex to + // avoid a data race. + var resultErr error + var resultErrMu sync.Mutex + + c := &StateChangeConf{ + Pending: []string{"retryableerror"}, + Target: []string{"success"}, + Timeout: timeout, + MinTimeout: 500 * time.Millisecond, + Refresh: func() (interface{}, string, error) { + rerr := f() + + resultErrMu.Lock() + defer resultErrMu.Unlock() + + if rerr == nil { + resultErr = nil + return 42, "success", nil + } + + resultErr = rerr.Err + + if rerr.Retryable { + return 42, "retryableerror", nil + } + return nil, "quit", rerr.Err + }, + } + + _, waitErr := c.WaitForStateContext(ctx) + + // Need to acquire the lock here to be able to avoid race using resultErr as + // the return value + resultErrMu.Lock() + defer resultErrMu.Unlock() + + // resultErr may be nil because the wait timed out and resultErr was never + // set; this is still an error + if resultErr == nil { + return waitErr + } + // resultErr takes precedence over waitErr if both are set because it is + // more likely to be useful + return resultErr +} + +// Retry is a basic wrapper around StateChangeConf that will just retry +// a function until it no longer returns an error. +// +// Deprecated: Please use RetryContext to ensure proper plugin shutdown +func Retry(timeout time.Duration, f RetryFunc) error { + return RetryContext(context.Background(), timeout, f) +} + +// RetryFunc is the function retried until it succeeds. +type RetryFunc func() *RetryError + +// RetryError is the required return type of RetryFunc. It forces client code +// to choose whether or not a given error is retryable. 
+type RetryError struct { + Err error + Retryable bool +} + +func (e *RetryError) Unwrap() error { + return e.Err +} + +// RetryableError is a helper to create a RetryError that's retryable from a +// given error. To prevent logic errors, will return an error when passed a +// nil error. +func RetryableError(err error) *RetryError { + if err == nil { + return &RetryError{ + Err: errors.New("empty retryable error received. " + + "This is a bug with the Terraform provider and should be " + + "reported as a GitHub issue in the provider repository."), + Retryable: false, + } + } + return &RetryError{Err: err, Retryable: true} +} + +// NonRetryableError is a helper to create a RetryError that's _not_ retryable +// from a given error. To prevent logic errors, will return an error when +// passed a nil error. +func NonRetryableError(err error) *RetryError { + if err == nil { + return &RetryError{ + Err: errors.New("empty non-retryable error received. " + + "This is a bug with the Terraform provider and should be " + + "reported as a GitHub issue in the provider repository."), + Retryable: false, + } + } + return &RetryError{Err: err, Retryable: false} +} diff --git a/vendor/modules.txt b/vendor/modules.txt index feb73f342..3c9d461ff 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -203,6 +203,7 @@ github.com/hashicorp/terraform-plugin-mux/tf6muxserver github.com/hashicorp/terraform-plugin-sdk/v2/diag github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging +github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema