Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Databases: Add online migration support #1331

Open
wants to merge 7 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
232 changes: 232 additions & 0 deletions digitalocean/database/resource_database_online_migration.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,232 @@
package database

import (
"context"
"time"

"github.com/digitalocean/godo"
"github.com/digitalocean/terraform-provider-digitalocean/digitalocean/config"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// ResourceDigitalOceanDatabaseOnlineMigration defines the schema and CRUD
// handlers for the digitalocean_database_online_migration resource, which
// starts, tracks, and stops an online migration of an external database
// into a DigitalOcean managed database cluster.
func ResourceDigitalOceanDatabaseOnlineMigration() *schema.Resource {
	return &schema.Resource{
		ReadContext:   resourceDigitalOceanDatabaseOnlineMigrationStatus,
		CreateContext: resourceDigitalOceanDatabaseOnlineMigrationStart,
		UpdateContext: resourceDigitalOceanDatabaseOnlineMigrationStart,
		DeleteContext: resourceDigitalOceanDatabaseOnlineMigrationStop,
		Importer: &schema.ResourceImporter{
			StateContext: schema.ImportStatePassthroughContext,
		},
		Schema: map[string]*schema.Schema{
			"cluster_id": {
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: validation.NoZeroValues,
			},
			"disable_ssl": {
				Type:        schema.TypeBool,
				Optional:    true,
				Description: "Disables SSL encryption when connecting to the source database",
			},
			"ignore_dbs": {
				Type:        schema.TypeSet,
				Elem:        &schema.Schema{Type: schema.TypeString},
				Optional:    true,
				Description: "The list of databases to be ignored during the migration",
			},
			// NOTE(review): "id" is a reserved attribute name in the
			// Terraform Plugin SDK; schema.Resource.InternalValidate may
			// reject it. The migration ID is also stored via d.SetId(), so
			// consider renaming this attribute (e.g. "migration_id") or
			// dropping it — verify against the SDK version in use.
			"id": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The ID of the migration",
			},
			"status": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The status of the online migration",
			},
			"created_at": {
				Type:        schema.TypeString,
				Computed:    true,
				Description: "The date and time when the online migration was created",
			},
			"source": {
				Type:     schema.TypeList,
				Required: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"host": {
							Type:        schema.TypeString,
							Required:    true,
							Description: "The FQDN pointing to the database cluster's current primary node",
						},
						"port": {
							Type:        schema.TypeInt,
							Required:    true,
							Description: "The port on which the database cluster is listening",
						},
						"db_name": {
							Type:        schema.TypeString,
							Required:    true,
							Description: "The name of the default database",
						},
						"username": {
							Type:        schema.TypeString,
							Required:    true,
							Description: "The default user of the database",
						},
						// Fixed: description was a copy-paste of the "port"
						// description. Marked Sensitive so the password is
						// redacted from plan/apply output.
						"password": {
							Type:        schema.TypeString,
							Required:    true,
							Sensitive:   true,
							Description: "The password of the source database's default user",
						},
					},
				},
			},
		},
	}
}

// resourceDigitalOceanDatabaseOnlineMigrationStart builds the start-migration
// request from the resource configuration, kicks off the migration against
// the target cluster, and waits for it to be confirmed running before
// recording the migration ID in state.
func resourceDigitalOceanDatabaseOnlineMigrationStart(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*config.CombinedConfig).GodoClient()
	clusterID := d.Get("cluster_id").(string)

	opts := &godo.DatabaseStartOnlineMigrationRequest{
		// Use d.Get rather than d.GetOk for booleans: GetOk reports ok=false
		// for a zero value, so an explicit `disable_ssl = false` would be
		// indistinguishable from the attribute being unset.
		DisableSSL: d.Get("disable_ssl").(bool),
	}

	if v, ok := d.GetOk("ignore_dbs"); ok {
		list := v.(*schema.Set).List()
		ignoreDBs := make([]string, 0, len(list))
		for _, db := range list {
			ignoreDBs = append(ignoreDBs, db.(string))
		}
		opts.IgnoreDBs = ignoreDBs
	}

	if v, ok := d.GetOk("source"); ok {
		opts.Source = expandDBOnlineMigrationSource(v.([]interface{}))
	}

	migrationID, diags := waitForOnlineMigration(ctx, client, d, clusterID, opts)
	if diags != nil {
		return diags
	}

	d.SetId(migrationID)

	return resourceDigitalOceanDatabaseOnlineMigrationStatus(ctx, d, meta)
}

// waitForOnlineMigration starts the online migration and polls its status for
// up to ~90 seconds. A start request can pass the API precheck and return 200
// yet the migration can still fail shortly afterwards, so we keep polling
// until the status reaches "syncing"/"done" (success) or the deadline passes.
// On success it returns the migration ID; otherwise it returns diagnostics.
func waitForOnlineMigration(ctx context.Context, client *godo.Client, d *schema.ResourceData, clusterID string, opts *godo.DatabaseStartOnlineMigrationRequest) (string, diag.Diagnostics) {
	if _, _, err := client.Databases.Get(ctx, clusterID); err != nil {
		return "", diag.Errorf("Cluster does not exist: %s", clusterID)
	}

	// Give a freshly created cluster a moment to become ready for
	// connections before issuing the start request.
	// TODO(review): replace this fixed sleep with a readiness poll.
	time.Sleep(30 * time.Second)

	if _, _, err := client.Databases.StartOnlineMigration(ctx, clusterID, opts); err != nil {
		return "", diag.Errorf("Error starting online migration for cluster %s: %s", clusterID, err)
	}

	const (
		pollInterval = 10 * time.Second
		pollTimeout  = 90 * time.Second
	)

	ticker := time.NewTicker(pollInterval)
	// Stop the ticker on every exit path (the original leaked it when
	// returning from inside the loop on a retry failure).
	defer ticker.Stop()

	deadline := time.Now().Add(pollTimeout)
	for time.Now().Before(deadline) {
		select {
		case <-ctx.Done():
			return "", diag.FromErr(ctx.Err())
		case <-ticker.C:
		}

		status, _, err := client.Databases.GetOnlineMigrationStatus(ctx, clusterID)
		if err != nil || status == nil {
			// The migration may not be visible yet; keep polling. The
			// deadline check above guarantees we still time out (the
			// original skipped its counter increment here, so a
			// perpetually-nil status looped forever).
			continue
		}

		switch status.Status {
		case "error":
			// An early "error" status may only mean the database was not yet
			// ready for connections; retry the start request.
			if _, _, err := client.Databases.StartOnlineMigration(ctx, clusterID, opts); err != nil {
				return "", diag.Errorf("Error starting online migration for cluster %s: %s", clusterID, err)
			}
		case "syncing", "done":
			// Migration is confirmed running (or already complete).
			return status.ID, nil
		}
	}

	// Status never reached "syncing" within the timeout; report failure.
	return "", diag.Errorf("Online migration for cluster %s did not start within %s", clusterID, pollTimeout)
}

// expandDBOnlineMigrationSource converts the single-element "source"
// configuration list from the resource schema into the godo request struct.
// An empty or nil configuration yields a zero-valued config.
func expandDBOnlineMigrationSource(config []interface{}) *godo.DatabaseOnlineMigrationConfig {
	source := &godo.DatabaseOnlineMigrationConfig{}
	if len(config) == 0 || config[0] == nil {
		return source
	}

	raw := config[0].(map[string]interface{})

	if host, ok := raw["host"]; ok {
		source.Host = host.(string)
	}
	if port, ok := raw["port"]; ok {
		source.Port = port.(int)
	}
	if name, ok := raw["db_name"]; ok {
		source.DatabaseName = name.(string)
	}
	if user, ok := raw["username"]; ok {
		source.Username = user.(string)
	}
	if pass, ok := raw["password"]; ok {
		source.Password = pass.(string)
	}

	return source
}

// resourceDigitalOceanDatabaseOnlineMigrationStatus refreshes the resource
// state (id, status, created_at) from the cluster's current online migration.
func resourceDigitalOceanDatabaseOnlineMigrationStatus(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*config.CombinedConfig).GodoClient()
	clusterID := d.Get("cluster_id").(string)

	migration, resp, err := client.Databases.GetOnlineMigrationStatus(ctx, clusterID)
	if err != nil {
		// A 404 means the migration no longer exists; remove it from state
		// so Terraform plans its recreation instead of erroring.
		if resp != nil && resp.StatusCode == 404 {
			d.SetId("")
			return nil
		}
		return diag.Errorf("Error retrieving database online migration status: %s", err)
	}

	d.SetId(migration.ID)
	d.Set("status", migration.Status)
	d.Set("created_at", migration.CreatedAt)

	return nil
}

// resourceDigitalOceanDatabaseOnlineMigrationStop cancels the in-progress
// online migration for the cluster and clears the resource from state.
func resourceDigitalOceanDatabaseOnlineMigrationStop(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*config.CombinedConfig).GodoClient()
	clusterID := d.Get("cluster_id").(string)

	// d.Id() is the canonical accessor for the resource ID (the migration ID
	// set during create); "id" is a reserved attribute name in the SDK and
	// reading it through d.Get is not reliable.
	migrationID := d.Id()

	_, err := client.Databases.StopOnlineMigration(ctx, clusterID, migrationID)
	if err != nil {
		return diag.Errorf("Error stopping online migration: %s", err)
	}

	d.SetId("")
	return nil
}
68 changes: 68 additions & 0 deletions digitalocean/database/resource_database_online_migration_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
package database_test

import (
"fmt"
"testing"

"github.com/digitalocean/terraform-provider-digitalocean/digitalocean/acceptance"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// TestAccDigitalOceanDatabaseOnlineMigration_Basic provisions a source and a
// destination MySQL cluster plus a source database, starts an online
// migration between them, and verifies the migration's computed attributes
// are populated.
func TestAccDigitalOceanDatabaseOnlineMigration_Basic(t *testing.T) {
	source := "source-" + acceptance.RandomTestName()
	destination := "destination-" + acceptance.RandomTestName()

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:          func() { acceptance.TestAccPreCheck(t) },
		ProviderFactories: acceptance.TestAccProviderFactories,
		// Restored: without CheckDestroy the test leaks clusters on failure
		// and never verifies teardown. The cluster destroy check is shared
		// with the other database acceptance tests in this package.
		CheckDestroy: testAccCheckDigitalOceanDatabaseClusterDestroy,
		Steps: []resource.TestStep{
			{
				Config: fmt.Sprintf(testAccCheckDigitalOceanDatabaseOnlineMigrationBasic, source, "8", destination, "8"),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttrSet("digitalocean_database_online_migration.foobar", "id"),
					resource.TestCheckResourceAttrSet("digitalocean_database_online_migration.foobar", "status"),
					resource.TestCheckResourceAttrSet("digitalocean_database_online_migration.foobar", "created_at"),
				),
			},
		},
	})
}

// testAccCheckDigitalOceanDatabaseOnlineMigrationBasic is the acceptance-test
// configuration: a source and a destination MySQL cluster (names and versions
// interpolated via fmt.Sprintf in the order source-name, source-version,
// destination-name, destination-version), a database on the source cluster,
// and an online migration from the source into the destination.
const testAccCheckDigitalOceanDatabaseOnlineMigrationBasic = `
resource "digitalocean_database_cluster" "source" {
name = "%s"
engine = "mysql"
version = "%s"
size = "db-s-1vcpu-1gb"
region = "nyc1"
node_count = 1
tags = ["production"]
}

resource "digitalocean_database_cluster" "destination" {
name = "%s"
engine = "mysql"
version = "%s"
size = "db-s-1vcpu-1gb"
region = "nyc1"
node_count = 1
tags = ["production"]
}

resource "digitalocean_database_db" "source_db" {
cluster_id = digitalocean_database_cluster.source.id
name = "terraform-db-om-source"
}

resource "digitalocean_database_online_migration" "foobar" {
cluster_id = digitalocean_database_cluster.destination.id
source {
host = digitalocean_database_cluster.source.host
db_name = digitalocean_database_db.source_db.name
port = digitalocean_database_cluster.source.port
username = digitalocean_database_cluster.source.user
password = digitalocean_database_cluster.source.password
}
depends_on = [digitalocean_database_cluster.destination, digitalocean_database_cluster.source, digitalocean_database_db.source_db]
}`
1 change: 1 addition & 0 deletions digitalocean/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,7 @@ func Provider() *schema.Provider {
"digitalocean_database_kafka_config": database.ResourceDigitalOceanDatabaseKafkaConfig(),
"digitalocean_database_opensearch_config": database.ResourceDigitalOceanDatabaseOpensearchConfig(),
"digitalocean_database_kafka_topic": database.ResourceDigitalOceanDatabaseKafkaTopic(),
"digitalocean_database_online_migration": database.ResourceDigitalOceanDatabaseOnlineMigration(),
"digitalocean_domain": domain.ResourceDigitalOceanDomain(),
"digitalocean_droplet": droplet.ResourceDigitalOceanDroplet(),
"digitalocean_droplet_autoscale": dropletautoscale.ResourceDigitalOceanDropletAutoscale(),
Expand Down
82 changes: 82 additions & 0 deletions docs/resources/database_online_migration.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
---
page_title: "DigitalOcean: digitalocean_database_online_migration"
subcategory: "Databases"
---

# digitalocean\_database\_online\_migration

Provides a virtual resource that can be used to start an online migration
for a DigitalOcean managed database cluster. Migrating a cluster establishes a
connection with an existing cluster and replicates its contents to the target
cluster. If the existing database is continuously being written to, the migration
process will continue for up to two weeks unless it is manually stopped.
Online migration is only available for MySQL, PostgreSQL, and Redis clusters.

## Example Usage

```hcl
resource "digitalocean_database_cluster" "source" {
name = "st01"
engine = "mysql"
version = "8"
size = "db-s-1vcpu-1gb"
region = "nyc1"
node_count = 1
tags = ["production"]
}

resource "digitalocean_database_cluster" "destination" {
name = "dt01"
engine = "mysql"
version = "8"
size = "db-s-1vcpu-1gb"
region = "nyc1"
node_count = 1
tags = ["production"]
}

resource "digitalocean_database_db" "source_db" {
cluster_id = digitalocean_database_cluster.source.id
name = "terraform-db-om-source"
}

resource "digitalocean_database_online_migration" "foobar" {
cluster_id = digitalocean_database_cluster.destination.id
source {
host = digitalocean_database_cluster.source.host
db_name = digitalocean_database_db.source_db.name
port = digitalocean_database_cluster.source.port
username = digitalocean_database_cluster.source.user
password = digitalocean_database_cluster.source.password
}
depends_on = [digitalocean_database_cluster.destination, digitalocean_database_cluster.source, digitalocean_database_db.source_db]
}
```


## Argument Reference

The following arguments are supported. See the [DigitalOcean API documentation](https://docs.digitalocean.com/reference/api/digitalocean/#tag/Databases/operation/databases_update_onlineMigration)
for additional details on each option.

* `cluster_id` - (Required) The ID of the target MySQL cluster.
* `source` - (Required) Configuration for migration
* `host` - (Required) The FQDN pointing to the database cluster's current primary node.
* `port` - (Required) The port on which the database cluster is listening.
* `db_name` - (Required) The name of the default database.
* `username` - (Required) The default user for the database.
* `password` - (Required) A randomly generated password for the default user.
* `disable_ssl` - (Optional) When set to true, disables SSL encryption when connecting to the source database.
* `ignore_dbs` - (Optional) A list of databases that should be ignored during migration.

## Attributes Reference

All above attributes are exported. If an attribute was set outside of Terraform, it will be computed.

## Import

A MySQL database cluster's online migration can be imported using the `id` of the parent cluster, e.g.

```
terraform import digitalocean_database_online_migration.example 4b62829a-9c42-465b-aaa3-84051048e712
```