diff --git a/apps/docs/components/Navigation/NavigationMenu/NavigationMenu.constants.ts b/apps/docs/components/Navigation/NavigationMenu/NavigationMenu.constants.ts
index 15b1f182fe19f..fb746a73ba8c1 100644
--- a/apps/docs/components/Navigation/NavigationMenu/NavigationMenu.constants.ts
+++ b/apps/docs/components/Navigation/NavigationMenu/NavigationMenu.constants.ts
@@ -1154,14 +1154,6 @@ export const database: NavMenuConstant = {
name: 'Setting up',
url: '/guides/database/replication/replication-setup' as `/${string}`,
},
- {
- name: 'Destinations',
- url: '/guides/database/replication/replication-destinations' as `/${string}`,
- items: [
- { name: 'BigQuery', url: '/guides/database/replication/replication-bigquery' },
- { name: 'Iceberg', url: '/guides/database/replication/replication-iceberg' },
- ],
- },
{
name: 'Monitoring',
url: '/guides/database/replication/replication-monitoring' as `/${string}`,
@@ -1170,7 +1162,7 @@ export const database: NavMenuConstant = {
],
},
{
- name: 'Manual Replication',
+ name: 'Manual replication',
url: '/guides/database/replication/manual-replication-setup' as `/${string}`,
items: [
{
diff --git a/apps/docs/content/guides/database/replication.mdx b/apps/docs/content/guides/database/replication.mdx
index 6cd1f0fdd284d..4a4113d58ca1f 100644
--- a/apps/docs/content/guides/database/replication.mdx
+++ b/apps/docs/content/guides/database/replication.mdx
@@ -1,7 +1,7 @@
---
id: 'replication'
title: 'Database Replication'
-description: 'Replicate your database to external destinations using replication powered by Supabase ETL or manual replication.'
+description: 'Replicate your database to external destinations using Supabase replication or manual replication.'
subtitle: 'An introduction to database replication and change data capture.'
sidebar_label: 'Overview'
---
diff --git a/apps/docs/content/guides/database/replication/manual-replication-faq.mdx b/apps/docs/content/guides/database/replication/manual-replication-faq.mdx
index d909e2b629468..2ee5bc876d9fd 100644
--- a/apps/docs/content/guides/database/replication/manual-replication-faq.mdx
+++ b/apps/docs/content/guides/database/replication/manual-replication-faq.mdx
@@ -1,6 +1,6 @@
---
-id: 'replication-faq'
-title: 'Replication FAQ'
+id: 'manual-replication-faq'
+title: 'Manual Replication FAQ'
description: 'Frequently asked questions about manual database replication.'
subtitle: 'Common questions and considerations when setting up manual replication.'
sidebar_label: 'FAQ'
@@ -30,13 +30,15 @@ You can view [publications](/dashboard/project/default/database/publications) in
## How to configure database settings for replication?
-Yes. Using the Supabase CLI, you can [configure database settings](/docs/guides/database/custom-postgres-config#cli-configurable-settings) to optimize them for your replication needs. These values can vary depending on the activity of your database size and activity.
+Using the Supabase CLI, you can [configure database settings](/docs/guides/database/custom-postgres-config#cli-configurable-settings) to optimize them for your replication needs. These values can vary depending on your database size and activity.
## What are some important configuration options?
Some of the more important options to be aware of are:
-- `max_wal_size`
-- `max_slot_wal_keep_size`
-- `wal_keep_size`
-- `max_wal_senders`
+- `max_wal_size` - Maximum size the WAL can grow between automatic WAL checkpoints
+- `max_slot_wal_keep_size` - Maximum size of WAL files that replication slots are allowed to retain
+- `wal_keep_size` - Minimum size of past WAL files to keep for standby servers
+- `max_wal_senders` - Maximum number of concurrent connections from standby servers or streaming backup clients
+
+These settings help ensure your replication slots don't run out of space and that replicas can reconnect without requiring a full re-sync.
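+
+You can check the current values of these settings directly in Postgres. A minimal read-only sketch using the standard `pg_settings` view:
+
+```sql
+-- Inspect the current replication-related WAL settings
+select name, setting, unit
+from pg_settings
+where name in ('max_wal_size', 'max_slot_wal_keep_size', 'wal_keep_size', 'max_wal_senders');
+```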
diff --git a/apps/docs/content/guides/database/replication/manual-replication-monitoring.mdx b/apps/docs/content/guides/database/replication/manual-replication-monitoring.mdx
index 0047bc80136e4..3742b2eaf8cc8 100644
--- a/apps/docs/content/guides/database/replication/manual-replication-monitoring.mdx
+++ b/apps/docs/content/guides/database/replication/manual-replication-monitoring.mdx
@@ -1,6 +1,6 @@
---
-id: 'monitoring-replication'
-title: 'Monitoring Replication'
+id: 'manual-replication-monitoring'
+title: 'Manual Replication Monitoring'
description: 'Monitor replication lag and status for manual replication setups.'
subtitle: 'Track replication health and performance.'
sidebar_label: 'Monitoring'
@@ -16,7 +16,7 @@ Monitoring replication lag is important and there are 3 ways to do this:
3. [Metrics](/docs/guides/telemetry/metrics) - Using the prometheus endpoint for your project
- replication_slots_max_lag_bytes - this is the more important one
- pg_stat_replication_replay_lag - lag to replay WAL files from the source DB on the target DB (throttled by disk or high activity)
- - pg_stat_replication_send_lag - lag in sending WAL files from the source DB (a high lag means that the publisher is not being asked to send new WAL files OR a network issues)
+ - pg_stat_replication_send_lag - lag in sending WAL files from the source DB (a high lag means that the publisher is not being asked to send new WAL files OR network issues)
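+
+If you prefer checking lag directly in SQL, the standard `pg_stat_replication` view on the primary exposes similar information. A minimal sketch (run as a privileged user):
+
+```sql
+-- Per-subscriber send and replay lag on the primary, in bytes
+select
+  application_name,
+  pg_wal_lsn_diff(pg_current_wal_lsn(), sent_lsn)   as send_lag_bytes,
+  pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn) as replay_lag_bytes
+from pg_stat_replication;
+```
+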
### Primary
@@ -51,7 +51,7 @@ The WAL size can be checked using the `pg_ls_waldir()` function:
select * from pg_ls_waldir();
```
-#### Check LSN
+#### Check the LSN
```sql
select pg_current_wal_lsn();
@@ -91,7 +91,7 @@ ORDER BY
table_name;
```
-#### Check LSN
+#### Check the LSN
```sql
select pg_last_wal_replay_lsn();
diff --git a/apps/docs/content/guides/database/replication/manual-replication-setup.mdx b/apps/docs/content/guides/database/replication/manual-replication-setup.mdx
index f6ddadd5a86a2..4a0fe678ff4b0 100644
--- a/apps/docs/content/guides/database/replication/manual-replication-setup.mdx
+++ b/apps/docs/content/guides/database/replication/manual-replication-setup.mdx
@@ -6,7 +6,7 @@ subtitle: 'Set up replication with Airbyte, Estuary, Fivetran, and other tools.'
sidebar_label: 'Setting up'
---
-This guide covers setting up **manual replication** using external tools. If you prefer a simpler, managed solution, see [replication](/docs/guides/database/replication/replication-setup) instead.
+This guide covers setting up **manual replication** using external tools. If you prefer a simpler, managed solution, see [Replication](/docs/guides/database/replication/replication-setup) instead.
@@ -21,7 +21,7 @@ To set up replication, the following is recommended:
- Instance size of XL or greater
- [IPv4 add-on](/docs/guides/platform/ipv4-address) enabled
-To create a replication slot, you will need to use the `postgres` user and follow the instructions in our [guide](/docs/guides/database/postgres/setup-replication-external).
+To create a replication slot, you will need to use the `postgres` user and follow the instructions in the [external replication setup guide](/docs/guides/database/postgres/setup-replication-external).
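+
+For reference, manually creating a logical replication slot looks like the sketch below (the slot name is an example; many of the tools listed later create the slot for you):
+
+```sql
+-- Create a logical replication slot using the built-in pgoutput plugin
+select pg_create_logical_replication_slot('my_replication_slot', 'pgoutput');
+```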
@@ -29,7 +29,7 @@ If you are running Postgres 17 or higher, you can create a new user and grant th
-If you are replicating to an external system and using any of the tools below, check their documentation first and we have added additional information where the setup with Supabase can vary.
+If you are replicating to an external system and using any of the tools below, check their documentation first. Additional information is provided where the setup with Supabase can vary.
@@ -77,7 +77,7 @@ Materialize has the following [documentation](https://materialize.com/docs/sql/c
You can follow those steps with the following modifications:
-1. Follow the steps in our [guide](/docs/guides/database/postgres/setup-replication-external) to create a publication slot
+1. Follow the steps in the [external replication setup guide](/docs/guides/database/postgres/setup-replication-external) to create a publication slot
diff --git a/apps/docs/content/guides/database/replication/replication-bigquery.mdx b/apps/docs/content/guides/database/replication/replication-bigquery.mdx
deleted file mode 100644
index 82fdc29ddb52d..0000000000000
--- a/apps/docs/content/guides/database/replication/replication-bigquery.mdx
+++ /dev/null
@@ -1,126 +0,0 @@
----
-id: 'replication-bigquery'
-title: 'Replicate to BigQuery'
-description: 'Replicate your Supabase database to Google BigQuery using replication.'
-subtitle: 'Stream data changes to BigQuery in real-time.'
-sidebar_label: 'BigQuery'
----
-
-
-
-Replication is currently in private alpha. Access is limited and features may change.
-
-
-
-BigQuery is Google's fully managed data warehouse. Replication powered by [Supabase ETL](https://github.com/supabase/etl) allows you to automatically sync your Supabase database tables to BigQuery for analytics and reporting.
-
-
-
-This page covers BigQuery-specific configuration. For complete setup instructions including publications, general settings, and pipeline management, see the [Replication Setup guide](/docs/guides/database/replication/replication-setup).
-
-
-
-### Setup
-
-Setting up BigQuery replication requires preparing your GCP resources, then configuring BigQuery as a destination.
-
-#### Step 1: Prepare GCP resources
-
-Before configuring BigQuery as a destination, set up the following in Google Cloud Platform:
-
-1. **Google Cloud Platform (GCP) account**: [Sign up for GCP](https://cloud.google.com/gcp) if you don't have one
-2. **BigQuery dataset**: Create a [BigQuery dataset](https://cloud.google.com/bigquery/docs/datasets-intro) in your GCP project
-3. **GCP service account key**: Create a [service account](https://cloud.google.com/iam/docs/keys-create-delete) with the **BigQuery Data Editor** role and download the JSON key file
-
-#### Step 2: Add BigQuery as a destination
-
-After preparing your GCP resources, configure BigQuery as a destination:
-
-1. Navigate to [Database](/dashboard/project/_/database/replication) → **Replication** in your Supabase Dashboard
-2. Click **Add destination**
-3. Configure the destination:
-
-
-
- - **Destination type**: Select **BigQuery**
- - **Project ID**: Your BigQuery project identifier (found in the GCP Console)
- - **Dataset ID**: The name of your BigQuery dataset (without the project ID)
-
-
-
- In the GCP Console, the dataset is shown as `project-id.dataset-id`. Enter only the part after the dot. For example, if you see `my-project.my_dataset`, enter `my_dataset`.
-
-
-
- - **Service Account Key**: Your GCP service account key in JSON format. The service account must have the following permissions:
- - `bigquery.datasets.get`
- - `bigquery.tables.create`
- - `bigquery.tables.get`
- - `bigquery.tables.getData`
- - `bigquery.tables.update`
- - `bigquery.tables.updateData`
-
-4. Complete the remaining configuration following the [Replication Setup guide](/docs/guides/database/replication/replication-setup)
-
-### How it works
-
-Once configured, replication to BigQuery:
-
-1. Captures changes from your Postgres database (INSERT, UPDATE, DELETE operations)
-2. Batches changes for optimal performance
-3. Creates BigQuery tables automatically to match your Postgres schema
-4. Streams data to BigQuery with CDC metadata
-
-
-
-Due to ingestion latency in BigQuery's streaming API, there may be a delay (typically seconds to minutes) in data appearing. This is normal and expected for BigQuery's architecture.
-
-
-
-#### BigQuery CDC format
-
-BigQuery tables include additional columns for change tracking:
-
-- `_change_type`: The type of change (`INSERT`, `UPDATE`, `DELETE`)
-- `_commit_timestamp`: When the change was committed in Postgres
-- `_stream_id`: Internal identifier for the replication stream
-
-### Querying replicated data
-
-Once replication is running, you can query your data in BigQuery:
-
-```sql
--- Query the replicated table
-SELECT * FROM `your-project.your_dataset.users`
-WHERE created_at > TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 7 DAY);
-
--- View CDC changes
-SELECT
- _change_type,
- _commit_timestamp,
- id,
- name,
- email
-FROM `your-project.your_dataset.users`
-ORDER BY _commit_timestamp DESC
-LIMIT 100;
-```
-
-### Limitations
-
-BigQuery-specific limitations:
-
-- **Ingestion latency**: BigQuery's streaming API has inherent latency (typically seconds to minutes)
-- **Row size**: Limited to 10 MB per row due to BigQuery Storage Write API constraints
-
-For general replication limitations that apply to all destinations, see the [Replication Setup guide](/docs/guides/database/replication/replication-setup#limitations).
-
-### Next steps
-
-- [Set up replication](/docs/guides/database/replication/replication-setup)
-- [Monitor replication](/docs/guides/database/replication/replication-monitoring)
-- [View replication FAQ](/docs/guides/database/replication/replication-faq)
diff --git a/apps/docs/content/guides/database/replication/replication-destinations.mdx b/apps/docs/content/guides/database/replication/replication-destinations.mdx
deleted file mode 100644
index da20c733d8fd9..0000000000000
--- a/apps/docs/content/guides/database/replication/replication-destinations.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
----
-id: 'replication-destinations'
-title: 'Replication Destinations'
-description: 'Choose where to replicate your database with Replication.'
-subtitle: 'Available destinations for Replication.'
-sidebar_label: 'Destinations'
----
-
-
-
-Replication is currently in private alpha. Access is limited and features may change.
-
-
-
-Replication powered by [Supabase ETL](https://github.com/supabase/etl) supports multiple destination types for syncing your database. Choose the destination that best fits your analytics and integration needs.
-
-
-
-Some destinations may not be available for all users. Additional destinations are planned for the future, but we don't have public timelines to share at this time.
-
-
-
-### Available destinations
-
-| Destination | Description | Configuration |
-| ------------------------------- | ---------------------------------------------- | ------------------------------------------------------------------------------ |
-| **BigQuery** | Google's fully managed data warehouse | [Configure BigQuery →](/docs/guides/database/replication/replication-bigquery) |
-| **Iceberg (Analytics Buckets)** | Apache Iceberg tables in S3-compatible storage | [Configure Iceberg →](/docs/guides/database/replication/replication-iceberg) |
-
-### Next steps
-
-- [Set up replication](/docs/guides/database/replication/replication-setup)
-- [Monitor replication](/docs/guides/database/replication/replication-monitoring)
diff --git a/apps/docs/content/guides/database/replication/replication-faq.mdx b/apps/docs/content/guides/database/replication/replication-faq.mdx
index 2dedd59a69a80..bb85b14a0c5fc 100644
--- a/apps/docs/content/guides/database/replication/replication-faq.mdx
+++ b/apps/docs/content/guides/database/replication/replication-faq.mdx
@@ -14,22 +14,23 @@ Replication is currently in private alpha. Access is limited and features may ch
## What destinations are supported?
-Replication currently supports Iceberg (Analytics Buckets). See the [Destinations guide](/docs/guides/database/replication/replication-destinations) for details.
+Replication currently supports Analytics Buckets (Iceberg format) and BigQuery. See the destination tabs in the [Setup guide](/docs/guides/database/replication/replication-setup#step-3-add-a-destination) for configuration details.
+
+Availability varies during the planned roll-out: the destinations you can access depend on your project and access level.
## Why is a table not being replicated?
Common reasons:
-- **Missing primary key**: Tables must have a primary key to be replicated
-- **Not in publication**: Ensure the table is included in your publication
+- **Missing primary key**: Tables must have a primary key to be replicated (Postgres logical replication requirement)
+- **Not in publication**: Ensure the table is included in your Postgres publication
- **Unsupported data types**: Tables with custom data types are not supported
-- **Partitioned tables**: Not currently supported
Check your publication settings and verify your table meets the requirements.
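+
+To verify the first two points, you can inspect the publication contents and primary keys directly in SQL (a sketch; replace the publication and schema names with your own):
+
+```sql
+-- Tables currently included in the publication
+select * from pg_publication_tables where pubname = 'my_publication';
+
+-- Tables in the public schema without a primary key
+select c.relname
+from pg_class c
+join pg_namespace n on n.oid = c.relnamespace
+where c.relkind = 'r'
+  and n.nspname = 'public'
+  and not exists (
+    select 1 from pg_constraint p
+    where p.conrelid = c.oid and p.contype = 'p'
+  );
+```
+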
## Why aren't publication changes reflected after adding or removing tables?
-After modifying your publication, you must restart the pipeline for changes to take effect. See [Adding or removing tables](/docs/guides/database/replication/replication-setup#adding-or-removing-tables) for instructions.
+After modifying your Postgres publication, you must restart the replication pipeline for changes to take effect. See [Adding or removing tables](/docs/guides/database/replication/replication-setup#adding-or-removing-tables) for instructions.
## Why is a pipeline in failed state?
@@ -48,7 +49,7 @@ Table errors occur during the copy phase. To recover, click **View status**, fin
## How to verify replication is working
-Check the [Database](/dashboard/project/_/database/replication) → **Replication** page:
+Check the [Database](/dashboard/project/_/database/replication) → **replication** page:
1. Verify your pipeline shows **Running** status
2. Click **View status** to check table states
@@ -57,35 +58,49 @@ Check the [Database](/dashboard/project/_/database/replication) → **Replicatio
See the [Replication Monitoring guide](/docs/guides/database/replication/replication-monitoring) for comprehensive monitoring instructions.
-## What are the main limitations?
+## How to stop or pause replication
-Key limitations to be aware of:
+You can manage your pipeline using the actions menu in the destinations list. See [Managing your pipeline](/docs/guides/database/replication/replication-setup#managing-your-pipeline) for details on available actions.
-- **Primary keys**: Required on all tables
-- **Custom data types**: Not supported
-- **Schema changes**: Not automatically handled
+Note: Stopping replication will cause changes to queue up in the WAL.
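+
+You can keep an eye on how much WAL a stopped pipeline is retaining with a query like the following sketch, which uses the standard `pg_replication_slots` view:
+
+```sql
+-- WAL retained by each replication slot while the pipeline is stopped
+select slot_name, active,
+       pg_size_pretty(pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn)) as retained_wal
+from pg_replication_slots;
+```
+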
-Destination-specific limitations may also apply. See the [Iceberg](/docs/guides/database/replication/replication-iceberg#limitations) destination page for details.
+## What happens if a table is deleted at the destination?
-## How to stop or pause replication
+If a table is deleted downstream at the destination (e.g., in your Analytics Bucket or BigQuery dataset), the replication pipeline will automatically recreate it.
-You can manage your pipeline using the actions menu in the destinations list. See [Managing your pipeline](/docs/guides/database/replication/replication-setup#managing-your-pipeline) for details on available actions.
+This behavior is by design to prevent the pipeline from breaking if tables are accidentally deleted. The pipeline ensures that all tables in your publication are always present at the destination.
-Note: Stopping replication will cause changes to queue up in the WAL.
+**To permanently remove a table from your destination:**
+
+You have two options:
+
+**Option 1: Pause the pipeline first**
+
+1. Pause or delete your replication pipeline
+2. Delete the table at your destination
+3. The table will not be recreated since the pipeline is not running
+
+**Option 2: Remove from publication first**
+
+1. Remove the table from your Postgres publication using `ALTER PUBLICATION ... DROP TABLE`
+2. Restart your replication pipeline to apply the change (the table at the destination will remain but stop receiving new changes)
+3. Delete the table at your destination
+
+Note: Removing a table from the publication and restarting the pipeline does not delete the table downstream; it only stops replicating new changes to it.
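+
+A minimal sketch of Option 2, assuming a publication named `pub_users_orders` and a table named `orders`:
+
+```sql
+-- 1. Stop publishing changes for the table
+alter publication pub_users_orders drop table orders;
+-- 2. Restart the pipeline from the Dashboard, then delete the table at the destination
+```
+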
## Can data duplicates occur during pipeline operations?
Yes, data duplicates can occur in certain scenarios when stopping a pipeline.
-When you stop a pipeline (for restarts or updates), replication tries to finish processing any transactions that are currently being sent to your destination. It waits up to a few minutes to allow these in-progress transactions to complete cleanly before stopping.
+When you stop a pipeline (for restarts or updates), the replication process tries to finish processing any transactions that are currently being sent to your destination. It waits up to a few minutes to allow these in-progress transactions to complete cleanly before stopping.
However, if a transaction in your database takes longer than this waiting period to complete, the pipeline will stop before that entire transaction has been fully processed. When the pipeline starts again, it must restart the incomplete transaction from the beginning to maintain transaction boundaries, which results in some data being sent twice to your destination.
-**Understanding transaction boundaries**: A transaction is a group of database changes that happen together (for example, all changes within a `BEGIN...COMMIT` block). Replication must process entire transactions - it cannot process part of a transaction, stop, and then continue from the middle. This means if a transaction is interrupted, the whole transaction must be replayed when the pipeline resumes.
+**Understanding transaction boundaries**: A transaction is a group of database changes that happen together (for example, all changes within a `BEGIN...COMMIT` block). Postgres logical replication must process entire transactions - it cannot process part of a transaction, stop, and then continue from the middle. This means if a transaction is interrupted, the whole transaction must be replayed when the pipeline resumes.
**Example scenario**: Suppose you have a batch operation that updates 10,000 rows within a single transaction. If this operation takes 10 minutes to complete and you stop the pipeline after 5 minutes (when 5,000 rows have been processed), the pipeline cannot resume from row 5,001. Instead, when it restarts, it must reprocess all 10,000 rows from the beginning, resulting in the first 5,000 rows being sent to your destination twice.
-**Important**: We are not currently planning to implement automatic deduplication. If your use case requires guaranteed exactly-once delivery, you should implement deduplication logic in your downstream systems based on primary keys or other unique identifiers.
+**Important**: There are currently no plans to implement automatic deduplication. If your use case requires guaranteed exactly-once delivery, you should implement deduplication logic in your downstream systems based on primary keys or other unique identifiers.
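+
+As an illustration only, downstream deduplication can be as simple as keeping one row per primary key. The sketch below assumes a destination table with primary key `id` and a hypothetical `commit_ts` column recording when each change arrived:
+
+```sql
+-- Keep only the latest copy of each row (hypothetical table and column names)
+select * from (
+  select t.*,
+         row_number() over (partition by id order by commit_ts desc) as rn
+  from replicated_table t
+) deduped
+where rn = 1;
+```
+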
## Where to find replication logs
diff --git a/apps/docs/content/guides/database/replication/replication-iceberg.mdx b/apps/docs/content/guides/database/replication/replication-iceberg.mdx
deleted file mode 100644
index 3319493871f55..0000000000000
--- a/apps/docs/content/guides/database/replication/replication-iceberg.mdx
+++ /dev/null
@@ -1,88 +0,0 @@
----
-id: 'replication-iceberg'
-title: 'Replicate to Iceberg (Analytics Buckets)'
-description: 'Replicate your Supabase database to Iceberg format using Analytics Buckets.'
-subtitle: 'Stream data to Analytics Buckets.'
-sidebar_label: 'Iceberg'
----
-
-
-
-Replication is currently in private alpha. Access is limited and features may change.
-
-
-
-
-
-Iceberg replication is currently incomplete. It provides an append-only log listing all your data changes with an additional column explaining the type of operation (INSERT, UPDATE, DELETE).
-
-
-
-Apache Iceberg is an open table format for analytic datasets. Replication powered by [Supabase ETL](https://github.com/supabase/etl) to Iceberg uses Supabase [Analytics Buckets](/docs/guides/storage/analytics) to store your replicated data.
-
-
-
-This page covers Iceberg-specific configuration. For complete setup instructions including publications, general settings, and pipeline management, see the [Replication Setup guide](/docs/guides/database/replication/replication-setup).
-
-
-
-### Setup
-
-Setting up Iceberg replication requires two steps: creating an Analytics Bucket, then configuring it as a destination.
-
-#### Step 1: Create an Analytics bucket
-
-First, create an Analytics Bucket to store your replicated data:
-
-1. Navigate to [Storage](/dashboard/project/_/storage/buckets) → **Analytics** in your Supabase Dashboard
-2. Click **New bucket**
-
-
-
-#### Step 2: Add Iceberg as a destination
-
-After clicking **New bucket**, fill in the bucket details and copy the credentials:
-
-1. Fill in the bucket details:
-
-
-
- - **Name**: A unique name for your bucket
- - **Region**: Select the region where your data will be stored
-
-2. Click **Create bucket**
-3. **Copy the credentials** displayed after bucket creation (Catalog Token, S3 Access Key ID, S3 Secret Access Key). You'll need these in the next steps.
-4. Navigate to [Database](/dashboard/project/_/database/replication) → **Replication** in your Supabase Dashboard
-5. Click **Add destination**
-6. Configure the destination:
- - **Destination type**: Select **Iceberg (Analytics Bucket)**
- - **Bucket**: The name of your Analytics Bucket from Step 1
- - **Namespace**: The schema name where your tables will be replicated (e.g., `public`)
- - **Catalog Token**: Authentication token for accessing the Iceberg catalog (copied in Step 3)
- - **S3 Access Key ID**: Access key for S3-compatible storage (copied in Step 3)
- - **S3 Secret Access Key**: Secret key for S3-compatible storage (copied in Step 3)
-7. Complete the remaining configuration following the [Replication Setup guide](/docs/guides/database/replication/replication-setup)
-
-For more information about Analytics Buckets, see the [Analytics Buckets documentation](/docs/guides/storage/analytics).
-
-### Limitations
-
-Iceberg-specific limitations:
-
-- **Append-only log**: Currently provides an append-only log format rather than a full table representation
-
-For general replication limitations that apply to all destinations, see the [Replication Setup guide](/docs/guides/database/replication/replication-setup#limitations).
-
-### Next steps
-
-- [Set up replication](/docs/guides/database/replication/replication-setup)
-- [Monitor replication](/docs/guides/database/replication/replication-monitoring)
-- [View replication FAQ](/docs/guides/database/replication/replication-faq)
diff --git a/apps/docs/content/guides/database/replication/replication-monitoring.mdx b/apps/docs/content/guides/database/replication/replication-monitoring.mdx
index f157e216f66b3..4f6baf0444c51 100644
--- a/apps/docs/content/guides/database/replication/replication-monitoring.mdx
+++ b/apps/docs/content/guides/database/replication/replication-monitoring.mdx
@@ -12,14 +12,14 @@ Replication is currently in private alpha. Access is limited and features may ch
-After setting up replication, you can monitor the status and health of your replication pipelines directly from the Supabase Dashboard. The pipeline is the active process that continuously replicates changes from your database to your destination.
+After setting up replication, you can monitor the status and health of your replication pipelines directly from the Supabase Dashboard. The pipeline is the active Postgres replication process that continuously streams changes from your database to your destination.
### Viewing pipeline status
To monitor your replication pipelines:
1. Navigate to the [Database](/dashboard/project/_/database/replication) section in your Supabase Dashboard
-2. Select the **Replication** tab
+2. Select the **replication** tab
3. You'll see a list of all your destinations with their pipeline status
+
+When a pipeline error occurs, you'll receive an email notification immediately so you can take action to resolve the issue.
+
+
+
-Replication powered by [Supabase ETL](https://github.com/supabase/etl) requires two main components: a **publication** (source) and a **destination**. Follow these steps to set up your replication pipeline.
+Replication uses **Postgres logical replication** to stream changes from your database. It is powered by [Supabase ETL](https://github.com/supabase/etl), an open source tool built for Postgres logical replication, and provides a managed interface through the Dashboard to configure and monitor replication pipelines.
+
+## Setup overview
+
+Replication requires two main components: a **Postgres publication** (defines what to replicate) and a **destination** (where data is sent). Follow these steps to set up your replication pipeline.
-If you already have a publication set up, you can skip to [Step 2: Enable replication](#step-2-enable-replication).
+If you already have a Postgres publication set up, you can skip to [Step 2: Enable replication](#step-2-enable-replication).
-### Step 1: Create a publication
+### Step 1: Create a Postgres publication
-A publication defines which tables and change types will be replicated. You need to create a publication using SQL.
+A Postgres publication defines which tables and change types will be replicated from your database. You create publications using SQL.
#### Creating a publication
@@ -36,7 +40,7 @@ create publication pub_users_orders
for table users, orders;
```
-This publication will track all changes (INSERT, UPDATE, DELETE) for both the `users` and `orders` tables.
+This publication will track all changes (INSERT, UPDATE, DELETE, TRUNCATE) for both the `users` and `orders` tables.
##### Publication for all tables in a schema
@@ -96,7 +100,7 @@ After creating a publication via SQL, you can view it in the Supabase Dashboard:
Before adding destinations, you need to enable replication for your project:
1. Navigate to the [Database](/dashboard/project/_/database/replication) section in your Supabase Dashboard
-2. Select the **Replication** tab
+2. Select the **replication** tab
3. Click **Enable replication** to activate replication for your project
+
+
+[Analytics Buckets](/docs/guides/storage/analytics/introduction) are specialized storage buckets in Supabase Storage designed for analytical workloads. They provide S3-compatible storage and use the [Apache Iceberg](https://iceberg.apache.org/) open table format, making your data accessible via standard tools like DuckDB, Spark, and other analytics platforms.
+
+When you replicate to Analytics Buckets, your database changes are automatically written in Iceberg format, creating tables in object storage that you can query for analytics.
+
+##### Step 1: Create an analytics bucket
+
+First, create an analytics bucket to store your replicated data:
+
+1. Navigate to [Storage](/dashboard/project/_/storage/buckets) → **Analytics** in your Supabase Dashboard
+2. Click **New bucket**
-#### Available destinations
+
-For a complete list of available destinations and how to choose the right one for your needs, see [Destinations](/docs/guides/database/replication/replication-destinations).
+3. Fill in the bucket details:
-#### Configuration
+
-1. In the Replication tab, click **Add destination**
-2. Configure the destination settings:
+ - **Name**: A unique name for your bucket (e.g., `analytics_warehouse`)
+ - **Region**: Select the region where your data will be stored
- **General Settings:**
+4. Click **Create bucket**
+
+5. **Copy the credentials** displayed after bucket creation. You'll need these in the next steps:
+ - **Catalog Token**: Authentication token for accessing the Iceberg catalog
+ - **S3 Access Key ID**: Access key for S3-compatible storage
+ - **S3 Secret Access Key**: Secret key for S3-compatible storage
+
+##### Step 2: Configure analytics buckets as a destination
+
+1. Navigate to [Database](/dashboard/project/_/database/replication) → **replication** in your Supabase Dashboard
+2. Click **Add destination**
+
+
+
+3. Configure the general settings:
- **Destination name**: A name to identify this destination (e.g., "Analytics Warehouse")
- - **Publication**: The publication to replicate data from (created in Step 1)
- - **Destination type**: Choose from available destination types
+ - **Publication**: The publication to replicate data from (created in [Step 1](#step-1-create-a-postgres-publication))
+ - **Destination type**: Select **Analytics Buckets**
- **Destination-specific settings:**
- Each destination type requires different configuration. See the [Destinations guide](/docs/guides/database/replication/replication-destinations) for configuration details specific to your chosen destination.
+4. Configure Analytics Buckets settings:
-
+ - **Bucket**: The name of your analytics bucket from Step 1
+ - **Namespace**: The schema name where your tables will be replicated (e.g., `public`)
+ - **Catalog Token**: Authentication token from Step 1
+ - **S3 Access Key ID**: Access key from Step 1
+ - **S3 Secret Access Key**: Secret key from Step 1
+
+5. Configure **Advanced Settings** (optional):
+
+ - **Batch wait time (milliseconds)**: How long to wait for more changes before sending a batch. Default is recommended for optimal performance.
+
+6. Click **Create and start** to begin replication
+
+Your replication pipeline will now start copying data from your database to the analytics bucket in Iceberg format.
+
+##### How it works
+
+Once configured, replication to Analytics Buckets:
+
+1. Captures changes from your Postgres database (INSERT, UPDATE, DELETE, TRUNCATE operations)
+2. Batches changes for optimal performance
+3. Creates Iceberg tables automatically to match your Postgres schema
+4. Streams data to Analytics Buckets
+
+##### How tables are structured
+
+Replicated tables use a changelog structure:
+
+- Tables are created with a `_changelog` suffix in the name (e.g., `users_changelog`)
+- Each table contains a `cdc_operation` column indicating the operation type: `INSERT`, `UPDATE`, `DELETE`, or `TRUNCATE`
+- This append-only format preserves the complete history of all changes
+
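+For example, once data is flowing you can inspect the changelog from any engine that reads the Iceberg table. A sketch, assuming a replicated `users` table:
+
+```sql
+-- Count replicated changes by operation type
+select cdc_operation, count(*) as changes
+from users_changelog
+group by cdc_operation;
+```
+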
+##### Limitations
+
+- **Append-only log**: Currently provides an append-only log format rather than a full table representation
+
+##### Additional resources
+
+- [Analytics Buckets documentation](/docs/guides/storage/analytics/introduction) - Learn more about Analytics Buckets and S3-compatible storage
+- [Realtime Data Sync to Analytics Buckets](/docs/guides/storage/analytics/replication) - Step-by-step guide for replicating to Analytics Buckets
+- [Apache Iceberg](https://iceberg.apache.org/) - Official Iceberg documentation
+
+
+
+
+[BigQuery](https://cloud.google.com/bigquery) is Google's fully managed data warehouse. You can replicate your database tables to BigQuery for analytics and reporting.
+
+##### Step 1: Prepare GCP resources
+
+Before configuring BigQuery as a destination, set up the following in Google Cloud Platform:
+
+1. **Google Cloud Platform (GCP) account**: [Sign up for GCP](https://cloud.google.com/gcp) if you don't have one
+
+2. **BigQuery dataset**: Create a [BigQuery dataset](https://cloud.google.com/bigquery/docs/datasets-intro) in your GCP project
-3. Configure **Advanced Settings** (optional):
+ - Open the BigQuery console in GCP
+ - Select your project
+ - Click "Create Dataset"
+ - Provide a dataset ID (e.g., `supabase_replication`)
- - **Batch wait time (milliseconds)**: How long to wait for more changes before sending a batch. We recommend leaving this at the default value for optimal performance. Setting this too low can result in too much traffic and less efficient batching.
+3. **GCP service account key**: Create a [service account](https://cloud.google.com/iam/docs/keys-create-delete) with appropriate permissions:
-4. Click **Create and start** to begin replication
+ - Go to IAM & Admin → Service Accounts
+ - Click "Create Service Account"
+ - Grant the **BigQuery Data Editor** role
+ - Create and download the JSON key file
+
+ Required permissions:
+
+ - `bigquery.datasets.get`
+ - `bigquery.tables.create`
+ - `bigquery.tables.get`
+ - `bigquery.tables.getData`
+ - `bigquery.tables.update`
+ - `bigquery.tables.updateData`
+
+##### Step 2: Configure BigQuery as a destination
+
+1. Navigate to [Database](/dashboard/project/_/database/replication) → **replication** in your Supabase Dashboard
+2. Click **Add destination**
+
+
+
+3. Configure the general settings:
+
+ - **Destination name**: A name to identify this destination (e.g., "BigQuery Warehouse")
+ - **Publication**: The publication to replicate data from (created in [Step 1](#step-1-create-a-postgres-publication))
+ - **Destination type**: Select **BigQuery**
+
+4. Configure BigQuery-specific settings:
+
+ - **Project ID**: Your BigQuery project identifier (found in the GCP Console)
+ - **Dataset ID**: The name of your BigQuery dataset (without the project ID)
+
+
+
+ In the GCP Console, the dataset is shown as `project-id.dataset-id`. Enter only the part after the dot. For example, if you see `my-project.my_dataset`, enter `my_dataset`.
+
+
+
+ - **Service Account Key**: Your GCP service account key in JSON format (from Step 1)
+
+5. Configure **Advanced Settings** (optional):
+
+ - **Batch wait time (milliseconds)**: How long to wait for more changes before sending a batch. Default is recommended for optimal performance.
+
+6. Click **Create and start** to begin replication
+
+Your replication pipeline will now start copying data from your database to BigQuery.
+
+##### How it works
+
+Once configured, replication to BigQuery:
+
+1. Captures changes from your Postgres database (INSERT, UPDATE, DELETE, TRUNCATE operations)
+2. Batches changes for optimal performance
+3. Creates BigQuery tables automatically to match your Postgres schema
+4. Streams data to BigQuery
+
+##### How tables are structured
+
+Due to BigQuery limitations, replicated tables use a versioned structure:
+
+- The table you query is a **view** (e.g., `users`)
+- The actual data is stored in versioned tables with a `_version` suffix (e.g., `users_version`)
+- When a table is truncated in your database, a new version is created and the view automatically points to the latest version
+
+This structure handles table truncations seamlessly while maintaining query compatibility.
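+
+In practice you query the view just like a regular table. A sketch (project, dataset, table, and column names are illustrative):
+
+```sql
+-- Query the replicated view in BigQuery
+SELECT id, name, email
+FROM `my-project.my_dataset.users`
+ORDER BY id
+LIMIT 100;
+```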
+
+##### Limitations
+
+- **Row size**: Limited to 10 MB per row due to BigQuery Storage Write API constraints
+
+##### Additional resources
+
+- [BigQuery documentation](https://cloud.google.com/bigquery/docs) - Official Google BigQuery documentation
+
+
+
### Step 4: Monitor your pipeline
-After creating a destination, the pipeline will start and appear in the destinations list. You can monitor the pipeline's status and performance from the Dashboard.
+After creating a destination, the replication pipeline will start and appear in the destinations list. You can monitor the pipeline's status and performance from the Dashboard.
@@ -171,11 +355,11 @@ Available actions:
### Adding or removing tables
-If you need to modify which tables are replicated after your pipeline is already running, follow these steps:
+If you need to modify which tables are replicated after your replication pipeline is already running, follow these steps:
-If your publication uses `FOR ALL TABLES` or `FOR TABLES IN SCHEMA`, new tables in that scope are automatically included in the publication. However, you still **must restart the pipeline** for the changes to take effect.
+If your Postgres publication uses `FOR ALL TABLES` or `FOR TABLES IN SCHEMA`, new tables in that scope are automatically included in the publication. However, you still **must restart the replication pipeline** for the changes to take effect.
@@ -191,11 +375,11 @@ If your publication uses `FOR ALL TABLES` or `FOR TABLES IN SCHEMA`, new tables
alter publication pub_users_orders add table products, categories;
```
-2. **Restart the pipeline** using the actions menu (see [Managing your pipeline](#managing-your-pipeline)) for the changes to take effect.
+2. **Restart the replication pipeline** using the actions menu (see [Managing your pipeline](#managing-your-pipeline)) for the changes to take effect.
#### Removing tables from replication
-1. Remove the table from your publication using SQL:
+1. Remove the table from your Postgres publication using SQL:
```sql
-- Remove a single table from a publication
@@ -205,56 +389,57 @@ If your publication uses `FOR ALL TABLES` or `FOR TABLES IN SCHEMA`, new tables
alter publication pub_users_orders drop table orders, products;
```
-2. **Restart the pipeline** using the actions menu (see [Managing your pipeline](#managing-your-pipeline)) for the changes to take effect.
+2. **Restart the replication pipeline** using the actions menu (see [Managing your pipeline](#managing-your-pipeline)) for the changes to take effect.
-### How it works
+
-Once configured, replication:
+Deleted tables are automatically recreated by the pipeline. To permanently delete a table, pause the pipeline first or remove it from the publication before deleting. See the [FAQ](/docs/guides/database/replication/replication-faq#what-happens-if-a-table-is-deleted-at-the-destination) for details.
-1. **Captures** changes from your database using the publication
-2. **Loads** the data to your destination in near real-time batches
+
-Changes are sent in batches to optimize performance and reduce costs. The batch size and timing can be adjusted using the advanced settings.
+### How it works
-
+Once configured, replication:
-Replication currently performs data extraction and loading only, without transformation. Your data is replicated as-is to the destination.
+1. **Captures** changes from your Postgres database using Postgres publications and logical replication
+2. **Streams** the changes through the replication pipeline
+3. **Loads** the data to your destination in near real-time batches
-
+Changes are sent in batches to optimize performance and reduce costs. The batch size and timing can be adjusted using the advanced settings. The replication pipeline currently performs data extraction and loading only, without transformation - your data is replicated as-is to the destination.
### Troubleshooting
If you encounter issues during setup:
-- **Publication not appearing**: Ensure you created the publication via SQL and refresh the dashboard
-- **Tables not showing in publication**: Verify your tables have primary keys (required for replication)
+- **Publication not appearing**: Ensure you created the Postgres publication via SQL and refresh the dashboard
+- **Tables not showing in publication**: Verify your tables have primary keys (required for Postgres logical replication)
- **Pipeline failed to start**: Check the error message in the status view for specific details
-- **No data being replicated**: Verify your publication includes the correct tables and event types
+- **No data being replicated**: Verify your Postgres publication includes the correct tables and event types
For more troubleshooting help, see the [Replication FAQ](/docs/guides/database/replication/replication-faq).
### Limitations
-Replication has the following limitations that apply to all destinations:
+Replication has the following limitations:
-- **Primary keys required**: Tables must have primary keys
+- **Primary keys required**: Tables must have primary keys (Postgres logical replication requirement)
- **Custom data types**: Not supported
- **Schema changes**: Not automatically handled
- **No data transformation**: Data is replicated as-is without transformation
- **Data duplicates**: Duplicates can occur when stopping a pipeline if your database has transactions that take longer than a few minutes to complete. See [Can data duplicates occur?](/docs/guides/database/replication/replication-faq#can-data-duplicates-occur-during-pipeline-operations) for details
-Destination-specific limitations may also apply. See the [Iceberg](/docs/guides/database/replication/replication-iceberg#limitations) destination page for details.
+Destination-specific limitations (such as Iceberg's append-only log format or BigQuery's row size limits) are detailed in each destination tab in [Step 3](#step-3-add-a-destination) above.
### Future work
Replication is actively being developed. Planned improvements include:
-- **DDL support**: Automatic handling of schema changes (ALTER TABLE, ADD COLUMN, etc.)
+- **DDL support**: Automatic handling of Postgres schema changes (ALTER TABLE, ADD COLUMN, etc.)
- **Additional destinations**: Support for more data warehouses and analytics platforms
-We don't have public timelines for these features, but they represent our roadmap for making replication more robust and flexible.
+There are no public timelines for these features, but they represent the roadmap for making replication more robust and flexible.
### Next steps
-- [Monitor replication](/docs/guides/database/replication/replication-monitoring)
-- [View replication FAQ](/docs/guides/database/replication/replication-faq)
+- [Monitor Replication](/docs/guides/database/replication/replication-monitoring)
+- [View Replication FAQ](/docs/guides/database/replication/replication-faq)
diff --git a/apps/docs/content/guides/storage/analytics/replication.mdx b/apps/docs/content/guides/storage/analytics/replication.mdx
index 867e37a7b768d..842231cb05a36 100644
--- a/apps/docs/content/guides/storage/analytics/replication.mdx
+++ b/apps/docs/content/guides/storage/analytics/replication.mdx
@@ -5,15 +5,17 @@ subtitle: 'Replicate your PostgreSQL data to analytics buckets in real-time.'
-Expect rapid changes, limited features, and possible breaking updates. [Share feedback](https://github.com/orgs/supabase/discussions/40116) as we refine the experience and expand access.
+Expect rapid changes, limited features, and possible breaking updates. [Share feedback](https://github.com/orgs/supabase/discussions/40116) as the experience is refined and access is expanded.
By combining replication powered by [Supabase ETL](https://github.com/supabase/etl) with **Analytics Buckets**, you can build an end-to-end data warehouse solution that automatically syncs changes from your Postgres database to Iceberg tables.
+This guide provides a quickstart for replicating to Analytics Buckets. For complete replication configuration including other destinations, see the [Replication Setup Guide](/docs/guides/database/replication/replication-setup).
+
## How it works
-The replication pipeline captures changes (INSERT, UPDATE, DELETE) from your Postgres database in real-time and writes them to your analytics bucket. This allows you to maintain an always-up-to-date data warehouse without impacting your production workloads.
+The replication pipeline captures changes (INSERT, UPDATE, DELETE) from your Postgres database in real-time using Postgres logical replication and writes them to your analytics bucket. This allows you to maintain an always-up-to-date data warehouse without impacting your production workloads.
## Setup steps
@@ -43,7 +45,7 @@ CREATE PUBLICATION pub_warehouse
FOR TABLE users, orders, products;
```
-This publication will track all changes (INSERT, UPDATE, DELETE) for the specified tables. For advanced use cases, see the [Replication Configuration Guide](/docs/guides/database/replication/replication-setup).
+This publication will track all changes (INSERT, UPDATE, DELETE) for the specified tables. For advanced publication options like column filtering and row predicates, see the [Replication Setup Guide](/docs/guides/database/replication/replication-setup#advanced-publication-options).
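+
+As a quick illustration of those advanced options (a sketch requiring Postgres 15 or newer; table, column, and publication names are examples):
+
+```sql
+-- Publish only selected columns, and only rows matching a filter
+-- Note: for UPDATE/DELETE publishing, the row filter may only reference replica identity columns
+CREATE PUBLICATION pub_recent_orders
+  FOR TABLE orders (id, user_id, total) WHERE (id > 1000);
+```
+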
### Step 3: Create the replication pipeline
@@ -70,6 +72,12 @@ Once started, you can monitor the pipeline status directly in the **Database > R
- **Sync Progress** - View the number of records replicated
- **Logs** - Check detailed logs for troubleshooting
+
+
+Deleted tables are automatically recreated by the pipeline. To permanently delete a table, pause the pipeline first or remove it from the publication before deleting. See the [FAQ](/docs/guides/database/replication/replication-faq#what-happens-if-a-table-is-deleted-at-the-destination) for details.
+
+
+
## Next steps
Once data is flowing to your analytics bucket, you can:
@@ -78,4 +86,8 @@ Once data is flowing to your analytics bucket, you can:
- [Connect with PyIceberg](/docs/guides/storage/analytics/examples/pyiceberg)
- [Analyze with Apache Spark](/docs/guides/storage/analytics/examples/apache-spark)
-For advanced topics, see the [Replication Monitoring Guide](/docs/guides/database/replication/replication-monitoring).
+For detailed replication configuration and advanced topics:
+
+- [Replication Setup Guide](/docs/guides/database/replication/replication-setup) - Complete replication configuration including BigQuery and other destinations
+- [Replication Monitoring Guide](/docs/guides/database/replication/replication-monitoring) - Monitor replication pipeline status and health
+- [Replication FAQ](/docs/guides/database/replication/replication-faq) - Common questions about replication
diff --git a/apps/docs/public/img/database/replication/replication-enable-replication.png b/apps/docs/public/img/database/replication/replication-enable-replication.png
index e6c34f117a554..d1d79a68b8b6c 100644
Binary files a/apps/docs/public/img/database/replication/replication-enable-replication.png and b/apps/docs/public/img/database/replication/replication-enable-replication.png differ
diff --git a/apps/studio/components/interfaces/Auth/SmtpForm/SmtpForm.tsx b/apps/studio/components/interfaces/Auth/SmtpForm/SmtpForm.tsx
index 01be3cbe7dcad..c8fc282d05369 100644
--- a/apps/studio/components/interfaces/Auth/SmtpForm/SmtpForm.tsx
+++ b/apps/studio/components/interfaces/Auth/SmtpForm/SmtpForm.tsx
@@ -25,6 +25,7 @@ import {
Input_Shadcn_,
PrePostTab,
Switch,
+ cn,
} from 'ui'
import { Admonition } from 'ui-patterns'
import { FormItemLayout } from 'ui-patterns/form/FormItemLayout/FormItemLayout'
@@ -444,20 +445,45 @@ export const SmtpForm = () => {
>
)}
-
+
{form.formState.isDirty && (
-
+
+ {enableSmtp ? (
+ <>
+ Rate limit for sending emails will be increased to 30 and{' '}
+
+ can be adjusted
+ {' '}
+ after enabling custom SMTP
+ >
+ ) : (
+ 'Rate limit for sending emails will be reduced to 2 after disabling custom SMTP'
+ )}
+
)}
-
+
+ {form.formState.isDirty && (
+
+ )}
+
+
diff --git a/apps/studio/components/interfaces/Auth/Users/UsersV2.tsx b/apps/studio/components/interfaces/Auth/Users/UsersV2.tsx
index d336d36c9e91f..3f372efe3d8da 100644
--- a/apps/studio/components/interfaces/Auth/Users/UsersV2.tsx
+++ b/apps/studio/components/interfaces/Auth/Users/UsersV2.tsx
@@ -20,9 +20,9 @@ import { User, useUsersInfiniteQuery } from 'data/auth/users-infinite-query'
import { useSendEventMutation } from 'data/telemetry/send-event-mutation'
import { useIsFeatureEnabled } from 'hooks/misc/useIsFeatureEnabled'
import { useLocalStorageQuery } from 'hooks/misc/useLocalStorage'
+import { useQueryStateWithSelect } from 'hooks/misc/useQueryStateWithSelect'
import { useSelectedOrganizationQuery } from 'hooks/misc/useSelectedOrganization'
import { useSelectedProjectQuery } from 'hooks/misc/useSelectedProject'
-import { useQueryStateWithSelect } from 'hooks/misc/useQueryStateWithSelect'
import { cleanPointerEventsNoneOnBody, isAtBottom } from 'lib/helpers'
import { parseAsArrayOf, parseAsString, parseAsStringEnum, useQueryState } from 'nuqs'
import {
@@ -365,7 +365,7 @@ export const UsersV2 = () => {
isCountLoaded &&
isCountWithinThresholdForSortBy
) {
- if (specificFilterColumn === 'id' && localStorageFilter !== 'id') {
+ if (specificFilterColumn === 'email' && localStorageFilter !== 'email') {
setSpecificFilterColumn(localStorageFilter)
}
if (sortByValue === 'id:asc' && localStorageSortByValue !== 'id:asc') {
diff --git a/apps/studio/components/interfaces/Home/ServiceStatus.tsx b/apps/studio/components/interfaces/Home/ServiceStatus.tsx
index 95226b175ed6f..f977e8517e756 100644
--- a/apps/studio/components/interfaces/Home/ServiceStatus.tsx
+++ b/apps/studio/components/interfaces/Home/ServiceStatus.tsx
@@ -3,8 +3,8 @@ import { AlertTriangle, CheckCircle2, ChevronRight, Loader2 } from 'lucide-react
import Link from 'next/link'
import { useEffect, useState } from 'react'
-import { PopoverSeparator } from '@ui/components/shadcn/ui/popover'
import { useParams } from 'common'
+import { InlineLink } from 'components/ui/InlineLink'
import { useBranchesQuery } from 'data/branches/branches-query'
import { useEdgeFunctionServiceStatusQuery } from 'data/service-status/edge-functions-status-query'
import {
@@ -335,16 +335,27 @@ export const ServiceStatus = () => {
))}
- {allServicesOperational ? null : (
- <>
-
-
-
-
-
- Recently restored projects can take up to 5 minutes to become fully operational.
+ {!allServicesOperational && (
+
+
+
+
+
+
+ {isProjectNew ? 'New' : 'Recently restored'} projects can take up to{' '}
+ {SERVICE_STATUS_THRESHOLD} minutes to become fully operational.
+
+
+ If services stay unhealthy, refer to our{' '}
+
+ docs
+ {' '}
+ for more information.
+
- >
+
)}
diff --git a/apps/studio/components/interfaces/HomeNew/ServiceStatus.tsx b/apps/studio/components/interfaces/HomeNew/ServiceStatus.tsx
index 62278cfabeb3b..98b4ba12838e7 100644
--- a/apps/studio/components/interfaces/HomeNew/ServiceStatus.tsx
+++ b/apps/studio/components/interfaces/HomeNew/ServiceStatus.tsx
@@ -1,8 +1,9 @@
+import dayjs from 'dayjs'
import { AlertTriangle, CheckCircle2, ChevronRight, Loader2 } from 'lucide-react'
import Link from 'next/link'
-import { PopoverSeparator } from '@ui/components/shadcn/ui/popover'
import { useParams } from 'common'
+import { InlineLink } from 'components/ui/InlineLink'
import { SingleStat } from 'components/ui/SingleStat'
import { useBranchesQuery } from 'data/branches/branches-query'
import { useEdgeFunctionServiceStatusQuery } from 'data/service-status/edge-functions-status-query'
@@ -15,6 +16,8 @@ import { useSelectedProjectQuery } from 'hooks/misc/useSelectedProject'
import { DOCS_URL } from 'lib/constants'
import { InfoIcon, PopoverContent_Shadcn_, PopoverTrigger_Shadcn_, Popover_Shadcn_, cn } from 'ui'
+const SERVICE_STATUS_THRESHOLD = 5 // minutes
+
/**
* [Joshen] JFYI before we go live with this, we need to revisit the migrations section
* as I don't think it should live in the ServiceStatus component since its not indicative
@@ -248,6 +251,7 @@ export const ServiceStatus = () => {
// Check if project or branch is in a startup state
const isProjectNew =
+ dayjs.utc().diff(dayjs.utc(project?.inserted_at), 'minute') < SERVICE_STATUS_THRESHOLD ||
project?.status === 'COMING_UP' ||
(isBranch &&
(currentBranch?.status === 'CREATING_PROJECT' ||
@@ -301,7 +305,7 @@ export const ServiceStatus = () => {
value={{overallStatusLabel}}
/>
-
+
{services.map((service) => (
{
))}
- {allServicesOperational ? null : (
- <>
-
-
-
-
-
- Recently restored projects can take up to 5 minutes to become fully operational.
+ {!allServicesOperational && (
+
+
+
+
+
+
+ {isProjectNew ? 'New' : 'Recently restored'} projects can take up to{' '}
+ {SERVICE_STATUS_THRESHOLD} minutes to become fully operational.
+
+
+ If services stay unhealthy, refer to our{' '}
+
+ docs
+ {' '}
+ for more information.
+