diff --git a/docs/self-hosted/azure-devops.md b/docs/self-hosted/azure-devops.md
index 980a24fb..04681002 100644
--- a/docs/self-hosted/azure-devops.md
+++ b/docs/self-hosted/azure-devops.md
@@ -11,7 +11,7 @@ The self-hosted option is only available for CodeRabbit Enterprise customers wit
 
 :::
 
-## Create a Azure DevOps User
+## Create an Azure DevOps User
 
 - **Username**: Set the username to "CodeRabbit" for easier identification (optional).
 - **Profile Image**: Use the CodeRabbitAI logo for the user image (optional).
@@ -61,14 +61,10 @@ LLM_PROVIDER=azure-openai
 LLM_TIMEOUT=360000
 AZURE_OPENAI_ENDPOINT=<azure-openai-endpoint>
 AZURE_OPENAI_API_KEY=<key>
-## it is recommended to use gpt-4o-mini, o1-mini, and o1-preview deployments
+## it is recommended to use gpt-4o-mini, o1-mini, and o1-preview deployments. As of now, the production release of the o1 model is inferior to the preview release, so prefer o1-preview. Also, make sure the o1-preview deployment name contains "o1-preview".
 AZURE_GPT4OMINI_DEPLOYMENT_NAME=<gpt-4o-mini-deployment-name>
 AZURE_O1MINI_DEPLOYMENT_NAME=[<o1-mini-deployment-name>]
-AZURE_O1_DEPLOYMENT_NAME=[<o1-preview-deployment-name>]
-## gpt-4o is optional
-AZURE_GPT4O_DEPLOYMENT_NAME=<gpt-4o-deployment-name, modelVersion: 2024-08-06>
-## gpt-4-turbo is optional: it’s expensive but provides better reviews than gpt-4o
-AZURE_GPT4TURBO_DEPLOYMENT_NAME=[<gpt-4-turbo-deployment-name, modelVersion: turbo-2024-04-09>]
+AZURE_O1_DEPLOYMENT_NAME=[<o1-deployment-name>]
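+## for example, a hypothetical deployment name that satisfies the note above (substitute your own):
+## AZURE_O1_DEPLOYMENT_NAME=my-o1-preview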
 
 # if using AWS Bedrock
 AWS_ACCESS_KEY_ID=<aws-access-key>
@@ -115,7 +111,7 @@ docker pull <docker-registry>/coderabbit-agent:latest
 
 ### Verify the image is up
 
-You can query `/health` endpoint to verify that the coderabbit-agent service is up and running.
+You can query the `/health` endpoint to verify that the `coderabbit-agent` service is up and running.
 
 ```bash
 curl 127.0.0.1:8080/health
diff --git a/docs/self-hosted/bitbucket.md b/docs/self-hosted/bitbucket.md
index bf5f45f1..a2879c65 100644
--- a/docs/self-hosted/bitbucket.md
+++ b/docs/self-hosted/bitbucket.md
@@ -1,5 +1,5 @@
 ---
-title: Use Self-Hosted CodeRabbit With  Bitbucket Datacenter
+title: Use Self-Hosted CodeRabbit With Bitbucket Datacenter
 sidebar_label: Bitbucket Datacenter
 description: Instructions to self-host CodeRabbit and integrate it with  Bitbucket Datacenter.
 sidebar_position: 4
@@ -36,9 +36,9 @@ Generate a personal access token for the CodeRabbit user to be added in the `.en
    - "Comment Added"
 3. **Add Webhook URL**: Enter the URL pointing to the CodeRabbit service, followed by `/bitbucket_server_webhooks` (e.g., `http://127.0.0.1:8080/bitbucket_server_webhooks`).
 
-## Prepare an `.env` file
+## Prepare a `.env` file
 
-Create an `.env` file with the following content:
+Create a `.env` file with the following content:
 
 ```bash
 # if using OpenAI
@@ -54,14 +54,10 @@ LLM_PROVIDER=azure-openai
 LLM_TIMEOUT=360000
 AZURE_OPENAI_ENDPOINT=<azure-openai-endpoint>
 AZURE_OPENAI_API_KEY=<key>
-## it is recommended to use gpt-4o-mini, o1-mini, and o1-preview deployments
+## it is recommended to use gpt-4o-mini, o1-mini, and o1-preview deployments. As of now, the production release of the o1 model is inferior to the preview release, so prefer o1-preview. Also, make sure the o1-preview deployment name contains "o1-preview".
 AZURE_GPT4OMINI_DEPLOYMENT_NAME=<gpt-4o-mini-deployment-name>
 AZURE_O1MINI_DEPLOYMENT_NAME=[<o1-mini-deployment-name>]
-AZURE_O1_DEPLOYMENT_NAME=[<o1-preview-deployment-name>]
-## gpt-4o is optional
-AZURE_GPT4O_DEPLOYMENT_NAME=<gpt-4o-deployment-name, modelVersion: 2024-08-06>
-## gpt-4-turbo is optional: it’s expensive but provides better reviews than gpt-4o
-AZURE_GPT4TURBO_DEPLOYMENT_NAME=[<gpt-4-turbo-deployment-name, modelVersion: turbo-2024-04-09>]
+AZURE_O1_DEPLOYMENT_NAME=[<o1-deployment-name>]
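+## for example, a hypothetical deployment name that satisfies the note above (substitute your own):
+## AZURE_O1_DEPLOYMENT_NAME=my-o1-preview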
 
 # if using AWS Bedrock
 AWS_ACCESS_KEY_ID=<aws-access-key>
@@ -108,7 +104,7 @@ docker pull us-docker.pkg.dev/coderabbitprod/self-hosted/coderabbit-agent:latest
 
 ### Verify the image is up
 
-You can query `/health` endpoint to verify that the coderabbit-agent service is up and running.
+You can query the `/health` endpoint to verify that the `coderabbit-agent` service is up and running.
 
 ```bash
 curl 127.0.0.1:8080/health
diff --git a/docs/self-hosted/github.md b/docs/self-hosted/github.md
index 0c3937c8..7bb7ed56 100644
--- a/docs/self-hosted/github.md
+++ b/docs/self-hosted/github.md
@@ -65,14 +65,10 @@ LLM_PROVIDER=azure-openai
 LLM_TIMEOUT=360000
 AZURE_OPENAI_ENDPOINT=<azure-openai-endpoint>
 AZURE_OPENAI_API_KEY=<key>
-## it is recommended to use gpt-4o-mini, o1-mini, and o1-preview deployments
+## it is recommended to use gpt-4o-mini, o1-mini, and o1-preview deployments. As of now, the production release of the o1 model is inferior to the preview release, so prefer o1-preview. Also, make sure the o1-preview deployment name contains "o1-preview".
 AZURE_GPT4OMINI_DEPLOYMENT_NAME=<gpt-4o-mini-deployment-name>
 AZURE_O1MINI_DEPLOYMENT_NAME=[<o1-mini-deployment-name>]
-AZURE_O1_DEPLOYMENT_NAME=[<o1-preview-deployment-name>]
-## gpt-4o is optional
-AZURE_GPT4O_DEPLOYMENT_NAME=<gpt-4o-deployment-name, modelVersion: 2024-08-06>
-## gpt-4-turbo is optional: it’s expensive but provides better reviews than gpt-4o
-AZURE_GPT4TURBO_DEPLOYMENT_NAME=[<gpt-4-turbo-deployment-name, modelVersion: turbo-2024-04-09>]
+AZURE_O1_DEPLOYMENT_NAME=[<o1-deployment-name>]
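+## for example, a hypothetical deployment name that satisfies the note above (substitute your own):
+## AZURE_O1_DEPLOYMENT_NAME=my-o1-preview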
 
 # if using AWS Bedrock
 AWS_ACCESS_KEY_ID=<aws-access-key>
@@ -145,7 +141,7 @@ docker run --env-file .env --publish 127.0.0.1:8080:8080 <docker-registry>/coder
 
 ### Verify the image is up
 
-You can query `/health` endpoint to verify that the coderabbit-agent service is up and running.
+You can query the `/health` endpoint to verify that the `coderabbit-agent` service is up and running.
 
 ```bash
 curl 127.0.0.1:8080/health
diff --git a/docs/self-hosted/gitlab.md b/docs/self-hosted/gitlab.md
index 55fefae2..ae402ebd 100644
--- a/docs/self-hosted/gitlab.md
+++ b/docs/self-hosted/gitlab.md
@@ -42,9 +42,9 @@ Consult official CodeRabbitAI documentation for a detailed [guide](https://docs.
    - Issues events
    - Merge request events
 
-## Prepare an `.env` file
+## Prepare a `.env` file
 
-Create an `.env` file with the following content:
+Create a `.env` file with the following content:
 
 ```bash
 # if using OpenAI
@@ -60,14 +60,10 @@ LLM_PROVIDER=azure-openai
 LLM_TIMEOUT=360000
 AZURE_OPENAI_ENDPOINT=<azure-openai-endpoint>
 AZURE_OPENAI_API_KEY=<key>
-## it is recommended to use gpt-4o-mini, o1-mini, and o1-preview deployments
+## it is recommended to use gpt-4o-mini, o1-mini, and o1-preview deployments. As of now, the production release of the o1 model is inferior to the preview release, so prefer o1-preview. Also, make sure the o1-preview deployment name contains "o1-preview".
 AZURE_GPT4OMINI_DEPLOYMENT_NAME=<gpt-4o-mini-deployment-name>
 AZURE_O1MINI_DEPLOYMENT_NAME=[<o1-mini-deployment-name>]
-AZURE_O1_DEPLOYMENT_NAME=[<o1-preview-deployment-name>]
-## gpt-4o is optional
-AZURE_GPT4O_DEPLOYMENT_NAME=<gpt-4o-deployment-name, modelVersion: 2024-08-06>
-## gpt-4-turbo is optional: it’s expensive but provides better reviews than gpt-4o
-AZURE_GPT4TURBO_DEPLOYMENT_NAME=[<gpt-4-turbo-deployment-name, modelVersion: turbo-2024-04-09>]
+AZURE_O1_DEPLOYMENT_NAME=[<o1-deployment-name>]
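+## for example, a hypothetical deployment name that satisfies the note above (substitute your own):
+## AZURE_O1_DEPLOYMENT_NAME=my-o1-preview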
 
 # if using AWS Bedrock
 AWS_ACCESS_KEY_ID=<aws-access-key>