Add Ollama Model for local usage of yorkie intelligence #297

Merged: 12 commits, Aug 20, 2024
9 changes: 5 additions & 4 deletions backend/.env.development
@@ -5,10 +5,10 @@ DATABASE_URL=mongodb://localhost:27017/codepair

# GITHUB_CLIENT_ID: Client ID for authenticating with GitHub.
# To obtain a client ID, create an OAuth app at: https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/creating-an-oauth-app
-GITHUB_CLIENT_ID=your_github_client_id_here
+GITHUB_CLIENT_ID=""
# GITHUB_CLIENT_SECRET: Client secret for authenticating with GitHub.
# To obtain a client ID, create an OAuth app at: https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/creating-an-oauth-app
-GITHUB_CLIENT_SECRET=your_github_client_secret_here
+GITHUB_CLIENT_SECRET=""
# GITHUB_CALLBACK_URL: Callback URL for handling GitHub authentication response.
# Format: https://<backend_url>/auth/login/github
# Example: http://localhost:3000/auth/login/github (For development mode)
@@ -37,9 +37,10 @@ YORKIE_PROJECT_NAME=default
YORKIE_PROJECT_SECRET_KEY=""

# YORKIE_INTELLIGENCE: Whether to enable Yorkie Intelligence for collaborative editing.
-# Set to true if Yorkie Intelligence is required.
+# Set to an Ollama model name if Yorkie Intelligence is required.
+# You can find available LLM models at https://ollama.com/library
# If set to false, OPENAI_API_KEY is not required.
-YORKIE_INTELLIGENCE=false
+YORKIE_INTELLIGENCE="gemma2:2b"
# OPENAI_API_KEY: API key for using the gpt-3.5-turbo model by Yorkie Intelligence.
# To obtain an API key, visit OpenAI: https://help.openai.com/en/articles/4936850-where-do-i-find-my-api-key
OPENAI_API_KEY=your_openai_api_key_here
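Taken together with the factory change in backend/src/langchain/langchain.module.ts and the settings change at the end of this diff, the variable now works as follows: an Ollama model name (for example "gemma2:2b") selects local inference and needs no OpenAI key, the value "openai" keeps using the hosted gpt-4o-mini model and requires OPENAI_API_KEY, and the literal string "false" disables Yorkie Intelligence.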
6 changes: 6 additions & 0 deletions backend/docker/docker-compose-full.yml
@@ -40,6 +40,12 @@ services:
- "8080:8080"
- "8081:8081"

yorkie-intelligence:
image: "ollama/ollama:latest"
restart: always
ports:
- "11434:11434"

mongo:
build:
context: ./mongodb_replica
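The ollama/ollama image starts with no models downloaded; the checkOrPullModel: true option passed to ChatOllama later in this diff should fetch the configured model on first use. As a quick smoke test against this container, something like the following can be run. This is a minimal sketch, not part of this PR; it assumes the default port mapping above and the gemma2:2b model from .env.development:

import { ChatOllama } from "@langchain/ollama";

async function main() {
  const model = new ChatOllama({
    baseUrl: "http://localhost:11434", // port published by the yorkie-intelligence service
    model: "gemma2:2b", // mirrors the YORKIE_INTELLIGENCE default
    checkOrPullModel: true, // download the model first if it is not present yet
  });

  const reply = await model.invoke("Reply with a short greeting.");
  console.log(reply.content);
}

main().catch(console.error);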
6 changes: 6 additions & 0 deletions backend/docker/docker-compose.yml
@@ -9,6 +9,12 @@ services:
- "8080:8080"
- "8081:8081"

yorkie-intelligence:
image: "ollama/ollama:latest"
restart: always
ports:
- "11434:11434"

mongo:
build:
context: ./mongodb_replica
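The same service is added to the development compose file. If the backend cannot reach the model, a quick way to verify that the container is up is to query Ollama's model-listing endpoint. Again a sketch, not part of this PR; the URL is assumed from the port mapping above, and the global fetch requires Node 18+:

// Lists the models currently available in the local Ollama instance.
async function listLocalModels(baseUrl = "http://localhost:11434"): Promise<string[]> {
  const res = await fetch(`${baseUrl}/api/tags`); // GET /api/tags returns { models: [...] }
  if (!res.ok) {
    throw new Error(`Ollama is not reachable: ${res.status} ${res.statusText}`);
  }
  const body = (await res.json()) as { models: { name: string }[] };
  return body.models.map((m) => m.name);
}

listLocalModels().then((models) => console.log("Local models:", models), console.error);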
159 changes: 134 additions & 25 deletions backend/package-lock.json

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions backend/package.json
@@ -32,6 +32,7 @@
"@aws-sdk/s3-request-presigner": "^3.509.0",
"@langchain/community": "^0.0.21",
"@langchain/core": "^0.1.18",
"@langchain/ollama": "^0.0.4",
"@nestjs/common": "^10.0.0",
"@nestjs/config": "^3.1.1",
"@nestjs/core": "^10.0.0",
14 changes: 13 additions & 1 deletion backend/src/langchain/langchain.module.ts
@@ -1,10 +1,22 @@
import { Module } from "@nestjs/common";
import { ChatOpenAI } from "@langchain/openai";
import { ChatOllama } from "@langchain/ollama";
import { BaseChatModel } from "@langchain/core/language_models/chat_models";

const chatModelFactory = {
  provide: "ChatModel",
-  useFactory: () => new ChatOpenAI({ modelName: "gpt-4o-mini" }) as BaseChatModel,
+  useFactory: () => {
    const modelType = process.env.YORKIE_INTELLIGENCE;
    if (modelType === "gemma2:2b") {
      return new ChatOllama({
        model: modelType,
        checkOrPullModel: true,
        streaming: true,
      });
} else if (modelType === "openai") {
return new ChatOpenAI({ modelName: "gpt-4o-mini" }) as BaseChatModel;
}
},
};

@Module({
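The factory registers whichever chat model matches YORKIE_INTELLIGENCE under the "ChatModel" token, so consumers only depend on the shared BaseChatModel interface. A minimal sketch of what such a consumer could look like follows; the service and method names are hypothetical, and only the injection token and the interface come from this diff:

import { Inject, Injectable } from "@nestjs/common";
import { BaseChatModel } from "@langchain/core/language_models/chat_models";

@Injectable()
export class IntelligenceService {
  // Resolves to ChatOllama or ChatOpenAI depending on YORKIE_INTELLIGENCE.
  constructor(@Inject("ChatModel") private readonly chatModel: BaseChatModel) {}

  async complete(prompt: string): Promise<string> {
    // Both backends implement invoke(), so callers never branch on the model type.
    const message = await this.chatModel.invoke(prompt);
    return message.content as string;
  }
}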
2 changes: 1 addition & 1 deletion backend/src/settings/settings.service.ts
@@ -10,7 +10,7 @@ export class SettingsService {
  async getSettings(): Promise<GetSettingsResponse> {
    return {
      yorkieIntelligence: {
-        enable: this.configService.get("YORKIE_INTELLIGENCE") === "true",
+        enable: this.configService.get("YORKIE_INTELLIGENCE") !== "false",
        config: {
          features: generateFeatureList(this.configService),
        },
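With this change the feature flag is derived from the same YORKIE_INTELLIGENCE variable as the model selection: anything other than the literal string "false" enables the feature. A small sketch of how the documented values resolve (note that an unset variable also counts as enabled under this comparison):

const resolveEnable = (value: string | undefined): boolean => value !== "false";

resolveEnable("gemma2:2b"); // true: local Ollama model
resolveEnable("openai");    // true: hosted OpenAI model
resolveEnable("false");     // false: Yorkie Intelligence disabled
resolveEnable(undefined);   // true: an unset variable is treated as enabled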