Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
59 changes: 58 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ A Model Context Protocol (MCP) server that enables semantic search and retrieval

## Version

Current version: 0.1.6
Current version: 0.1.7

## Features

Expand Down Expand Up @@ -140,6 +140,24 @@ For OpenAI instead of Ollama:
}
```

For OpenAI with a proxy service:
```json
{
"mcpServers": {
"ragdocs": {
"command": "node",
"args": ["C:/Users/YOUR_USERNAME/AppData/Roaming/npm/node_modules/@qpd-v/mcp-server-ragdocs/build/index.js"],
"env": {
"QDRANT_URL": "http://127.0.0.1:6333",
"EMBEDDING_PROVIDER": "openai",
"OPENAI_API_KEY": "your-openai-api-key",
"OPENAI_BASE_URL": "https://your-proxy-service.example.com/v1"
}
}
}
}
```

2. Using local development setup:
```json
{
Expand Down Expand Up @@ -201,6 +219,26 @@ Windows Setup with OpenAI:
}
```

Windows Setup with OpenAI proxy:
```json
{
"mcpServers": {
"ragdocs": {
"command": "C:\\Program Files\\nodejs\\node.exe",
"args": [
"C:\\Users\\YOUR_USERNAME\\AppData\\Roaming\\npm\\node_modules\\@qpd-v/mcp-server-ragdocs\\build\\index.js"
],
"env": {
"QDRANT_URL": "http://127.0.0.1:6333",
"EMBEDDING_PROVIDER": "openai",
"OPENAI_API_KEY": "your-openai-api-key",
"OPENAI_BASE_URL": "https://your-proxy-service.example.com/v1"
}
}
}
}
```

2. macOS Setup with Ollama:
```json
{
Expand Down Expand Up @@ -248,6 +286,19 @@ With OpenAI:
}
```

With OpenAI proxy:
```json
{
"env": {
"QDRANT_URL": "https://your-cluster-url.qdrant.tech",
"QDRANT_API_KEY": "your-qdrant-api-key",
"EMBEDDING_PROVIDER": "openai",
"OPENAI_API_KEY": "your-openai-api-key",
"OPENAI_BASE_URL": "https://your-proxy-service.example.com/v1"
}
}
```

### Environment Variables

#### Qdrant Configuration
Expand All @@ -263,6 +314,7 @@ With OpenAI:
- For OpenAI: defaults to 'text-embedding-3-small'
- `OLLAMA_URL` (optional): URL of your Ollama instance (defaults to http://localhost:11434)
- `OPENAI_API_KEY` (required if using OpenAI): Your OpenAI API key
- `OPENAI_BASE_URL` (optional): Base URL for OpenAI API when using proxy services

## Available Tools

Expand Down Expand Up @@ -356,6 +408,11 @@ MIT
- Check npm is in PATH: `npm -v`
- Verify global installation: `npm list -g @qpd-v/mcp-server-ragdocs`

5. **OpenAI API Proxy Issues**
- Ensure the base URL is properly formatted (e.g., https://your-proxy-service.example.com/v1)
- Verify the proxy service is compatible with OpenAI's API format
- Check if your API key is valid for the proxy service

For other issues, please check:
- Docker logs: `docker logs $(docker ps -q --filter ancestor=qdrant/qdrant)`
- Ollama status: `ollama list`
Expand Down
2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "@qpd-v/mcp-server-ragdocs",
"version": "0.1.6",
"version": "0.1.7",
"description": "A Model Context Protocol server for fetching and storing documentation in a vector database, enabling semantic search and retrieval to augment LLM capabilities with relevant documentation context.",
"private": false,
"type": "module",
Expand Down
10 changes: 7 additions & 3 deletions src/embeddings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -42,8 +42,11 @@ export class OpenAIProvider implements EmbeddingProvider {
private client: OpenAI;
private model: string;

constructor(apiKey: string, model: string = 'text-embedding-3-small') {
this.client = new OpenAI({ apiKey });
/**
 * Creates an OpenAI-backed embedding provider.
 *
 * @param apiKey  - OpenAI (or proxy) API key used to authenticate the client.
 * @param model   - Embedding model name; defaults to 'text-embedding-3-small'.
 * @param baseURL - Optional override for the API endpoint, e.g. an
 *                  OpenAI-compatible proxy service. An empty string is
 *                  coerced to undefined so the SDK falls back to its default.
 */
constructor(apiKey: string, model: string = 'text-embedding-3-small', baseURL?: string) {
  this.model = model;
  // `baseURL || undefined` keeps the SDK default when baseURL is '' or absent.
  const clientOptions = { apiKey, baseURL: baseURL || undefined };
  this.client = new OpenAI(clientOptions);
}

Expand Down Expand Up @@ -91,6 +94,7 @@ export class EmbeddingService {
provider: 'ollama' | 'openai';
apiKey?: string;
model?: string;
baseURL?: string;
}): EmbeddingService {
switch (config.provider) {
case 'ollama':
Expand All @@ -102,7 +106,7 @@ export class EmbeddingService {
'OpenAI API key is required'
);
}
return new EmbeddingService(new OpenAIProvider(config.apiKey, config.model));
return new EmbeddingService(new OpenAIProvider(config.apiKey, config.model, config.baseURL));
default:
throw new McpError(
ErrorCode.InvalidParams,
Expand Down
11 changes: 9 additions & 2 deletions src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ const COLLECTION_NAME = 'documentation';
const EMBEDDING_PROVIDER = process.env.EMBEDDING_PROVIDER || 'ollama';
const EMBEDDING_MODEL = process.env.EMBEDDING_MODEL;
const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
const OPENAI_BASE_URL = process.env.OPENAI_BASE_URL;

interface QdrantCollectionConfig {
params: {
Expand Down Expand Up @@ -116,7 +117,8 @@ class RagDocsServer {
this.embeddingService = EmbeddingService.createFromConfig({
provider: EMBEDDING_PROVIDER as 'ollama' | 'openai',
model: EMBEDDING_MODEL,
apiKey: OPENAI_API_KEY
apiKey: OPENAI_API_KEY,
baseURL: OPENAI_BASE_URL
});

this.setupToolHandlers();
Expand Down Expand Up @@ -365,6 +367,10 @@ class RagDocsServer {
type: 'string',
description: 'Model to use for embeddings',
},
baseURL: {
type: 'string',
description: 'Base URL for OpenAI API (optional, for proxy services)',
},
},
required: ['text'],
},
Expand Down Expand Up @@ -409,7 +415,8 @@ class RagDocsServer {
const tempEmbeddingService = EmbeddingService.createFromConfig({
provider: args.provider || 'ollama',
apiKey: args.apiKey,
model: args.model
model: args.model,
baseURL: args.baseURL
});

const embedding = await tempEmbeddingService.generateEmbeddings(args.text);
Expand Down