diff --git a/examples/agents/proofreader-agent/.env.baseai.example b/examples/agents/proofreader-agent/.env.baseai.example
new file mode 100644
index 00000000..8c643651
--- /dev/null
+++ b/examples/agents/proofreader-agent/.env.baseai.example
@@ -0,0 +1,21 @@
+# !! SERVER SIDE ONLY !!
+# Keep all your API keys secret — use only on the server side.
+
+# TODO: ADD: Both in your production and local env files.
+# Langbase API key for your User or Org account.
+# How to get this API key https://langbase.com/docs/api-reference/api-keys
+LANGBASE_API_KEY=
+
+# TODO: ADD: LOCAL ONLY. Add only to local env files.
+# The following keys are needed for local pipe runs, for the providers you are using.
+# For Langbase, please add the key to your LLM keysets.
+# Read more: Langbase LLM Keysets https://langbase.com/docs/features/keysets
+OPENAI_API_KEY=
+ANTHROPIC_API_KEY=
+COHERE_API_KEY=
+FIREWORKS_API_KEY=
+GOOGLE_API_KEY=
+GROQ_API_KEY=
+MISTRAL_API_KEY=
+PERPLEXITY_API_KEY=
+TOGETHER_API_KEY=
diff --git a/examples/agents/proofreader-agent/.gitignore b/examples/agents/proofreader-agent/.gitignore
new file mode 100644
index 00000000..c256453b
--- /dev/null
+++ b/examples/agents/proofreader-agent/.gitignore
@@ -0,0 +1,7 @@
+# baseai
+**/.baseai/
+node_modules
+package-lock.json
+pnpm-lock.yaml
+# env file
+.env
diff --git a/examples/agents/proofreader-agent/README.md b/examples/agents/proofreader-agent/README.md
new file mode 100644
index 00000000..ba4de3cb
--- /dev/null
+++ b/examples/agents/proofreader-agent/README.md
@@ -0,0 +1,53 @@
+![Proof Reader Agent by ⌘ BaseAI][cover]
+
+![License: MIT][mit] [![Fork on ⌘ Langbase][fork]][pipe]
+
+## Build a Proof Reader Agent with BaseAI framework — ⌘ Langbase
+
+This AI Agent is built using the BaseAI framework. It leverages an agentic pipe that integrates over 30+ LLMs (including OpenAI, Gemini, Mistral, Llama, Gemma, etc.) and can handle any data, with context sizes of up to 10M+ tokens, supported by memory. The framework is compatible with any front-end framework (such as React, Remix, Astro, Next.js), giving you, as a developer, the freedom to tailor your AI application exactly as you envision.
+
+## Features
+
+- Proof Reader Agent — Built with [BaseAI framework and agentic Pipe ⌘ ][qs].
+- Composable Agents — build and compose agents with BaseAI.
+- Add and sync a deployed pipe from Langbase locally with `npx baseai@latest add` ([see the Code button][pipe]).
+
+## Learn more
+
+1. Check the [Learning path to build an agentic AI pipe with ⌘ BaseAI][learn]
+2. Read the [source code on GitHub][gh] for this agent example
+3. Go through the documentation: [Pipe Quick Start][qs]
+4. Learn more about [Memory features in ⌘ BaseAI][memory]
+5. Learn more about [Tool calls support in ⌘ BaseAI][toolcalls]
+
+
+> NOTE:
+> This is a BaseAI project; you can deploy BaseAI pipes, memory, and tool calls on Langbase.
+
+---
+
+## Authors
+
+This project is created by [Langbase][lb] team members, with contributions from:
+
+- Muhammad-Ali Danish - Software Engineer, [Langbase][lb]
+**_Built by ⌘ [Langbase.com][lb] — Ship hyper-personalized AI assistants with memory!_**
+
+
+[lb]: https://langbase.com
+[pipe]: https://langbase.com/examples/proofreader-agent
+[gh]: https://github.com/LangbaseInc/baseai/tree/main/examples/agents/proofreader-agent
+[cover]:https://raw.githubusercontent.com/LangbaseInc/docs-images/main/baseai/baseai-cover.png
+[download]:https://download-directory.github.io/?url=https://github.com/LangbaseInc/baseai/tree/main/examples/agents/proofreader-agent
+[learn]:https://baseai.dev/learn
+[memory]:https://baseai.dev/docs/memory/quickstart
+[toolcalls]:https://baseai.dev/docs/tools/quickstart
+[deploy]:https://baseai.dev/docs/deployment/authentication
+[signup]: https://langbase.fyi/io
+[qs]:https://baseai.dev/docs/pipe/quickstart
+[docs]:https://baseai.dev/docs
+[xaa]:https://x.com/MrAhmadAwais
+[xab]:https://x.com/AhmadBilalDev
+[local]:http://localhost:9000
+[mit]: https://img.shields.io/badge/license-MIT-blue.svg?style=for-the-badge&color=%23000000
+[fork]: https://img.shields.io/badge/FORK%20ON-%E2%8C%98%20Langbase-000000.svg?style=for-the-badge&logo=%E2%8C%98%20Langbase&logoColor=000000
\ No newline at end of file
diff --git a/examples/agents/proofreader-agent/baseai/baseai.config.ts b/examples/agents/proofreader-agent/baseai/baseai.config.ts
new file mode 100644
index 00000000..68e3cfa6
--- /dev/null
+++ b/examples/agents/proofreader-agent/baseai/baseai.config.ts
@@ -0,0 +1,18 @@
+import type { BaseAIConfig } from 'baseai';
+
+export const config: BaseAIConfig = {
+	log: {
+		isEnabled: false, // set to true to see CLI logs while developing
+		logSensitiveData: false, // keep sensitive request data out of logs
+		pipe: true,
+		'pipe.completion': true,
+		'pipe.request': true,
+		'pipe.response': true,
+		tool: false,
+		memory: false
+	},
+	memory: {
+		useLocalEmbeddings: false // use provider embeddings rather than local ones
+	},
+	envFilePath: '.env' // where LANGBASE_API_KEY and provider keys are read from
+};
diff --git a/examples/agents/proofreader-agent/baseai/memory/proofread-docs/documents/academic_para.txt b/examples/agents/proofreader-agent/baseai/memory/proofread-docs/documents/academic_para.txt
new file mode 100644
index 00000000..68602dff
--- /dev/null
+++ b/examples/agents/proofreader-agent/baseai/memory/proofread-docs/documents/academic_para.txt
@@ -0,0 +1 @@
+The computational demands of Large Language Models (LLMs) have escalated dramatically, commensurate with their increasing size and complexity. Training state-of-the-art LLMs necessitates vast arrays of high-performance GPUs, often numbering in the thousands, and can consume several megawatt-hours of electricity over periods extending to weeks or even months. This resource-intensive process raises pertinent questions about the models' environmental impact and the economic feasibility of their development for all but the most well-funded research institutions or technology companies. Moreover, the inference phase, while less demanding than training, still requires substantial computational resources, particularly for real-time applications, thereby limiting the deployment of these models in resource-constrained environments or edge devices. Consequently, there is a growing impetus in the field to develop more efficient architectures and training paradigms that can mitigate these computational burdens without compromising the remarkable capabilities that have made LLMs so transformative in natural language processing.
\ No newline at end of file
diff --git a/examples/agents/proofreader-agent/baseai/memory/proofread-docs/index.ts b/examples/agents/proofreader-agent/baseai/memory/proofread-docs/index.ts
new file mode 100644
index 00000000..8811ec50
--- /dev/null
+++ b/examples/agents/proofreader-agent/baseai/memory/proofread-docs/index.ts
@@ -0,0 +1,8 @@
+import type { MemoryI } from '@baseai/core';
+// Memory for the proofreader agent; documents live under this memory's documents folder.
+const memoryProofreadDocs = (): MemoryI => ({
+	name: 'proofread-docs',
+	description: 'proofreader built with baseai and memory',
+});
+
+export default memoryProofreadDocs;
diff --git a/examples/agents/proofreader-agent/baseai/pipes/proofreader-agent.ts b/examples/agents/proofreader-agent/baseai/pipes/proofreader-agent.ts
new file mode 100644
index 00000000..dc3fcda2
--- /dev/null
+++ b/examples/agents/proofreader-agent/baseai/pipes/proofreader-agent.ts
@@ -0,0 +1,44 @@
+import type { PipeI } from '@baseai/core';
+import memoryProofreadDocs from '../memory/proofread-docs';
+
+// Pipe definition for the proofreader agent: prompt, model settings, and attached memory.
+const pipeProofreaderAgent = (): PipeI => ({
+	// Replace with your API key https://langbase.com/docs/api-reference/api-keys
+	apiKey: process.env.LANGBASE_API_KEY!,
+	name: `proofreader-agent`,
+	description: `proofreader built with baseai with docs`,
+	status: `private`,
+	model: `openai:gpt-4o-mini`,
+	stream: true,
+	json: false,
+	store: true,
+	moderate: true,
+	top_p: 1,
+	max_tokens: 1000,
+	temperature: 0.7,
+	presence_penalty: 0,
+	frequency_penalty: 0,
+	stop: [],
+	tool_choice: 'auto',
+	parallel_tool_calls: true,
+	messages: [
+		{
+			role: 'system',
+			content:
+				"You are an expert proofreader and language editor specializing in academic, business English, and English linguistics. Your expertise covers science, engineering, and business case studies. You're proficient in CMOS, AP, MLA, and APA styles.\n\nKey Responsibilities:\nImprove academic and professional English\n1. Enhance clarity, structure, and style\n2. Apply appropriate style guidelines (CMOS, AP, MLA, APA)\n3. Maintain original meaning and technical terminology\n\nProcess:\n1. Analyze text context and content.\n2. Identify appropriate style if not specified.\n3. Apply linguistic and stylistic improvements.\n4. Provide reasoning for each modification.\n5. All original sentences should be placed according to the output format.\n\nOutput in format that can be rendered in linux terminal:\nOriginal: \t\n Modified: \t\nReason for Modification: \n\nOriginal: \n Modified: \nReason for Modification: \n...\n\nGuidelines:\nBold modified words/phrases.\nPreserve technical terms and proper nouns.\nAdhere to user instructions and specified style.\nEnsure consistency in formatting and citations.\nPrioritize clarity and scholarly/professional tone.\nBe concise in explanations.\nPlease output in the given Output Format above.\n\nIf no style is specified, identify and apply the most appropriate one based on the text's context."
+		},
+		{ name: 'json', role: 'system', content: '' },
+		{ name: 'safety', role: 'system', content: '' },
+		{
+			name: 'opening',
+			role: 'system',
+			content: 'Welcome to Langbase. Prompt away!'
+		},
+		{ name: 'rag', role: 'system', content: "Below is some CONTEXT for you to answer the questions. ONLY answer from the CONTEXT. CONTEXT consists of multiple information chunks. Each chunk has a source mentioned at the end.\n\nFor each piece of response you provide, cite the source in brackets like so: [1].\n\nAt the end of the answer, always list each source with its corresponding number and provide the document name. like so [1] Filename.doc.\n\nIf you don't know the answer, just say that you don't know. Ask for more context and better questions if needed." }
+	],
+	variables: [],
+	tools: [],
+	memory: [memoryProofreadDocs()] // attach the proofread-docs memory for RAG
+});
+
+export default pipeProofreaderAgent;
diff --git a/examples/agents/proofreader-agent/index.ts b/examples/agents/proofreader-agent/index.ts
new file mode 100644
index 00000000..f00d1fb4
--- /dev/null
+++ b/examples/agents/proofreader-agent/index.ts
@@ -0,0 +1,45 @@
+import 'dotenv/config';
+import { Pipe } from '@baseai/core';
+import inquirer from 'inquirer';
+import ora from 'ora';
+import chalk from 'chalk';
+import pipeProofreaderAgent from './baseai/pipes/proofreader-agent';
+
+const pipe = new Pipe(pipeProofreaderAgent());
+
+// Interactive CLI loop: prompt the user, run the pipe, print the completion.
+async function main() {
+	console.log(chalk.green('Hi! I am Proofreader. Enter "proofread the attached document from CONTEXT"!'));
+	console.log(chalk.yellow('Type "exit" to quit the application.\n'));
+
+	while (true) {
+		const { userMsg } = await inquirer.prompt([
+			{
+				type: 'input',
+				name: 'userMsg',
+				message: chalk.blue('Enter your query (or type "exit" to quit):'),
+			},
+		]);
+
+		if (userMsg.toLowerCase() === 'exit') {
+			console.log(chalk.green('Goodbye!'));
+			break;
+		}
+
+		const spinner = ora('Processing your request...').start();
+
+		try {
+			const { completion: proofReaderResponse } = await pipe.run({
+				messages: [{ role: 'user', content: userMsg }],
+			});
+
+			spinner.stop();
+			console.log(chalk.cyan('Agent:'));
+			console.log(proofReaderResponse);
+		} catch (error) {
+			spinner.stop(); // always clear the spinner before reporting the error
+			console.error(chalk.red('Error processing your request:'), error);
+		}
+	}
+}
+main().catch(error => console.error(chalk.red('Fatal error:'), error)); // don't leave the promise floating
\ No newline at end of file
diff --git a/examples/agents/proofreader-agent/package.json b/examples/agents/proofreader-agent/package.json
new file mode 100644
index 00000000..a3226141
--- /dev/null
+++ b/examples/agents/proofreader-agent/package.json
@@ -0,0 +1,24 @@
+{
+ "name": "proofreader-agent",
+ "version": "1.0.0",
+ "main": "index.ts",
+ "scripts": {
+ "test": "echo \"Error: no test specified\" && exit 1",
+ "baseai": "baseai"
+ },
+ "keywords": [],
+ "author": "",
+  "license": "MIT",
+ "description": "",
+ "dependencies": {
+ "@baseai/core": "^0.9.3",
+ "dotenv": "^16.4.5",
+ "inquirer": "^11.1.0",
+ "marked": "^13.0.3",
+ "marked-terminal": "^7.1.0",
+ "ora": "^8.1.0"
+ },
+ "devDependencies": {
+ "baseai": "^0.9.3"
+ }
+}