diff --git a/.github/actions/setup-compose/action.yaml b/.github/actions/setup-compose/action.yaml index 39c381e2..74925334 100644 --- a/.github/actions/setup-compose/action.yaml +++ b/.github/actions/setup-compose/action.yaml @@ -1,7 +1,7 @@ -name: "Setup Docker Compose" -description: "Installs latest Docker Compose" +name: Setup Docker Compose +description: Installs latest Docker Compose runs: - using: "composite" + using: composite steps: - name: Set up Docker Compose uses: docker/setup-compose-action@v1 diff --git a/.github/workflows/a2a.yaml b/.github/workflows/a2a.yaml index b6bbc7f2..19513041 100644 --- a/.github/workflows/a2a.yaml +++ b/.github/workflows/a2a.yaml @@ -3,14 +3,14 @@ name: A2A CI on: push: paths: - - '.github/workflows/a2a.yaml' - - 'a2a/**' + - .github/workflows/a2a.yaml + - a2a/** branches: - main pull_request: paths: - - '.github/workflows/a2a.yaml' - - 'a2a/**' + - .github/workflows/a2a.yaml + - a2a/** permissions: contents: read diff --git a/.github/workflows/adk-cerebras.yaml b/.github/workflows/adk-cerebras.yaml index 8edc9a91..d1598493 100644 --- a/.github/workflows/adk-cerebras.yaml +++ b/.github/workflows/adk-cerebras.yaml @@ -3,14 +3,14 @@ name: ADK-CEREBRAS CI on: push: paths: - - '.github/workflows/adk-cerebras.yaml' - - 'adk-cerebras/**' + - .github/workflows/adk-cerebras.yaml + - adk-cerebras/** branches: - main pull_request: paths: - - '.github/workflows/adk-cerebras.yaml' - - 'adk-cerebras/**' + - .github/workflows/adk-cerebras.yaml + - adk-cerebras/** permissions: contents: read @@ -38,6 +38,3 @@ jobs: - name: Check lint run: uv run ruff check - - #- name: Check types - # run: uv run pyright diff --git a/.github/workflows/adk.yaml b/.github/workflows/adk.yaml index 23f8136e..eaa2fb8b 100644 --- a/.github/workflows/adk.yaml +++ b/.github/workflows/adk.yaml @@ -3,14 +3,14 @@ name: ADK CI on: push: paths: - - '.github/workflows/adk.yaml' - - 'adk/**' + - .github/workflows/adk.yaml + - adk/** branches: - main pull_request: paths: 
- - '.github/workflows/adk.yaml' - - 'adk/**' + - .github/workflows/adk.yaml + - adk/** permissions: contents: read diff --git a/.github/workflows/agno.yaml b/.github/workflows/agno.yaml index 96819d25..c98fe1a1 100644 --- a/.github/workflows/agno.yaml +++ b/.github/workflows/agno.yaml @@ -3,14 +3,14 @@ name: Agno CI on: push: paths: - - '.github/workflows/agno.yaml' - - 'agno/**' + - .github/workflows/agno.yaml + - agno/** branches: - main pull_request: paths: - - '.github/workflows/agno.yaml' - - 'agno/**' + - .github/workflows/agno.yaml + - agno/** permissions: contents: read diff --git a/.github/workflows/crew-ai.yaml b/.github/workflows/crew-ai.yaml index 36c6fe05..4d9fccb3 100644 --- a/.github/workflows/crew-ai.yaml +++ b/.github/workflows/crew-ai.yaml @@ -3,14 +3,14 @@ name: Crew AI CI on: push: paths: - - '.github/workflows/crew-ai.yaml' - - 'crew-ai/**' + - .github/workflows/crew-ai.yaml + - crew-ai/** branches: - main pull_request: paths: - - '.github/workflows/crew-ai.yaml' - - 'crew-ai/**' + - .github/workflows/crew-ai.yaml + - crew-ai/** permissions: contents: read @@ -36,7 +36,7 @@ jobs: - name: Check format run: poetry run ruff format --check - + - name: Check lint run: poetry run ruff check diff --git a/.github/workflows/langgraph.yaml b/.github/workflows/langgraph.yaml index 2d9f4827..14b9727b 100644 --- a/.github/workflows/langgraph.yaml +++ b/.github/workflows/langgraph.yaml @@ -3,14 +3,14 @@ name: LangGraph CI on: push: paths: - - '.github/workflows/langgraph.yaml' - - 'langgraph/**' + - .github/workflows/langgraph.yaml + - langgraph/** branches: - main pull_request: paths: - - '.github/workflows/langgraph.yaml' - - 'langgraph/**' + - .github/workflows/langgraph.yaml + - langgraph/** permissions: contents: read diff --git a/.github/workflows/markdownlint.yaml b/.github/workflows/markdownlint.yaml index 1d2cdf97..f794447c 100644 --- a/.github/workflows/markdownlint.yaml +++ b/.github/workflows/markdownlint.yaml @@ -1,17 +1,17 @@ # 
.github/workflows/markdownlint.yml -name: Lint +name: Lint Markdown on: push: paths: - - '**/*.md' - - '.github/workflows/markdownlint.yaml' + - "**/*.md" + - .github/workflows/markdownlint.yaml branches: - main pull_request: paths: - - '**/*.md' - - '.github/workflows/markdownlint.yaml' + - "**/*.md" + - .github/workflows/markdownlint.yaml permissions: contents: read diff --git a/.github/workflows/yamllint.yaml b/.github/workflows/yamllint.yaml new file mode 100644 index 00000000..56456705 --- /dev/null +++ b/.github/workflows/yamllint.yaml @@ -0,0 +1,32 @@ +# .github/workflows/yamllint.yaml +name: Lint YAML + +on: + push: + paths: + - "**/*.yaml" + - "**/*.yml" + - .github/workflows/yamllint.yaml + branches: + - main + pull_request: + paths: + - "**/*.yaml" + - "**/*.yml" + - .github/workflows/yamllint.yaml + +permissions: + contents: read + +jobs: + lint-yaml: + runs-on: ubuntu-latest + steps: + - name: Checkout repo + uses: actions/checkout@v4 + + - name: Install Task + uses: arduino/setup-task@8b35f53e4d5a51bf691c94c71f2c7222483206cb + + - name: Lint YAML Files + run: task lint:yaml diff --git a/.yamllint b/.yamllint new file mode 100644 index 00000000..e6e63afc --- /dev/null +++ b/.yamllint @@ -0,0 +1,19 @@ +extends: default +ignore: + - "**/.venv/**" + - "**/pnpm-lock.yaml" +rules: + document-start: + present: false + line-length: + max: 1024 + new-lines: + type: unix + new-line-at-end-of-file: enable + octal-values: + forbid-implicit-octal: true + quoted-strings: + required: only-when-needed + quote-type: double + truthy: + check-keys: false diff --git a/Dockerfile.tools b/Dockerfile.tools new file mode 100644 index 00000000..ec754f4a --- /dev/null +++ b/Dockerfile.tools @@ -0,0 +1,7 @@ +FROM davidanson/markdownlint-cli2:v0.18.1 AS markdownlint + +FROM python:3.13-slim AS yamllint +RUN pip install yamllint==1.37.1 +WORKDIR /workdir +ENTRYPOINT [ "yamllint" ] +CMD [] diff --git a/Taskfile.yaml b/Taskfile.yaml index 9e1b8b60..b33aa031 100644 ---
a/Taskfile.yaml +++ b/Taskfile.yaml @@ -1,21 +1,42 @@ -version: '3' - -vars: - MARKDOWN_CLI2_CMD: docker run --rm -v $(pwd):/workdir davidanson/markdownlint-cli2:v0.18.1 +version: "3" tasks: + build:markdownlint: + desc: Build the markdownlint Docker image + cmds: + - docker build -f Dockerfile.tools -t markdownlint --target markdownlint . + dir: . + build:yamllint: + desc: Build the yamllint Docker image + cmds: + - docker build -f Dockerfile.tools -t yamllint --target yamllint . + dir: . + lint:markdown: desc: Lint Markdown files + deps: + - build:markdownlint cmds: - - "{{ .MARKDOWN_CLI2_CMD }}" + - docker run --rm -v $(pwd):/workdir markdownlint dir: . lint:markdown:fix: desc: Lint and Fix Markdown files + deps: + - build:markdownlint + cmds: + - docker run --rm -v $(pwd):/workdir markdownlint --fix + dir: . + + lint:yaml: + desc: Lint YAML files + deps: + - build:yamllint cmds: - - "{{ .MARKDOWN_CLI2_CMD }} --fix" + - docker run --rm -v $(pwd):/workdir -w /workdir yamllint . dir: . lint: deps: - lint:markdown + - lint:yaml desc: Lint all files diff --git a/a2a/agents/reviser.yaml b/a2a/agents/reviser.yaml index 1f48f2aa..0adf3abf 100644 --- a/a2a/agents/reviser.yaml +++ b/a2a/agents/reviser.yaml @@ -27,7 +27,7 @@ instructions: | * If the answer is accurate, you should output exactly the same answer text as you are given. * If the answer is inaccurate, disputed, or unsupported, then you should output your revised answer text. - In any case YOU MUST output only your answer. + In any case YOU MUST output only your answer. 
model: name: ${LLM_AGENT_MODEL_NAME} diff --git a/a2a/compose.dmr.yaml b/a2a/compose.dmr.yaml index 732f4a6c..7bfc69af 100644 --- a/a2a/compose.dmr.yaml +++ b/a2a/compose.dmr.yaml @@ -4,7 +4,7 @@ services: build: target: auditor-agent ports: - - "8080:8080" + - 8080:8080 environment: - CRITIC_AGENT_URL=http://critic-agent-a2a:8001 - REVISER_AGENT_URL=http://reviser-agent-a2a:8001 @@ -13,8 +13,8 @@ services: - reviser-agent-a2a models: gemma3: - endpoint_var: MODEL_RUNNER_URL - model_var: MODEL_RUNNER_MODEL + endpoint_var: MODEL_RUNNER_URL + model_var: MODEL_RUNNER_MODEL critic-agent-a2a: build: @@ -24,10 +24,10 @@ services: depends_on: - mcp-gateway models: - gemma3: - # specify which environment variables to inject into the container - endpoint_var: MODEL_RUNNER_URL - model_var: MODEL_RUNNER_MODEL + gemma3: + # specify which environment variables to inject into the container + endpoint_var: MODEL_RUNNER_URL + model_var: MODEL_RUNNER_MODEL reviser-agent-a2a: build: @@ -37,9 +37,9 @@ services: depends_on: - mcp-gateway models: - gemma3: - endpoint_var: MODEL_RUNNER_URL - model_var: MODEL_RUNNER_MODEL + gemma3: + endpoint_var: MODEL_RUNNER_URL + model_var: MODEL_RUNNER_MODEL mcp-gateway: # mcp-gateway secures your MCP servers @@ -56,5 +56,5 @@ models: # declare LLM models to pull and use gemma3: model: ai/gemma3:4B-Q4_0 - context_size: 10000 # 3.5 GB VRAM - #context_size: 131000 # 7.6 GB VRAM + context_size: 10000 # 3.5 GB VRAM + # context_size: 131000 # 7.6 GB VRAM diff --git a/a2a/compose.offload.yaml b/a2a/compose.offload.yaml index 07d6af51..fbb3659f 100644 --- a/a2a/compose.offload.yaml +++ b/a2a/compose.offload.yaml @@ -1,7 +1,7 @@ models: gemma3: # pre-pull the model on Docker Model Runner - model: ai/gemma3-qat:27B-Q4_K_M - context_size: 10000 # 18.6 GB VRAM - # context_size: 80000 # 28.37 GB VRAM - # context_size: 131000 # 35.5 GB VRAM + model: ai/gemma3-qat:27B-Q4_K_M + context_size: 10000 # 18.6 GB VRAM + # context_size: 80000 # 28.37 GB VRAM + # 
context_size: 131000 # 35.5 GB VRAM diff --git a/a2a/compose.yaml b/a2a/compose.yaml index 0bd11815..a123d279 100644 --- a/a2a/compose.yaml +++ b/a2a/compose.yaml @@ -4,7 +4,7 @@ services: build: target: auditor-agent ports: - - "8080:8080" + - 8080:8080 environment: - CRITIC_AGENT_URL=http://critic-agent-a2a:8001 - REVISER_AGENT_URL=http://reviser-agent-a2a:8001 diff --git a/adk-cerebras/compose.yml b/adk-cerebras/compose.yml index 4f6af8da..8d45710d 100644 --- a/adk-cerebras/compose.yml +++ b/adk-cerebras/compose.yml @@ -23,10 +23,10 @@ services: DEVDUCK_AGENT_DESCRIPTION: Main development assistant and project coordinator DEVDUCK_AGENT_INSTRUCTION: | You are DevDuck. All your answers MUST start with "DEVDUCK SPEAKING:". - You are a Go (Golang) programming expert designed to coordinate Golang experts, + You are a Go (Golang) programming expert designed to coordinate Golang experts, The experts are Bob and Cerebras. - - If the user specifically wants to speak with cerebras, route to the cerebras_agent. - - If the user specifically wants to speak with bob, route to the bob_agent. + - If the user specifically wants to speak with cerebras, route to the cerebras_agent. + - If the user specifically wants to speak with bob, route to the bob_agent. # -------------------------------------- # Bob Agent Configuration @@ -35,13 +35,13 @@ services: BOB_AGENT_DESCRIPTION: Useful agent for general development tasks and project coordination BOB_AGENT_INSTRUCTION: | You are Bob. All your answers MUST start with "BOB SPEAKING:". - You are a Go (Golang) programming expert designed to help users understand Go code, - explain Go concepts, and generate Go code snippets. + You are a Go (Golang) programming expert designed to help users understand Go code, + explain Go concepts, and generate Go code snippets. Your responses should be accurate, practical, and educational. ### IMPORTANT: 1. 
**No Parent Agent Routing:** Do not route back to the parent agent unless the user explicitly requests it. - 2. If the user specifically wants to speak with cerebras, route to the cerebras_agent. + 2. If the user specifically wants to speak with cerebras, route to the cerebras_agent. # -------------------------------------- # Cerebras Agent Configuration @@ -53,13 +53,13 @@ services: CEREBRAS_AGENT_DESCRIPTION: Specialized agent for advanced computational tasks and complex problem-solving CEREBRAS_AGENT_INSTRUCTION: | You are Cerebras. All your answers MUST start with "CEREBRAS SPEAKING:". - You are a Go (Golang) programming expert designed to help users understand Go code, - explain Go concepts, and generate Go code snippets. + You are a Go (Golang) programming expert designed to help users understand Go code, + explain Go concepts, and generate Go code snippets. Your responses should be accurate, practical, and educational. ### IMPORTANT: 1. **No Parent Agent Routing:** Do not route back to the parent agent unless the user explicitly requests it. - 2. If the user specifically wants to speak with bob, route to the bob_agent. + 2. If the user specifically wants to speak with bob, route to the bob_agent. 
# NOTE: Define models models: @@ -67,4 +67,3 @@ models: model: ai/qwen2.5:latest qwen3: model: unsloth/qwen3-gguf:4B-UD-Q4_K_XL - diff --git a/adk-sock-shop/compose.gcloud.yaml b/adk-sock-shop/compose.gcloud.yaml index 0e75bc2f..929487be 100644 --- a/adk-sock-shop/compose.gcloud.yaml +++ b/adk-sock-shop/compose.gcloud.yaml @@ -6,7 +6,7 @@ services: image: weaveworksdemos/front-end:0.3.12 hostname: front-end ports: - - "9090:8079" + - 9090:8079 restart: always cap_drop: - all @@ -21,7 +21,7 @@ services: - NET_BIND_SERVICE read_only: true ports: - - "8081:80" + - 8081:80 catalogue-db: image: weaveworksdemos/catalogue-db:0.3.0 hostname: catalogue-db @@ -42,9 +42,9 @@ services: volumes: - ./data/mongodb:/docker-entrypoint-initdb.d:ro - mongodb_data:/data/db - command: ["mongod", "--quiet", "--logpath", "/var/log/mongodb/mongod.log", "--logappend", "--setParameter", "logComponentVerbosity={network:{verbosity:0}}"] + command: [mongod, --quiet, --logpath, /var/log/mongodb/mongod.log, --logappend, --setParameter, "logComponentVerbosity={network:{verbosity:0}}"] healthcheck: - test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"] + test: [CMD, mongosh, --eval, db.adminCommand('ping')] interval: 10s timeout: 5s retries: 5 @@ -82,7 +82,7 @@ services: secrets: - openai-api-key models: - qwen3 : + qwen3: endpoint_var: MODEL_RUNNER_URL model_var: MODEL_RUNNER_MODEL @@ -109,9 +109,7 @@ models: qwen3: # pre-pull the model when starting Docker Model Runner model: ai/qwen3:14B-Q6_K - #context_size: 131000 # 35.5 GB VRAM - #runtime_flags: - #- --no-prefill-assistant + # context_size: 131000 # 35.5 GB VRAM volumes: mongodb_data: diff --git a/adk-sock-shop/compose.sockstore.yaml b/adk-sock-shop/compose.sockstore.yaml index 051ec6bb..6cc3f072 100644 --- a/adk-sock-shop/compose.sockstore.yaml +++ b/adk-sock-shop/compose.sockstore.yaml @@ -6,7 +6,7 @@ services: image: weaveworksdemos/front-end:0.3.12 hostname: front-end ports: - - "9090:8079" + - 9090:8079 restart: always 
cap_drop: - all @@ -21,7 +21,7 @@ services: - NET_BIND_SERVICE read_only: true ports: - - "8081:80" + - 8081:80 depends_on: - catalogue-db catalogue-db: @@ -46,9 +46,9 @@ services: volumes: - ./data/mongodb:/docker-entrypoint-initdb.d:ro - mongodb_data:/data/db - command: ["mongod", "--quiet", "--logpath", "/var/log/mongodb/mongod.log", "--logappend", "--setParameter", "logComponentVerbosity={network:{verbosity:0}}"] + command: [mongod, --quiet, --logpath, /var/log/mongodb/mongod.log, --logappend, --setParameter, "logComponentVerbosity={network:{verbosity:0}}"] healthcheck: - test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"] + test: [CMD, mongosh, --eval, db.adminCommand('ping')] interval: 10s timeout: 5s retries: 5 diff --git a/adk-sock-shop/compose.yaml b/adk-sock-shop/compose.yaml index 3394fc03..39ff1352 100644 --- a/adk-sock-shop/compose.yaml +++ b/adk-sock-shop/compose.yaml @@ -6,7 +6,7 @@ services: image: weaveworksdemos/front-end:0.3.12 hostname: front-end ports: - - "9090:8079" + - 9090:8079 restart: always cap_drop: - all @@ -21,7 +21,7 @@ services: - NET_BIND_SERVICE read_only: true ports: - - "8081:80" + - 8081:80 depends_on: - catalogue-db catalogue-db: @@ -46,9 +46,9 @@ services: volumes: - ./data/mongodb:/docker-entrypoint-initdb.d:ro - mongodb_data:/data/db - command: ["mongod", "--quiet", "--logpath", "/var/log/mongodb/mongod.log", "--logappend", "--setParameter", "logComponentVerbosity={network:{verbosity:0}}"] + command: [mongod, --quiet, --logpath, /var/log/mongodb/mongod.log, --logappend, --setParameter, "logComponentVerbosity={network:{verbosity:0}}"] healthcheck: - test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"] + test: [CMD, mongosh, --eval, db.adminCommand('ping')] interval: 10s timeout: 5s retries: 5 @@ -84,7 +84,7 @@ services: secrets: - openai-api-key models: - qwen3 : + qwen3: endpoint_var: MODEL_RUNNER_URL model_var: MODEL_RUNNER_MODEL @@ -111,9 +111,7 @@ models: qwen3: # pre-pull the model when starting 
Docker Model Runner model: ai/qwen3:14B-Q6_K - #context_size: 131000 # 35.5 GB VRAM - #runtime_flags: - #- --no-prefill-assistant + # context_size: 131000 # 35.5 GB VRAM volumes: mongodb_data: diff --git a/adk/compose.offload.yaml b/adk/compose.offload.yaml index 07d6af51..fbb3659f 100644 --- a/adk/compose.offload.yaml +++ b/adk/compose.offload.yaml @@ -1,7 +1,7 @@ models: gemma3: # pre-pull the model on Docker Model Runner - model: ai/gemma3-qat:27B-Q4_K_M - context_size: 10000 # 18.6 GB VRAM - # context_size: 80000 # 28.37 GB VRAM - # context_size: 131000 # 35.5 GB VRAM + model: ai/gemma3-qat:27B-Q4_K_M + context_size: 10000 # 18.6 GB VRAM + # context_size: 80000 # 28.37 GB VRAM + # context_size: 131000 # 35.5 GB VRAM diff --git a/adk/compose.yaml b/adk/compose.yaml index 6e8886e9..b3a12da2 100644 --- a/adk/compose.yaml +++ b/adk/compose.yaml @@ -4,14 +4,14 @@ services: context: . ports: # expose port for web interface - - "8080:8080" + - 8080:8080 environment: # point adk at the MCP gateway - MCPGATEWAY_ENDPOINT=http://mcp-gateway:8811/sse depends_on: - mcp-gateway models: - gemma3 : + gemma3: endpoint_var: MODEL_RUNNER_URL model_var: MODEL_RUNNER_MODEL @@ -28,6 +28,6 @@ models: gemma3: # pre-pull the model when starting Docker Model Runner model: ai/gemma3:4B-Q4_0 - context_size: 10000 # 3.5 GB VRAM + context_size: 10000 # 3.5 GB VRAM # increase context size to handle search results - # context_size: 131000 # 7.6 GB VRAM + # context_size: 131000 # 7.6 GB VRAM diff --git a/agno/README.md b/agno/README.md index b94cddf0..9c2aaba5 100644 --- a/agno/README.md +++ b/agno/README.md @@ -37,7 +37,7 @@ a **Writer** agent that summarizes and categorizes them into a comprehensive mar github.personal_access_token=ghp_XXXXX ``` - or + or - set the MCP secret in Docker Desktop and export if you're running with Docker Offload. 
diff --git a/agno/compose.offload.yaml b/agno/compose.offload.yaml index d7090614..079a7c2e 100644 --- a/agno/compose.offload.yaml +++ b/agno/compose.offload.yaml @@ -8,7 +8,7 @@ services: models: qwen3-large: - model: ai/qwen3:30B-A3B-Q4_K_M # 17.28 GB - context_size: 15000 # 20 GB VRAM + model: ai/qwen3:30B-A3B-Q4_K_M # 17.28 GB + context_size: 15000 # 20 GB VRAM # increase context size to handle larger results - # context_size: 41000 # 24 GB VRAM \ No newline at end of file + # context_size: 41000 # 24 GB VRAM diff --git a/agno/compose.yaml b/agno/compose.yaml index a135809c..90635792 100644 --- a/agno/compose.yaml +++ b/agno/compose.yaml @@ -5,7 +5,7 @@ services: build: context: agent ports: - - "7777:7777" + - 7777:7777 environment: # point agents at the MCP gateway - MCPGATEWAY_URL=http://mcp-gateway:8811 @@ -24,7 +24,7 @@ services: build: context: agent-ui ports: - - "3000:3000" + - 3000:3000 environment: - AGENTS_URL=http://localhost:7777 depends_on: @@ -43,31 +43,29 @@ services: - --servers=github-official # add an interceptor to format and simplify the output of the GitHub issues tool # this interceptor will convert the JSON output of the tool into a CSV format - - --interceptor - - after:exec:cat | jq '.content[0].text = (.content[0].text | fromjson | map(select(. 
!= null) | [(.number // ""), (.state // ""), (.title // ""), (.user.login // ""), ((.labels // []) | map(.name) | join(";")), (.created_at // "")] | @csv) | join("\n"))' secrets: - mcp_secret models: qwen3-small: # pre-pull the model when starting Docker Model Runner - #model: ai/qwen3:30B-A3B-Q4_K_M - model: ai/qwen3:8B-Q4_0 # 4.44 GB - context_size: 15000 # 7 GB VRAM + # model: ai/qwen3:30B-A3B-Q4_K_M + model: ai/qwen3:8B-Q4_0 # 4.44 GB + context_size: 15000 # 7 GB VRAM # increase context size to handle larger results - # context_size: 41000 # 13 GB VRAM + # context_size: 41000 # 13 GB VRAM qwen3-medium: - model: ai/qwen3:14B-Q6_K # 11.28 GB - context_size: 15000 # 15 GB VRAM + model: ai/qwen3:14B-Q6_K # 11.28 GB + context_size: 15000 # 15 GB VRAM # increase context size to handle larger results - # context_size: 41000 # 21 GB VRAM + # context_size: 41000 # 21 GB VRAM - # The qwen3-large model is defined in compose.offload.yaml - # because it requires more resources and is intended to run with Docker Offload. - # A recommended practice with Docker Compose is to isolate specialized configurations - # in override files. These files modify the base setup when applied, - # in our case: - # docker compose -f compose.yaml -f compose.offload.yaml up --build + # The qwen3-large model is defined in compose.offload.yaml + # because it requires more resources and is intended to run with Docker Offload. + # A recommended practice with Docker Compose is to isolate specialized configurations + # in override files. These files modify the base setup when applied, + # in our case: + # docker compose -f compose.yaml -f compose.offload.yaml up --build # mount the secrets file for MCP servers secrets: diff --git a/crew-ai/compose.yaml b/crew-ai/compose.yaml index 2bc2a55b..60deb71d 100644 --- a/crew-ai/compose.yaml +++ b/crew-ai/compose.yaml @@ -1,10 +1,10 @@ services: agents: build: - context: . + context: . 
environment: - MCP_SERVER_URL=http://mcp-gateway:8811/sse - restart: no + restart: "no" depends_on: - mcp-gateway models: diff --git a/embabel/README.md b/embabel/README.md index 9b60b912..9f5bedd8 100644 --- a/embabel/README.md +++ b/embabel/README.md @@ -20,7 +20,8 @@ It demonstrates the power of the [Embabel agent framework](https://www.github.co ### Clone the project repository > [!IMPORTANT] -> The compose.yaml file is in an [upstream repository](https://github.com/embabel/tripper). To try out this project, you'll have to first clone the repo. +> The compose.yaml file is in an [upstream repository](https://github.com/embabel/tripper). +> To try out this project, you'll have to first clone the repo. ```sh git clone git@github.com:embabel/tripper.git diff --git a/langchaingo/compose.yaml b/langchaingo/compose.yaml index 7d76d30d..9076416e 100644 --- a/langchaingo/compose.yaml +++ b/langchaingo/compose.yaml @@ -12,7 +12,7 @@ services: models: gemma: endpoint_var: MODEL_RUNNER_URL - model_var: MODEL_RUNNER_MODEL + model_var: MODEL_RUNNER_MODEL mcp-gateway: # mcp-gateway secures your MCP servers image: docker/mcp-gateway:latest diff --git a/langgraph/compose.yaml b/langgraph/compose.yaml index da9bc154..9cf719e3 100644 --- a/langgraph/compose.yaml +++ b/langgraph/compose.yaml @@ -6,7 +6,7 @@ services: POSTGRES_PASSWORD: password POSTGRES_DB: database healthcheck: - test: ["CMD-SHELL", "pg_isready -U user -d database"] + test: [CMD-SHELL, pg_isready -U user -d database] interval: 1s timeout: 3s retries: 10 diff --git a/spring-ai/compose.yaml b/spring-ai/compose.yaml index f8aac567..6812bbc4 100644 --- a/spring-ai/compose.yaml +++ b/spring-ai/compose.yaml @@ -14,7 +14,7 @@ services: models: gemma: endpoint_var: MODEL_RUNNER_URL - model_var: MODEL_RUNNER_MODEL + model_var: MODEL_RUNNER_MODEL mcp-gateway: # mcp-gateway secures your MCP servers image: docker/mcp-gateway:latest