diff --git a/applications/chatops/slack-bot/.gitignore b/applications/chatops/slack-bot/.gitignore index e372cf2..7cdc743 100644 --- a/applications/chatops/slack-bot/.gitignore +++ b/applications/chatops/slack-bot/.gitignore @@ -30,3 +30,10 @@ npm-debug.log* # TypeScript *.tsbuildinfo + +# Performance test results +performance-tests/results/*.json +performance-tests/results/*.html + +# Environment files +performance-tests/.env diff --git a/applications/chatops/slack-bot/Makefile b/applications/chatops/slack-bot/Makefile index 25f346c..5dc5f82 100644 --- a/applications/chatops/slack-bot/Makefile +++ b/applications/chatops/slack-bot/Makefile @@ -48,6 +48,16 @@ help: @echo " make deploy- - Upload to S3 → Deploy" @echo " make deploy-build - Deploy build worker from local (default)" @echo "" + @echo "$(GREEN)Performance Testing:$(NC)" + @echo " make perf-test - Run test (default: minimal ~1min)" + @echo " Options: PROFILE=minimal|light|full" + @echo " make perf-analyze - Analyze latest test results" + @echo " make perf-analyze-quiet - Analyze (quiet mode, for CI/CD)" + @echo " make perf-summary - Quick summary" + @echo " make perf-report - Generate HTML report" + @echo " make perf-capture - Capture HTML as screenshot" + @echo " make perf-clean - Clean test results" + @echo "" @echo "$(GREEN)Deploy Lambda (Advanced):$(NC)" @echo " make deploy--local - Deploy from local dist/ (fast, no upload)" @echo "" @@ -224,3 +234,70 @@ clean-all: clean @echo "$(BLUE)Cleaning all dependencies...$(NC)" rm -rf node_modules @echo "$(GREEN)✓ Deep clean complete$(NC)" + +# ----------------------------------------------------------------------------- +# Performance Testing +# ----------------------------------------------------------------------------- + +# Test profile configuration +PERF_PROFILE ?= minimal +PERF_CONFIG_minimal = artillery-echo-minimal.yml +PERF_CONFIG_light = artillery-echo-light.yml +PERF_CONFIG_full = artillery-config.yml + +perf-test-install: + @if ! 
command -v artillery > /dev/null; then \ + echo "$(BLUE)Installing Artillery...$(NC)"; \ + npm install -g artillery artillery-plugin-metrics-by-endpoint; \ + fi + @cd performance-tests && npm install --no-save @aws-sdk/client-ssm 2>/dev/null || true + +perf-test: perf-test-install + @echo "$(BLUE)Running performance test [$(PERF_PROFILE)]...$(NC)" + @if [ -z "$(API_GATEWAY_URL)" ]; then \ + API_URL=$$(cd $(SANDBOX_ROOT)/slack-api-gateway && terragrunt output -raw api_gateway_url 2>/dev/null); \ + if [ -z "$$API_URL" ]; then \ + echo "$(YELLOW)✗ Could not get API Gateway URL$(NC)"; \ + echo "$(YELLOW) Set manually: API_GATEWAY_URL=https://xxx...$(NC)"; \ + exit 1; \ + fi; \ + else \ + API_URL=$(API_GATEWAY_URL); \ + fi; \ + CONFIG=$(PERF_CONFIG_$(PERF_PROFILE)); \ + export API_GATEWAY_URL=$$API_URL ENVIRONMENT=$(ENVIRONMENT) AWS_REGION=$(REGION); \ + cd performance-tests && artillery run $$CONFIG --output results/test-$$(date +%Y%m%d-%H%M%S).json + +perf-analyze: + @echo "$(BLUE)Analyzing latest test results...$(NC)" + @cd performance-tests && ./analyze-performance.sh --from-test + +perf-analyze-quiet: + @cd performance-tests && ./analyze-e2e-json.sh 2>&1 | grep -E '(✓|Analyzing|Error)' || true + +perf-summary: + @LATEST=$$(ls -t performance-tests/results/*.json 2>/dev/null | grep -v '\.metrics\.json' | head -n1); \ + if [ -z "$$LATEST" ]; then echo "$(YELLOW)No results$(NC)"; exit 1; fi; \ + echo "$(GREEN)$$LATEST$(NC)"; \ + jq -r '"Requests: " + (.aggregate.counters["http.requests"] | tostring), \ + "P50: " + (.aggregate.summaries["http.response_time"].median | tostring) + "ms", \ + "P95: " + (.aggregate.summaries["http.response_time"].p95 | tostring) + "ms", \ + "Errors: " + ((.aggregate.counters["errors.total"] // 0) | tostring)' $$LATEST + +perf-report: + @LATEST=$$(ls -t performance-tests/results/*.json 2>/dev/null | grep -v '\.metrics\.json' | head -n1); \ + if [ -z "$$LATEST" ]; then echo "$(YELLOW)No results$(NC)"; exit 1; fi; \ + 
REPORT=$${LATEST%.json}.html; \ + node performance-tests/render-report.js $$LATEST $$REPORT; \ + echo "$(GREEN)✓ Report: $$REPORT$(NC)" + +perf-capture: + @LATEST=$$(ls -t performance-tests/results/*.html 2>/dev/null | head -n1); \ + if [ -z "$$LATEST" ]; then echo "$(YELLOW)No HTML report found$(NC)"; exit 1; fi; \ + OUTPUT=$${LATEST%.html}.png; \ + node performance-tests/capture-report.js $$LATEST $$OUTPUT; \ + echo "$(GREEN)✓ Screenshot: $$OUTPUT$(NC)" + +perf-clean: + @rm -rf performance-tests/results/*.json performance-tests/results/*.html performance-tests/results/*.png + @echo "$(GREEN)✓ Cleaned$(NC)" \ No newline at end of file diff --git a/applications/chatops/slack-bot/package-lock.json b/applications/chatops/slack-bot/package-lock.json index 403d0c8..94c5f6a 100644 --- a/applications/chatops/slack-bot/package-lock.json +++ b/applications/chatops/slack-bot/package-lock.json @@ -22,6 +22,7 @@ "eslint": "^8.56.0", "jest": "^29.7.0", "nock": "^13.5.0", + "puppeteer": "^24.34.0", "ts-jest": "^29.1.1", "ts-node": "^10.9.2", "typescript": "^5.3.3" @@ -1974,6 +1975,28 @@ "node": ">= 8" } }, + "node_modules/@puppeteer/browsers": { + "version": "2.11.0", + "resolved": "https://registry.npmjs.org/@puppeteer/browsers/-/browsers-2.11.0.tgz", + "integrity": "sha512-n6oQX6mYkG8TRPuPXmbPidkUbsSRalhmaaVAQxvH1IkQy63cwsH+kOjB3e4cpCDHg0aSvsiX9bQ4s2VB6mGWUQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "debug": "^4.4.3", + "extract-zip": "^2.0.1", + "progress": "^2.0.3", + "proxy-agent": "^6.5.0", + "semver": "^7.7.3", + "tar-fs": "^3.1.1", + "yargs": "^17.7.2" + }, + "bin": { + "browsers": "lib/cjs/main-cli.js" + }, + "engines": { + "node": ">=18" + } + }, "node_modules/@sinclair/typebox": { "version": "0.27.8", "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", @@ -2595,6 +2618,13 @@ "node": ">=18.0.0" } }, + "node_modules/@tootallnate/quickjs-emscripten": { + "version": "0.23.0", + "resolved": 
"https://registry.npmjs.org/@tootallnate/quickjs-emscripten/-/quickjs-emscripten-0.23.0.tgz", + "integrity": "sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA==", + "dev": true, + "license": "MIT" + }, "node_modules/@tsconfig/node10": { "version": "1.0.12", "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.12.tgz", @@ -2779,6 +2809,17 @@ "dev": true, "license": "MIT" }, + "node_modules/@types/yauzl": { + "version": "2.10.3", + "resolved": "https://registry.npmjs.org/@types/yauzl/-/yauzl-2.10.3.tgz", + "integrity": "sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@typescript-eslint/eslint-plugin": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz", @@ -3020,6 +3061,16 @@ "node": ">=0.4.0" } }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, "node_modules/ajv": { "version": "6.12.6", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", @@ -3130,6 +3181,19 @@ "node": ">=8" } }, + "node_modules/ast-types": { + "version": "0.13.4", + "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.13.4.tgz", + "integrity": "sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w==", + "dev": true, + "license": "MIT", + "dependencies": { + "tslib": "^2.0.1" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/async-hook-jl": { "version": "1.7.6", "resolved": "https://registry.npmjs.org/async-hook-jl/-/async-hook-jl-1.7.6.tgz", @@ -3206,6 +3270,21 @@ 
"proxy-from-env": "^1.1.0" } }, + "node_modules/b4a": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/b4a/-/b4a-1.7.3.tgz", + "integrity": "sha512-5Q2mfq2WfGuFp3uS//0s6baOJLMoVduPYVeNmDYxu5OUA1/cBfvr2RIS7vi62LdNj/urk1hfmj867I3qt6uZ7Q==", + "dev": true, + "license": "Apache-2.0", + "peerDependencies": { + "react-native-b4a": "*" + }, + "peerDependenciesMeta": { + "react-native-b4a": { + "optional": true + } + } + }, "node_modules/babel-jest": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", @@ -3339,6 +3418,103 @@ "dev": true, "license": "MIT" }, + "node_modules/bare-events": { + "version": "2.8.2", + "resolved": "https://registry.npmjs.org/bare-events/-/bare-events-2.8.2.tgz", + "integrity": "sha512-riJjyv1/mHLIPX4RwiK+oW9/4c3TEUeORHKefKAKnZ5kyslbN+HXowtbaVEqt4IMUB7OXlfixcs6gsFeo/jhiQ==", + "dev": true, + "license": "Apache-2.0", + "peerDependencies": { + "bare-abort-controller": "*" + }, + "peerDependenciesMeta": { + "bare-abort-controller": { + "optional": true + } + } + }, + "node_modules/bare-fs": { + "version": "4.5.2", + "resolved": "https://registry.npmjs.org/bare-fs/-/bare-fs-4.5.2.tgz", + "integrity": "sha512-veTnRzkb6aPHOvSKIOy60KzURfBdUflr5VReI+NSaPL6xf+XLdONQgZgpYvUuZLVQ8dCqxpBAudaOM1+KpAUxw==", + "dev": true, + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "bare-events": "^2.5.4", + "bare-path": "^3.0.0", + "bare-stream": "^2.6.4", + "bare-url": "^2.2.2", + "fast-fifo": "^1.3.2" + }, + "engines": { + "bare": ">=1.16.0" + }, + "peerDependencies": { + "bare-buffer": "*" + }, + "peerDependenciesMeta": { + "bare-buffer": { + "optional": true + } + } + }, + "node_modules/bare-os": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/bare-os/-/bare-os-3.6.2.tgz", + "integrity": "sha512-T+V1+1srU2qYNBmJCXZkUY5vQ0B4FSlL3QDROnKQYOqeiQR8UbjNHlPa+TIbM4cuidiN9GaTaOZgSEgsvPbh5A==", + "dev": true, + "license": "Apache-2.0", + "optional": true, + "engines": { + 
"bare": ">=1.14.0" + } + }, + "node_modules/bare-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bare-path/-/bare-path-3.0.0.tgz", + "integrity": "sha512-tyfW2cQcB5NN8Saijrhqn0Zh7AnFNsnczRcuWODH0eYAXBsJ5gVxAUuNr7tsHSC6IZ77cA0SitzT+s47kot8Mw==", + "dev": true, + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "bare-os": "^3.0.1" + } + }, + "node_modules/bare-stream": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/bare-stream/-/bare-stream-2.7.0.tgz", + "integrity": "sha512-oyXQNicV1y8nc2aKffH+BUHFRXmx6VrPzlnaEvMhram0nPBrKcEdcyBg5r08D0i8VxngHFAiVyn1QKXpSG0B8A==", + "dev": true, + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "streamx": "^2.21.0" + }, + "peerDependencies": { + "bare-buffer": "*", + "bare-events": "*" + }, + "peerDependenciesMeta": { + "bare-buffer": { + "optional": true + }, + "bare-events": { + "optional": true + } + } + }, + "node_modules/bare-url": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/bare-url/-/bare-url-2.3.2.tgz", + "integrity": "sha512-ZMq4gd9ngV5aTMa5p9+UfY0b3skwhHELaDkhEHetMdX0LRkW9kzaym4oo/Eh+Ghm0CCDuMTsRIGM/ytUc1ZYmw==", + "dev": true, + "license": "Apache-2.0", + "optional": true, + "dependencies": { + "bare-path": "^3.0.0" + } + }, "node_modules/baseline-browser-mapping": { "version": "2.9.11", "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.11.tgz", @@ -3349,6 +3525,16 @@ "baseline-browser-mapping": "dist/cli.js" } }, + "node_modules/basic-ftp": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.1.0.tgz", + "integrity": "sha512-RkaJzeJKDbaDWTIPiJwubyljaEPwpVWkm9Rt5h9Nd6h7tEXTJ3VB4qxdZBioV7JO5yLUaOKwz7vDOzlncUsegw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + } + }, "node_modules/bowser": { "version": "2.13.1", "resolved": "https://registry.npmjs.org/bowser/-/bowser-2.13.1.tgz", @@ -3435,6 +3621,16 @@ 
"node-int64": "^0.4.0" } }, + "node_modules/buffer-crc32": { + "version": "0.2.13", + "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", + "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + } + }, "node_modules/buffer-from": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", @@ -3523,6 +3719,20 @@ "node": ">=10" } }, + "node_modules/chromium-bidi": { + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/chromium-bidi/-/chromium-bidi-12.0.1.tgz", + "integrity": "sha512-fGg+6jr0xjQhzpy5N4ErZxQ4wF7KLEvhGZXD6EgvZKDhu7iOhZXnZhcDxPJDcwTcrD48NPzOCo84RP2lv3Z+Cg==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "mitt": "^3.0.1", + "zod": "^3.24.1" + }, + "peerDependencies": { + "devtools-protocol": "*" + } + }, "node_modules/ci-info": { "version": "3.9.0", "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", @@ -3648,6 +3858,33 @@ "dev": true, "license": "MIT" }, + "node_modules/cosmiconfig": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-9.0.0.tgz", + "integrity": "sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg==", + "dev": true, + "license": "MIT", + "dependencies": { + "env-paths": "^2.2.1", + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, "node_modules/create-jest": { "version": "29.7.0", "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", @@ -3692,6 +3929,16 @@ "node": ">= 8" } }, + "node_modules/data-uri-to-buffer": { + "version": 
"6.0.2", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-6.0.2.tgz", + "integrity": "sha512-7hvf7/GW8e86rW0ptuwS3OcBGDjIi6SZva7hCyWC0yYry2cOPmLIjXAUHI6DK2HsnwJd9ifmt57i8eV2n4YNpw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, "node_modules/debug": { "version": "4.4.3", "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", @@ -3742,6 +3989,21 @@ "node": ">=0.10.0" } }, + "node_modules/degenerator": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/degenerator/-/degenerator-5.0.1.tgz", + "integrity": "sha512-TllpMR/t0M5sqCXfj85i4XaAzxmS5tVA16dqvdkMwGmzI+dXLXnw3J+3Vdv7VKw+ThlTMboK6i9rnZ6Nntj5CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ast-types": "^0.13.4", + "escodegen": "^2.1.0", + "esprima": "^4.0.1" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", @@ -3761,6 +4023,13 @@ "node": ">=8" } }, + "node_modules/devtools-protocol": { + "version": "0.0.1534754", + "resolved": "https://registry.npmjs.org/devtools-protocol/-/devtools-protocol-0.0.1534754.tgz", + "integrity": "sha512-26T91cV5dbOYnXdJi5qQHoTtUoNEqwkHcAyu/IKtjIAxiEqPMrDiRkDOPWVsGfNZGmlQVHQbZRSjD8sxagWVsQ==", + "dev": true, + "license": "BSD-3-Clause" + }, "node_modules/diff": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", @@ -3857,6 +4126,26 @@ "dev": true, "license": "MIT" }, + "node_modules/end-of-stream": { + "version": "1.4.5", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz", + "integrity": "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/env-paths": { + "version": "2.2.1", + "resolved": 
"https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", + "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/error-ex": { "version": "1.3.4", "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", @@ -3935,6 +4224,28 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/escodegen": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-2.1.0.tgz", + "integrity": "sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esprima": "^4.0.1", + "estraverse": "^5.2.0", + "esutils": "^2.0.2" + }, + "bin": { + "escodegen": "bin/escodegen.js", + "esgenerate": "bin/esgenerate.js" + }, + "engines": { + "node": ">=6.0" + }, + "optionalDependencies": { + "source-map": "~0.6.1" + } + }, "node_modules/eslint": { "version": "8.57.1", "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", @@ -4124,6 +4435,16 @@ "node": ">=0.10.0" } }, + "node_modules/events-universal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/events-universal/-/events-universal-1.0.1.tgz", + "integrity": "sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "bare-events": "^2.7.0" + } + }, "node_modules/execa": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", @@ -4174,6 +4495,43 @@ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, + "node_modules/extract-zip": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-2.0.1.tgz", + "integrity": "sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==", + "dev": true, + "license": 
"BSD-2-Clause", + "dependencies": { + "debug": "^4.1.1", + "get-stream": "^5.1.0", + "yauzl": "^2.10.0" + }, + "bin": { + "extract-zip": "cli.js" + }, + "engines": { + "node": ">= 10.17.0" + }, + "optionalDependencies": { + "@types/yauzl": "^2.9.1" + } + }, + "node_modules/extract-zip/node_modules/get-stream": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", + "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pump": "^3.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -4181,6 +4539,13 @@ "dev": true, "license": "MIT" }, + "node_modules/fast-fifo": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/fast-fifo/-/fast-fifo-1.3.2.tgz", + "integrity": "sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ==", + "dev": true, + "license": "MIT" + }, "node_modules/fast-glob": { "version": "3.3.3", "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", @@ -4263,6 +4628,16 @@ "bser": "2.1.1" } }, + "node_modules/fd-slicer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", + "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "pend": "~1.2.0" + } + }, "node_modules/file-entry-cache": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", @@ -4475,6 +4850,21 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/get-uri": { + "version": "6.0.5", + "resolved": 
"https://registry.npmjs.org/get-uri/-/get-uri-6.0.5.tgz", + "integrity": "sha512-b1O07XYq8eRuVzBNgJLstU6FYc1tS6wnMtF1I1D9lE8LxZSOGZ7LhxN54yPP6mGw5f2CkXY2BQUL9Fx41qvcIg==", + "dev": true, + "license": "MIT", + "dependencies": { + "basic-ftp": "^5.0.2", + "data-uri-to-buffer": "^6.0.2", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", @@ -4675,6 +5065,34 @@ "dev": true, "license": "MIT" }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/human-signals": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", @@ -4761,6 +5179,16 @@ "dev": true, "license": "ISC" }, + "node_modules/ip-address": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-10.1.0.tgz", + "integrity": "sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, "node_modules/is-arrayish": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", @@ -5823,6 +6251,13 
@@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/mitt": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mitt/-/mitt-3.0.1.tgz", + "integrity": "sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw==", + "dev": true, + "license": "MIT" + }, "node_modules/ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", @@ -5844,6 +6279,16 @@ "dev": true, "license": "MIT" }, + "node_modules/netmask": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/netmask/-/netmask-2.0.2.tgz", + "integrity": "sha512-dBpDMdxv9Irdq66304OLfEmQ9tbNRFnFTuZiLo+bD+r332bBmMJ8GBLXklIXXgxd3+v9+KUnZaUR5PJMa75Gsg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, "node_modules/nock": { "version": "13.5.6", "resolved": "https://registry.npmjs.org/nock/-/nock-13.5.6.tgz", @@ -5982,6 +6427,40 @@ "node": ">=6" } }, + "node_modules/pac-proxy-agent": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/pac-proxy-agent/-/pac-proxy-agent-7.2.0.tgz", + "integrity": "sha512-TEB8ESquiLMc0lV8vcd5Ql/JAKAoyzHFXaStwjkzpOpC5Yv+pIzLfHvjTSdf3vpa2bMiUQrg9i6276yn8666aA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tootallnate/quickjs-emscripten": "^0.23.0", + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "get-uri": "^6.0.1", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.6", + "pac-resolver": "^7.0.1", + "socks-proxy-agent": "^8.0.5" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/pac-resolver": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/pac-resolver/-/pac-resolver-7.0.1.tgz", + "integrity": "sha512-5NPgf87AT2STgwa2ntRMr45jTKrYBGkVU36yT0ig/n/GMAa3oPqhZfIQ2kMEimReg0+t9kZViDVZ83qfVUlckg==", + "dev": true, + "license": "MIT", + "dependencies": { + "degenerator": "^5.0.0", + "netmask": "^2.0.2" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/parent-module": { "version": 
"1.0.1", "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", @@ -6061,6 +6540,13 @@ "node": ">=8" } }, + "node_modules/pend": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", + "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==", + "dev": true, + "license": "MIT" + }, "node_modules/picocolors": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", @@ -6198,6 +6684,16 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, + "node_modules/progress": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", + "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, "node_modules/prompts": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", @@ -6222,12 +6718,53 @@ "node": ">= 8" } }, + "node_modules/proxy-agent": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/proxy-agent/-/proxy-agent-6.5.0.tgz", + "integrity": "sha512-TmatMXdr2KlRiA2CyDu8GqR8EjahTG3aY3nXjdzFyoZbmB8hrBsTyMezhULIXKnC0jpfjlmiZ3+EaCzoInSu/A==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "http-proxy-agent": "^7.0.1", + "https-proxy-agent": "^7.0.6", + "lru-cache": "^7.14.1", + "pac-proxy-agent": "^7.1.0", + "proxy-from-env": "^1.1.0", + "socks-proxy-agent": "^8.0.5" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/proxy-agent/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "dev": true, + "license": "ISC", + "engines": { + "node": 
">=12" + } + }, "node_modules/proxy-from-env": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", "license": "MIT" }, + "node_modules/pump": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.3.tgz", + "integrity": "sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA==", + "dev": true, + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, "node_modules/punycode": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", @@ -6238,6 +6775,47 @@ "node": ">=6" } }, + "node_modules/puppeteer": { + "version": "24.34.0", + "resolved": "https://registry.npmjs.org/puppeteer/-/puppeteer-24.34.0.tgz", + "integrity": "sha512-Sdpl/zsYOsagZ4ICoZJPGZw8d9gZmK5DcxVal11dXi/1/t2eIXHjCf5NfmhDg5XnG9Nye+yo/LqMzIxie2rHTw==", + "dev": true, + "hasInstallScript": true, + "license": "Apache-2.0", + "dependencies": { + "@puppeteer/browsers": "2.11.0", + "chromium-bidi": "12.0.1", + "cosmiconfig": "^9.0.0", + "devtools-protocol": "0.0.1534754", + "puppeteer-core": "24.34.0", + "typed-query-selector": "^2.12.0" + }, + "bin": { + "puppeteer": "lib/cjs/puppeteer/node/cli.js" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/puppeteer-core": { + "version": "24.34.0", + "resolved": "https://registry.npmjs.org/puppeteer-core/-/puppeteer-core-24.34.0.tgz", + "integrity": "sha512-24evawO+mUGW4mvS2a2ivwLdX3gk8zRLZr9HP+7+VT2vBQnm0oh9jJEZmUE3ePJhRkYlZ93i7OMpdcoi2qNCLg==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@puppeteer/browsers": "2.11.0", + "chromium-bidi": "12.0.1", + "debug": "^4.4.3", + "devtools-protocol": "0.0.1534754", + "typed-query-selector": "^2.12.0", + "webdriver-bidi-protocol": "0.3.10", + "ws": "^8.18.3" + }, + "engines": { + "node": 
">=18" + } + }, "node_modules/pure-rand": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz", @@ -6474,6 +7052,47 @@ "node": ">=8" } }, + "node_modules/smart-buffer": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", + "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks": { + "version": "2.8.7", + "resolved": "https://registry.npmjs.org/socks/-/socks-2.8.7.tgz", + "integrity": "sha512-HLpt+uLy/pxB+bum/9DzAgiKS8CX1EvbWxI4zlmgGCExImLdiad2iCwXT5Z4c9c3Eq8rP2318mPW2c+QbtjK8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ip-address": "^10.0.1", + "smart-buffer": "^4.2.0" + }, + "engines": { + "node": ">= 10.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks-proxy-agent": { + "version": "8.0.5", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz", + "integrity": "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "socks": "^2.8.3" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", @@ -6531,6 +7150,18 @@ "node": ">=8" } }, + "node_modules/streamx": { + "version": "2.23.0", + "resolved": "https://registry.npmjs.org/streamx/-/streamx-2.23.0.tgz", + "integrity": "sha512-kn+e44esVfn2Fa/O0CPFcex27fjIL6MkVae0Mm6q+E6f0hWv578YCERbv+4m02cjxvDsPKLnmxral/rR6lBMAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "events-universal": "^1.0.0", + "fast-fifo": "^1.3.2", + "text-decoder": "^1.1.0" + } + }, "node_modules/string-length": { "version": "4.0.2", "resolved": 
"https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", @@ -6644,6 +7275,33 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/tar-fs": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-3.1.1.tgz", + "integrity": "sha512-LZA0oaPOc2fVo82Txf3gw+AkEd38szODlptMYejQUhndHMLQ9M059uXR+AfS7DNo0NpINvSqDsvyaCrBVkptWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "pump": "^3.0.0", + "tar-stream": "^3.1.5" + }, + "optionalDependencies": { + "bare-fs": "^4.0.1", + "bare-path": "^3.0.0" + } + }, + "node_modules/tar-stream": { + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-3.1.7.tgz", + "integrity": "sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "b4a": "^1.6.4", + "fast-fifo": "^1.2.0", + "streamx": "^2.15.0" + } + }, "node_modules/test-exclude": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", @@ -6683,6 +7341,16 @@ "node": "*" } }, + "node_modules/text-decoder": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/text-decoder/-/text-decoder-1.2.3.tgz", + "integrity": "sha512-3/o9z3X0X0fTupwsYvR03pJ/DjWuqqrfwBgTQzdWDiQSm9KitAyz/9WqsT2JQW7KV2m+bC2ol/zqpW37NHxLaA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "b4a": "^1.6.4" + } + }, "node_modules/text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", @@ -6875,6 +7543,13 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/typed-query-selector": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/typed-query-selector/-/typed-query-selector-2.12.0.tgz", + "integrity": "sha512-SbklCd1F0EiZOyPiW192rrHZzZ5sBijB6xM+cpmrwDqObvdtunOHHIk9fCGsoK5JVIYXoyEp4iEdE3upFH3PAg==", + "dev": true, + "license": "MIT" + }, "node_modules/typescript": { 
"version": "5.9.3", "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", @@ -6982,6 +7657,13 @@ "makeerror": "1.0.12" } }, + "node_modules/webdriver-bidi-protocol": { + "version": "0.3.10", + "resolved": "https://registry.npmjs.org/webdriver-bidi-protocol/-/webdriver-bidi-protocol-0.3.10.tgz", + "integrity": "sha512-5LAE43jAVLOhB/QqX4bwSiv0Hg1HBfMmOuwBSXHdvg4GMGu9Y0lIq7p4R/yySu6w74WmaR4GM4H9t2IwLW7hgw==", + "dev": true, + "license": "Apache-2.0" + }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", @@ -7054,6 +7736,28 @@ "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, + "node_modules/ws": { + "version": "8.18.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, "node_modules/y18n": { "version": "5.0.8", "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", @@ -7100,6 +7804,17 @@ "node": ">=12" } }, + "node_modules/yauzl": { + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", + "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-crc32": "~0.2.3", + "fd-slicer": "~1.1.0" + } + }, "node_modules/yn": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", @@ -7122,6 +7837,16 @@ "funding": { "url": "https://github.com/sponsors/sindresorhus" } + }, + "node_modules/zod": { + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": 
"sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } } } } diff --git a/applications/chatops/slack-bot/package.json b/applications/chatops/slack-bot/package.json index 5db6ddb..535cc77 100644 --- a/applications/chatops/slack-bot/package.json +++ b/applications/chatops/slack-bot/package.json @@ -32,6 +32,7 @@ "eslint": "^8.56.0", "jest": "^29.7.0", "nock": "^13.5.0", + "puppeteer": "^24.34.0", "ts-jest": "^29.1.1", "ts-node": "^10.9.2", "typescript": "^5.3.3" diff --git a/applications/chatops/slack-bot/performance-tests/.env.example b/applications/chatops/slack-bot/performance-tests/.env.example new file mode 100644 index 0000000..e9b2b6c --- /dev/null +++ b/applications/chatops/slack-bot/performance-tests/.env.example @@ -0,0 +1,12 @@ +# Performance Testing Environment Variables +# Copy this file to .env and fill in your values (DO NOT commit .env) + +# Environment to test (plt, dev, prd) +ENVIRONMENT=plt + +# AWS Region +AWS_REGION=ca-central-1 + +# API Gateway URL with full path (get from Terragrunt output or AWS Console) +# Example: https://xxxxxx.execute-api.ca-central-1.amazonaws.com/prod/slack +API_GATEWAY_URL= diff --git a/applications/chatops/slack-bot/performance-tests/README.md b/applications/chatops/slack-bot/performance-tests/README.md new file mode 100644 index 0000000..343d1b2 --- /dev/null +++ b/applications/chatops/slack-bot/performance-tests/README.md @@ -0,0 +1,391 @@ +# Slack Bot Performance Testing + +End-to-end performance testing for the Slack bot architecture using Artillery. + +## Quick Start + +```bash +# Run full performance test +make perf-test + +# Run quick 2-minute test +make perf-test-quick + +# Generate HTML report from latest test +make perf-test-report +``` + +## Prerequisites + +1. 
**AWS Credentials** - Configured with access to: + - SSM Parameter Store (to fetch Slack signing secret) + - API Gateway endpoint + +2. **Slack Signing Secret** - Must be stored in SSM: + ``` + /laco/plt/aws/secrets/slack/signing-secret + ``` + +3. **API Gateway URL** - Full URL including path, auto-detected from Terragrunt, or set manually: + ```bash + make perf-test API_GATEWAY_URL=https://xxx.execute-api.ca-central-1.amazonaws.com/prod/slack + ``` + +## Test Scenarios + +The Artillery test runs through 5 phases: + +1. **Warm-up** (60s @ 2 req/s) + - Ensures Lambda functions are warm + - Prevents cold start bias + +2. **Ramp-up** (180s, 5 → 20 req/s) + - Gradually increases load + - Tests auto-scaling behavior + +3. **Sustained Load** (300s @ 20 req/s) + - Steady-state performance + - Establishes baseline metrics + +4. **High Load** (120s, 20 → 45 req/s) + - Approaches API Gateway throttle limit (50 req/s) + - Tests behavior under stress + +5. **Cool-down** (60s @ 5 req/s) + - Allows system to stabilize + +**Total Duration:** ~12 minutes + +## Command Distribution + +Matches realistic usage patterns: + +- `/echo` (40%) - Fast command, minimal processing +- `/status` (30%) - Medium complexity, service checks +- `/deploy` (20%) - Slower, deployment simulation +- `/build` (10%) - Slower, GitHub API integration + +## Performance Thresholds + +Tests fail if any threshold is exceeded: + +- **Error Rate:** < 1% +- **P95 Latency:** < 3000ms (3 seconds) +- **P99 Latency:** < 5000ms (5 seconds) + +## Output + +### Console Output + +Real-time statistics during the test: +``` +Scenarios launched: 1234 +Scenarios completed: 1200 +Requests completed: 1200 +Mean response/sec: 19.2 +Response time (msec): + min: 245 + max: 4521 + median: 892 + p95: 2134 + p99: 3456 +``` + +### JSON Results + +Saved to `performance-tests/results/test-YYYYMMDD-HHMMSS.json` + +Contains: +- Request/response timings +- Error counts and types +- Percentile distributions +- Phase-by-phase breakdown + 
+### HTML Report + +Generate with: `make perf-test-report` + +Includes: +- Interactive charts +- Timeline view +- Request distribution graphs +- Error analysis + +## Interpreting Results + +### Key Metrics to Watch + +1. **P95/P99 Latency** + - Target: < 3000ms / < 5000ms + - High values indicate bottlenecks + +2. **Error Rate** + - Target: < 1% + - Errors by status code: + - 429: API Gateway throttling + - 500: Lambda errors + - 503: Service unavailable + +3. **Response Time Distribution** + - Should follow command complexity: + - Echo: 500-1000ms + - Status: 1000-2000ms + - Deploy/Build: 2000-3000ms + +### Common Issues + +#### High P95/P99 Latency + +**Symptoms:** +- P99 > 5000ms +- Wide gap between median and P99 + +**Possible Causes:** +- Cold starts +- Worker concurrency limits (5 concurrent) +- Queue backlog + +**Investigation:** +1. Check CloudWatch Lambda concurrent executions +2. Check SQS queue depth during test +3. Review X-Ray traces for slow subsegments + +#### API Gateway Throttling (429 errors) + +**Symptoms:** +- 429 status codes +- Errors during high-load phase + +**Root Cause:** +- Default 50 req/s throttle limit exceeded + +**Solutions:** +- Request quota increase +- Reduce test load +- Implement request queuing + +#### Lambda Errors (500/502/503) + +**Symptoms:** +- 5xx status codes +- Errors in specific commands + +**Investigation:** +1. Check CloudWatch Logs for Lambda errors +2. Review error messages in Artillery output +3. Check Lambda timeout configuration + +## Advanced Usage + +### Configuring CloudWatch Logs Query Wait Time + +The `analyze-performance.sh` script uses a polling mechanism to wait for CloudWatch Logs Insights queries to complete. 
You can configure the behavior using environment variables: + +```bash +# Maximum time to wait for a query to complete (default: 60 seconds) +export CLOUDWATCH_QUERY_MAX_WAIT=120 + +# Interval between status checks (default: 2 seconds) +export CLOUDWATCH_QUERY_POLL_INTERVAL=3 + +# Run analysis +./analyze-performance.sh --from-test +``` + +These settings are useful when: +- Querying large log volumes that take longer to process +- Operating in regions with higher latency +- Running queries during periods of high CloudWatch API load + +### Custom Test Duration + +Edit `artillery-config.yml` phases: + +```yaml +phases: + - duration: 120 # 2 minutes instead of default + arrivalRate: 10 +``` + +### Test Single Command + +Create custom scenario file: + +```yaml +# artillery-echo-only.yml +config: + target: "{{ $processEnvironment.API_GATEWAY_URL }}" + phases: + - duration: 60 + arrivalRate: 20 + processor: "./slack-signature-processor.js" + +scenarios: + - name: "Echo Only" + flow: + - post: + url: "/slack" + beforeRequest: "generateSlackSignature" + body: "token=test&command=/echo&text=test&..." +``` + +Run: `artillery run artillery-echo-only.yml` + +### Different Environment + +```bash +# Test against dev environment +make perf-test ENVIRONMENT=dev + +# Test against production (careful!) +make perf-test ENVIRONMENT=prd +``` + +### Manual Artillery Run + +```bash +# Set environment variables +export API_GATEWAY_URL="https://xxx.execute-api.ca-central-1.amazonaws.com" +export ENVIRONMENT="plt" +export AWS_REGION="ca-central-1" + +# Run test +cd performance-tests +artillery run artillery-config.yml --output results/test.json + +# Generate HTML report +node render-report.js results/test.json results/report.html +# +# Or from repo root: +# make perf-test-report REPORT_JSON=performance-tests/results/test.json +``` +Note: The report uses Chart.js from a CDN, so charts require network access when viewing. 
+ +## Monitoring During Tests + +### CloudWatch Dashboard + +Monitor in real-time: +``` +https://console.aws.amazon.com/cloudwatch/home?region=ca-central-1#dashboards:name=SlackBot-Performance-PLT +``` + +### CloudWatch Logs Insights + +Query during test: + +```sql +fields @timestamp, correlationId, e2eLatency, component +| filter component = "echo-worker" +| stats avg(e2eLatency) as avg, percentile(e2eLatency, 95) as p95 by bin(1m) +``` + +### X-Ray Service Map + +View distributed trace: +``` +https://console.aws.amazon.com/xray/home?region=ca-central-1#/service-map +``` + +## Baseline Performance + +Establish baseline before architectural changes: + +```bash +# Run full test +make perf-test + +# Generate report +make perf-test-report + +# Archive results +cp performance-tests/results/test-*.json baseline-YYYYMMDD.json +``` + +Compare before/after: +1. P50/P95/P99 latencies +2. Error rates +3. Throughput (req/s) +4. Resource utilization + +## Troubleshooting + +### "Failed to fetch Slack signing secret from SSM" + +**Solution:** +```bash +# Verify secret exists +aws ssm get-parameter \ + --name /laco/plt/aws/secrets/slack/signing-secret \ + --with-decryption \ + --region ca-central-1 + +# Check AWS credentials +aws sts get-caller-identity +``` + +### "Could not get API Gateway URL from Terragrunt" + +**Solution:** +```bash +# Get URL manually +cd ../../../../cloud-sandbox/aws/10-plt/slack-api-gateway +terragrunt output api_gateway_url + +# Set manually +make perf-test API_GATEWAY_URL=https://xxx.execute-api.ca-central-1.amazonaws.com +``` + +### "Artillery command not found" + +**Solution:** +```bash +# Install globally +npm install -g artillery + +# Or use npx +npx artillery run artillery-config.yml +``` + +### High error rate during test + +**Check:** +1. Is the PLT environment healthy? +2. Are Lambdas deployed? +3. Is Slack signing secret correct? +4. Are EventBridge rules configured? +5. Are SQS queues created? 
+ +```bash +# Verify stack health +cd ../../../../cloud-sandbox/aws/10-plt +terragrunt run-all output +``` + +## Files + +``` +performance-tests/ +├── README.md # This file +├── artillery-config.yml # Main Artillery configuration +├── slack-signature-processor.js # Slack signature generation +└── results/ # Test results (gitignored) + ├── test-YYYYMMDD-HHMMSS.json # Raw test data + └── report-YYYYMMDD-HHMMSS.html # HTML report +``` + +## Next Steps + +After establishing baseline with Artillery: + +1. **Component Testing** - Test individual components with AWS SDK +2. **Identify Bottlenecks** - Compare Artillery E2E vs component latencies +3. **Optimize** - Focus on slowest components +4. **Re-test** - Validate improvements with Artillery +5. **Production Monitoring** - Set up CloudWatch alarms based on baselines + +## References + +- [Artillery Documentation](https://www.artillery.io/docs) +- [Performance Testing Guide](/cloud-control-plane/docs/guides/performance-testing-guide.md) +- [Slack Bot Architecture](/cloud-control-plane/docs/architecture/) diff --git a/applications/chatops/slack-bot/performance-tests/analyze-e2e-json.sh b/applications/chatops/slack-bot/performance-tests/analyze-e2e-json.sh new file mode 100755 index 0000000..48feceb --- /dev/null +++ b/applications/chatops/slack-bot/performance-tests/analyze-e2e-json.sh @@ -0,0 +1,158 @@ +#!/bin/bash +# Extract E2E performance metrics from Artillery test and CloudWatch Logs +# Output: JSON format for dashboard integration + +set -e + +ENVIRONMENT="${ENVIRONMENT:-plt}" +REGION="ca-central-1" + +# Find latest Artillery test result (exclude .metrics.json files) +LATEST_RESULT=$(ls -t results/*.json 2>/dev/null | grep -v '\.metrics\.json' | head -1) + +if [ -z "$LATEST_RESULT" ]; then + echo '{"error": "No Artillery test results found"}' >&2 + exit 1 +fi + +echo "Analyzing: $LATEST_RESULT" >&2 + +# Extract timestamps from Artillery JSON +read START_MS END_MS < <( + node -pe " + const data = 
require('./$LATEST_RESULT'); + const agg = data.aggregate || data.rawAggregate || {}; + const start = agg.firstMetricAt || 0; + const end = agg.lastMetricAt || 0; + \`\${start} \${end}\` + " +) + +if [ "$START_MS" = "0" ] || [ "$END_MS" = "0" ]; then + echo '{"error": "Could not extract timestamps from Artillery result"}' >&2 + exit 1 +fi + +# Helper function to execute query and wait for results +query_logs() { + local log_group=$1 + local query_string=$2 + local metric_name=$3 + + # Convert milliseconds to seconds for AWS Logs API + local start_sec=$((START_MS / 1000)) + local end_sec=$((END_MS / 1000)) + # Add 5-minute buffer to account for log ingestion delay + local end_sec_buffered=$((end_sec + 300)) + + local query_id=$(aws logs start-query \ + --log-group-name "$log_group" \ + --start-time $start_sec \ + --end-time $end_sec_buffered \ + --region $REGION \ + --query-string "$query_string" \ + --query 'queryId' \ + --output text) + + # Wait for query to complete + sleep 5 + + aws logs get-query-results \ + --query-id "$query_id" \ + --region $REGION \ + --output json | jq -r '.results' +} + +# 1. Router Lambda Performance +# Note: Using REPORT filter which counts only completed invocations +# If count differs from Artillery requests, check CloudWatch for: +# - Lambda initialization errors (no REPORT logged) +# - Timeouts (REPORT may be delayed beyond query window) +# - API Gateway errors (request never reached Lambda) +echo "Querying Router Lambda metrics..." >&2 +ROUTER_METRICS=$(query_logs \ + "/aws/lambda/laco-${ENVIRONMENT}-slack-router" \ + "fields @duration | filter @type = \"REPORT\" | stats count() as invocations, avg(@duration) as avg_ms, percentile(@duration, 50) as p50_ms, percentile(@duration, 95) as p95_ms, percentile(@duration, 99) as p99_ms, max(@duration) as max_ms" \ + "router") + +# 2. 
Worker Lambda Performance +# Note: Using REPORT filter which counts only completed invocations +# Difference between Router and Worker counts indicates: +# - SQS message loss or DLQ routing +# - Worker timeouts or initialization failures +echo "Querying Worker Lambda metrics..." >&2 +WORKER_METRICS=$(query_logs \ + "/aws/lambda/laco-${ENVIRONMENT}-chatbot-echo-worker" \ + "fields @duration | filter @type = \"REPORT\" | stats count() as invocations, avg(@duration) as avg_ms, percentile(@duration, 50) as p50_ms, percentile(@duration, 95) as p95_ms, percentile(@duration, 99) as p99_ms, max(@duration) as max_ms" \ + "worker") + +# 3. End-to-End Latency & Component Breakdown (from Performance metrics) +echo "Querying E2E latency & component breakdown..." >&2 +E2E_METRICS=$(query_logs \ + "/aws/lambda/laco-${ENVIRONMENT}-chatbot-echo-worker" \ + "fields totalE2eMs, queueWaitMs, workerDurationMs, syncResponseMs, asyncResponseMs | filter message = \"Performance metrics\" | stats count() as requests, avg(totalE2eMs) as avg_e2e_ms, percentile(totalE2eMs, 50) as p50_e2e_ms, percentile(totalE2eMs, 95) as p95_e2e_ms, percentile(totalE2eMs, 99) as p99_e2e_ms, avg(queueWaitMs) as avg_queue_wait_ms, avg(workerDurationMs) as avg_worker_ms, avg(syncResponseMs) as avg_sync_response_ms, avg(asyncResponseMs) as avg_async_response_ms" \ + "e2e" 2>/dev/null || echo "[]") + +# 4. Error Analysis +echo "Querying errors..." 
>&2 +ROUTER_ERRORS=$(query_logs \ + "/aws/lambda/laco-${ENVIRONMENT}-slack-router" \ + "fields @message | filter level = \"error\" or @message like /ERROR/ | stats count() as error_count" \ + "router_errors" 2>/dev/null || echo "[]") + +WORKER_ERRORS=$(query_logs \ + "/aws/lambda/laco-${ENVIRONMENT}-chatbot-echo-worker" \ + "fields @message | filter level = \"error\" or @message like /ERROR/ | stats count() as error_count" \ + "worker_errors" 2>/dev/null || echo "[]") + +# Parse Artillery metrics +ARTILLERY_SUMMARY=$(node -pe " + const data = require('./$LATEST_RESULT'); + const agg = data.aggregate || {}; + const counters = agg.counters || {}; + const summaries = agg.summaries || {}; + const responseTime = summaries['http.response_time'] || {}; + JSON.stringify({ + requests: counters['http.requests'] || 0, + responses: counters['http.responses'] || 0, + errors: counters['errors.ETIMEDOUT'] || counters['vusers.failed'] || 0, + errorRate: counters['http.requests'] ? ((counters['vusers.failed'] || 0) / counters['http.requests'] * 100).toFixed(2) : 0, + avgRps: (counters['http.requests'] || 0) / ((data.aggregate.lastMetricAt - data.aggregate.firstMetricAt) / 1000) || 0, + p50: responseTime.median || 0, + p95: responseTime.p95 || 0, + p99: responseTime.p99 || 0, + durationMs: (data.aggregate.lastMetricAt - data.aggregate.firstMetricAt) || 0 + }); +") + +# Determine output file name (testname.metrics.json) +TESTNAME=$(basename "$LATEST_RESULT" .json) +METRICS_FILE="results/${TESTNAME}.metrics.json" + +# Build final JSON output (heredoc: unquoted EOF so $VAR and $(...) expand into the template) +cat <<EOF > "$METRICS_FILE" +{ + "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", + "environment": "$ENVIRONMENT", + "testFile": "$LATEST_RESULT", + "timeRange": { + "startMs": $START_MS, + "endMs": $END_MS, + "durationMs": $((END_MS - START_MS)) + }, + "artillery": $ARTILLERY_SUMMARY, + "cloudwatch": { + "router": $(echo "$ROUTER_METRICS" | jq -r 'if type == "array" and length > 0 then .[0] | map({(.field): .value}) | add else {} end'), + "worker":
$(echo "$WORKER_METRICS" | jq -r 'if type == "array" and length > 0 then .[0] | map({(.field): .value}) | add else {} end'), + "e2e": $(echo "$E2E_METRICS" | jq -r 'if type == "array" and length > 0 then .[0] | map({(.field): .value}) | add else {} end'), + "errors": { + "router": $(echo "$ROUTER_ERRORS" | jq -r 'if type == "array" and length > 0 then (.[0] | map(select(.field == "error_count") | .value) | .[0] // 0) else 0 end'), + "worker": $(echo "$WORKER_ERRORS" | jq -r 'if type == "array" and length > 0 then (.[0] | map(select(.field == "error_count") | .value) | .[0] // 0) else 0 end') + } + } +} +EOF + +echo "" >&2 +echo "✓ Metrics saved to: $METRICS_FILE" >&2 +echo "Analysis complete!" >&2 diff --git a/applications/chatops/slack-bot/performance-tests/analyze-performance.sh b/applications/chatops/slack-bot/performance-tests/analyze-performance.sh new file mode 100755 index 0000000..e2915d9 --- /dev/null +++ b/applications/chatops/slack-bot/performance-tests/analyze-performance.sh @@ -0,0 +1,571 @@ +#!/bin/bash +# Analyze performance test results from CloudWatch Logs + +set -e + +ENVIRONMENT="${ENVIRONMENT:-plt}" +REGION="ca-central-1" + +# Configurable query wait parameters +QUERY_MAX_WAIT_TIME="${CLOUDWATCH_QUERY_MAX_WAIT:-60}" # Maximum wait time in seconds +QUERY_POLL_INTERVAL="${CLOUDWATCH_QUERY_POLL_INTERVAL:-2}" # Poll interval in seconds + +# Check for flags +OUTPUT_JSON=false +USE_TEST_RESULT=false +QUIET_MODE=false + +for arg in "$@"; do + case $arg in + --from-test) + USE_TEST_RESULT=true + shift + ;; + --json) + OUTPUT_JSON=true + shift + ;; + --quiet|-q) + QUIET_MODE=true + shift + ;; + esac +done + +# Helper: echo only if not quiet +echo_info() { + if [ "$QUIET_MODE" = false ]; then + echo "$@" + fi +} + +# Helper: macOS/Linux compatible date conversion +timestamp_to_date() { + local ts=$1 + if [ "$(uname)" = "Darwin" ]; then + date -r "$ts" '+%Y-%m-%d %H:%M:%S' + else + date -d "@$ts" '+%Y-%m-%d %H:%M:%S' + fi +} + +# Helper: Calculate time N 
minutes ago (macOS/Linux compatible) +minutes_ago_timestamp() { + local minutes=$1 + local now=$(date +%s) + echo $((now - (minutes * 60))) +} + +# Helper: Poll CloudWatch Logs Insights query until completion +# Usage: wait_for_query_completion +# Returns: 0 if query completed successfully, 1 if timeout or failed +wait_for_query_completion() { + local query_id=$1 + local region=$2 + local elapsed=0 + + while [ $elapsed -lt $QUERY_MAX_WAIT_TIME ]; do + # Get query results and check for AWS CLI errors + local aws_output=$(aws logs get-query-results --query-id "$query_id" --region "$region" --output json 2>&1) + local aws_exit_code=$? + + if [ $aws_exit_code -ne 0 ]; then + echo_info " ⚠ AWS CLI error for query $query_id: $aws_output" >&2 + return 1 + fi + + local status=$(echo "$aws_output" | jq -r '.status' 2>/dev/null) + + if [ -z "$status" ] || [ "$status" = "null" ]; then + echo_info " ⚠ Unable to parse query status for $query_id" >&2 + return 1 + fi + + case "$status" in + "Complete") + return 0 + ;; + "Failed"|"Cancelled"|"Timeout") + echo_info " ⚠ Query $query_id failed with status: $status" >&2 + return 1 + ;; + "Running"|"Scheduled") + sleep $QUERY_POLL_INTERVAL + elapsed=$((elapsed + QUERY_POLL_INTERVAL)) + ;; + *) + # Unknown status - log it for debugging + echo_info " ⚠ Unexpected query status for $query_id: '$status'" >&2 + return 1 + ;; + esac + done + + echo_info " ⚠ Query $query_id timed out after ${QUERY_MAX_WAIT_TIME}s" >&2 + return 1 +} + + +# Determine time range +if [ "$USE_TEST_RESULT" = true ]; then + # Use latest Artillery test result (exclude .metrics.json files) + LATEST_RESULT=$(ls -t results/*.json 2>/dev/null | grep -v '\.metrics\.json' | head -1) + + if [ -z "$LATEST_RESULT" ]; then + echo "Error: No Artillery test results found in results/ directory" + exit 1 + fi + + echo "Using time range from: $LATEST_RESULT" + + # Extract timestamps from Artillery JSON + read START_TIMESTAMP END_TIMESTAMP < <( + node -pe " + const data = 
require('./$LATEST_RESULT'); + const start = data.aggregate.firstMetricAt || data.rawAggregate.firstMetricAt; + const end = data.aggregate.lastMetricAt || data.rawAggregate.lastMetricAt; + \`\${start} \${end}\` + " + ) + + if [ -z "$START_TIMESTAMP" ] || [ -z "$END_TIMESTAMP" ]; then + echo "Error: Could not extract timestamps from $LATEST_RESULT" + exit 1 + fi + + # Convert milliseconds to seconds for AWS CLI + START_TIMESTAMP=$((START_TIMESTAMP / 1000)) + END_TIMESTAMP=$((END_TIMESTAMP / 1000)) + + START_TIME_HUMAN=$(timestamp_to_date $START_TIMESTAMP) + END_TIME_HUMAN=$(timestamp_to_date $END_TIMESTAMP) + + echo_info "Test window: $START_TIME_HUMAN ~ $END_TIME_HUMAN" + echo_info "" +else + # Use traditional time range (last N minutes) + START_TIME="${1:-15}" + echo_info "Analyzing last ${START_TIME} minutes of logs..." + echo_info "" + + # Calculate timestamps (seconds, for AWS CLI) + END_TIMESTAMP=$(date +%s) + START_TIMESTAMP=$((END_TIMESTAMP - (START_TIME * 60))) +fi + +# Add 5-minute buffer to end time to account for log ingestion delay +END_TIMESTAMP_BUFFERED=$((END_TIMESTAMP + 300)) + +# Calculate optimal period for CloudWatch metrics to avoid exceeding 1440 datapoints limit +# Formula: period = max(60, ceil(duration / 1440) * 60) rounded up to nearest minute +TEST_DURATION=$((END_TIMESTAMP - START_TIMESTAMP)) +CLOUDWATCH_PERIOD=60 +if [ $TEST_DURATION -gt 86400 ]; then + # For duration > 1 day, use 5-minute periods + CLOUDWATCH_PERIOD=300 +elif [ $TEST_DURATION -gt 43200 ]; then + # For duration > 12 hours, use 3-minute periods + CLOUDWATCH_PERIOD=180 +elif [ $TEST_DURATION -gt 14400 ]; then + # For duration > 4 hours, use 2-minute periods + CLOUDWATCH_PERIOD=120 +else + # For duration <= 4 hours, use 1-minute periods (gives max 240 datapoints for 4 hours) + CLOUDWATCH_PERIOD=60 +fi + +echo_info "CloudWatch period: ${CLOUDWATCH_PERIOD}s (test duration: ${TEST_DURATION}s)" + +# Initialize JSON output structure +if [ "$OUTPUT_JSON" = true ]; then + 
JSON_OUTPUT="{\"timestamp\":\"$(date -u +%Y-%m-%dT%H:%M:%SZ)\",\"environment\":\"$ENVIRONMENT\",\"timeRange\":{\"start\":$((START_TIMESTAMP * 1000)),\"end\":$((END_TIMESTAMP * 1000))},\"metrics\":{}}" +fi + +echo_info "========================================" +echo_info "Component Performance Analysis" +echo_info "========================================" +echo_info "Environment: ${ENVIRONMENT}" +echo_info "" + +# 0. End-to-End Latency & Component Breakdown (from Performance metrics log) +echo_info "0. End-to-End Latency & Component Breakdown" +echo_info "----------------------------------------" +echo_info "Note: Using structured Performance metrics from echo worker" +echo_info "" +aws logs start-query \ + --log-group-name "/aws/lambda/laco-${ENVIRONMENT}-chatbot-echo-worker" \ + --start-time ${START_TIMESTAMP} \ + --end-time ${END_TIMESTAMP_BUFFERED} \ + --region ${REGION} \ + --query-string ' +fields @timestamp, totalE2eMs, workerDurationMs, queueWaitMs, syncResponseMs, asyncResponseMs +| filter message = "Performance metrics" +| stats + count() as requests, + avg(totalE2eMs) as avg_e2e_ms, + percentile(totalE2eMs, 50) as p50_e2e_ms, + percentile(totalE2eMs, 95) as p95_e2e_ms, + percentile(totalE2eMs, 99) as p99_e2e_ms, + max(totalE2eMs) as max_e2e_ms, + avg(workerDurationMs) as avg_worker_ms, + avg(queueWaitMs) as avg_queue_wait_ms, + avg(syncResponseMs) as avg_sync_response_ms, + avg(asyncResponseMs) as avg_async_response_ms +' > /tmp/e2e-query.json 2>/dev/null || echo " ⚠ E2E tracking not available (Performance metrics not found in logs)" + +if [ -f /tmp/e2e-query.json ]; then + E2E_QUERY_ID=$(cat /tmp/e2e-query.json | jq -r '.queryId' 2>/dev/null) + if [ "$E2E_QUERY_ID" != "null" ] && [ -n "$E2E_QUERY_ID" ]; then + if wait_for_query_completion "$E2E_QUERY_ID" "$REGION"; then + aws logs get-query-results --query-id ${E2E_QUERY_ID} --region ${REGION} --output table 2>/dev/null || echo " ⚠ Query failed" + fi + fi +fi + +echo "" + +# 1. 
Router Lambda Performance +echo "1. API Gateway → Router Lambda" +echo "----------------------------------------" +echo "Note: Counting START events to capture all invocations (including failed)" +aws logs start-query \ + --log-group-name "/aws/lambda/laco-${ENVIRONMENT}-slack-router" \ + --start-time ${START_TIMESTAMP} \ + --end-time ${END_TIMESTAMP_BUFFERED} \ + --region ${REGION} \ + --query-string ' +fields @timestamp, @requestId +| filter @type = "START" +| stats count() as total_invocations +' > /tmp/router-start-query.json + +ROUTER_START_QUERY_ID=$(cat /tmp/router-start-query.json | jq -r '.queryId') + +# Also get REPORT metrics for performance data +aws logs start-query \ + --log-group-name "/aws/lambda/laco-${ENVIRONMENT}-slack-router" \ + --start-time ${START_TIMESTAMP} \ + --end-time ${END_TIMESTAMP_BUFFERED} \ + --region ${REGION} \ + --query-string ' +fields @timestamp, @duration, @billedDuration, @memorySize, @maxMemoryUsed +| filter @type = "REPORT" +| stats + count() as completed_invocations, + avg(@duration) as avg_duration_ms, + percentile(@duration, 50) as p50_ms, + percentile(@duration, 95) as p95_ms, + percentile(@duration, 99) as p99_ms, + max(@duration) as max_ms, + avg(@maxMemoryUsed / 1024 / 1024) as avg_memory_mb, + max(@maxMemoryUsed / 1024 / 1024) as max_memory_mb +' > /tmp/router-query.json + +ROUTER_QUERY_ID=$(cat /tmp/router-query.json | jq -r '.queryId') + +echo "Total Invocations (START events):" +if wait_for_query_completion "$ROUTER_START_QUERY_ID" "$REGION"; then + aws logs get-query-results \ + --query-id ${ROUTER_START_QUERY_ID} \ + --region ${REGION} \ + --output table +fi + +echo "" +echo "Completed Invocations (REPORT events with performance data):" +if wait_for_query_completion "$ROUTER_QUERY_ID" "$REGION"; then + aws logs get-query-results \ + --query-id ${ROUTER_QUERY_ID} \ + --region ${REGION} \ + --output table +fi + +echo "" + +# 2. Echo Worker Lambda Performance +echo "2. 
Echo Worker Lambda" +echo "----------------------------------------" +echo "Note: Counting START events to capture all invocations (including failed)" +aws logs start-query \ + --log-group-name "/aws/lambda/laco-${ENVIRONMENT}-chatbot-echo-worker" \ + --start-time ${START_TIMESTAMP} \ + --end-time ${END_TIMESTAMP_BUFFERED} \ + --region ${REGION} \ + --query-string ' +fields @timestamp, @requestId +| filter @type = "START" +| stats count() as total_invocations +' > /tmp/worker-start-query.json + +WORKER_START_QUERY_ID=$(cat /tmp/worker-start-query.json | jq -r '.queryId') + +# Also get REPORT metrics for performance data +aws logs start-query \ + --log-group-name "/aws/lambda/laco-${ENVIRONMENT}-chatbot-echo-worker" \ + --start-time ${START_TIMESTAMP} \ + --end-time ${END_TIMESTAMP_BUFFERED} \ + --region ${REGION} \ + --query-string ' +fields @timestamp, @duration, @billedDuration, @memorySize, @maxMemoryUsed +| filter @type = "REPORT" +| stats + count() as completed_invocations, + avg(@duration) as avg_duration_ms, + percentile(@duration, 50) as p50_ms, + percentile(@duration, 95) as p95_ms, + percentile(@duration, 99) as p99_ms, + max(@duration) as max_ms, + avg(@maxMemoryUsed / 1024 / 1024) as avg_memory_mb, + max(@maxMemoryUsed / 1024 / 1024) as max_memory_mb +' > /tmp/worker-query.json + +WORKER_QUERY_ID=$(cat /tmp/worker-query.json | jq -r '.queryId') + +echo "Total Invocations (START events):" +if wait_for_query_completion "$WORKER_START_QUERY_ID" "$REGION"; then + aws logs get-query-results \ + --query-id ${WORKER_START_QUERY_ID} \ + --region ${REGION} \ + --output table +fi + +echo "" +echo "Completed Invocations (REPORT events with performance data):" +if wait_for_query_completion "$WORKER_QUERY_ID" "$REGION"; then + aws logs get-query-results \ + --query-id ${WORKER_QUERY_ID} \ + --region ${REGION} \ + --output table +fi + +echo "" + +# 3. Error Analysis +echo "3. 
Error Analysis" +echo "----------------------------------------" +echo "Router Errors:" +aws logs start-query \ + --log-group-name "/aws/lambda/laco-${ENVIRONMENT}-slack-router" \ + --start-time ${START_TIMESTAMP} \ + --end-time ${END_TIMESTAMP_BUFFERED} \ + --region ${REGION} \ + --query-string ' +fields @timestamp, @message +| filter @message like /ERROR/ or @message like /Invalid signature/ +| stats count() as error_count by @message +| limit 20 +' > /tmp/router-errors.json + +ROUTER_ERROR_ID=$(cat /tmp/router-errors.json | jq -r '.queryId') +if wait_for_query_completion "$ROUTER_ERROR_ID" "$REGION"; then + aws logs get-query-results --query-id ${ROUTER_ERROR_ID} --region ${REGION} --output table +fi + +echo "" +echo "Worker Errors:" +aws logs start-query \ + --log-group-name "/aws/lambda/laco-${ENVIRONMENT}-chatbot-echo-worker" \ + --start-time ${START_TIMESTAMP} \ + --end-time ${END_TIMESTAMP_BUFFERED} \ + --region ${REGION} \ + --query-string ' +fields @timestamp, @message +| filter @message like /ERROR/ or level = "error" +| stats count() as error_count by @message +| limit 20 +' > /tmp/worker-errors.json + +WORKER_ERROR_ID=$(cat /tmp/worker-errors.json | jq -r '.queryId') +if wait_for_query_completion "$WORKER_ERROR_ID" "$REGION"; then + aws logs get-query-results --query-id ${WORKER_ERROR_ID} --region ${REGION} --output table +fi + +echo "" + +# 4. Cold Starts +echo "4. 
Cold Start Analysis" +echo "----------------------------------------" +echo "Router Cold Starts:" +aws logs start-query \ + --log-group-name "/aws/lambda/laco-${ENVIRONMENT}-slack-router" \ + --start-time ${START_TIMESTAMP} \ + --end-time ${END_TIMESTAMP_BUFFERED} \ + --region ${REGION} \ + --query-string ' +fields @timestamp, @initDuration +| filter @type = "REPORT" and ispresent(@initDuration) +| stats + count() as cold_starts, + avg(@initDuration) as avg_init_ms, + max(@initDuration) as max_init_ms +' > /tmp/router-cold.json + +ROUTER_COLD_ID=$(cat /tmp/router-cold.json | jq -r '.queryId') +if wait_for_query_completion "$ROUTER_COLD_ID" "$REGION"; then + aws logs get-query-results --query-id ${ROUTER_COLD_ID} --region ${REGION} --output table +fi + +echo "" +echo "Worker Cold Starts:" +aws logs start-query \ + --log-group-name "/aws/lambda/laco-${ENVIRONMENT}-chatbot-echo-worker" \ + --start-time ${START_TIMESTAMP} \ + --end-time ${END_TIMESTAMP_BUFFERED} \ + --region ${REGION} \ + --query-string ' +fields @timestamp, @initDuration +| filter @type = "REPORT" and ispresent(@initDuration) +| stats + count() as cold_starts, + avg(@initDuration) as avg_init_ms, + max(@initDuration) as max_init_ms +' > /tmp/worker-cold.json + +WORKER_COLD_ID=$(cat /tmp/worker-cold.json | jq -r '.queryId') +if wait_for_query_completion "$WORKER_COLD_ID" "$REGION"; then + aws logs get-query-results --query-id ${WORKER_COLD_ID} --region ${REGION} --output table +fi + +echo_info "" +echo_info "========================================" +echo_info "CloudWatch Metrics (Lambda)" +echo_info "========================================" + +# Skip CloudWatch Metrics in quiet mode (optional, can fail) +if [ "$QUIET_MODE" = true ]; then + echo_info "(Skipped in quiet mode)" +else + +# 5. Concurrent Executions +echo_info "" +echo_info "5. 
Concurrent Executions"
echo_info "----------------------------------------"

echo_info "Router Lambda:"
# FIX: --end-time previously used "now" ($(date -u ...)) while --start-time used
# the test window start. When analyzing an older run via --from-test this queried
# the wrong window. Use the buffered test end (END_TIMESTAMP_BUFFERED, defined
# above as END_TIMESTAMP + 300s for log/metric ingestion delay) instead.
# The `date -d @.. || date -r ..` pairs keep Linux/macOS compatibility.
aws cloudwatch get-metric-statistics \
  --namespace AWS/Lambda \
  --metric-name ConcurrentExecutions \
  --dimensions Name=FunctionName,Value=laco-${ENVIRONMENT}-slack-router \
  --start-time $(date -u -d @$START_TIMESTAMP +%Y-%m-%dT%H:%M:%S 2>/dev/null || date -u -r $START_TIMESTAMP +%Y-%m-%dT%H:%M:%S) \
  --end-time $(date -u -d @$END_TIMESTAMP_BUFFERED +%Y-%m-%dT%H:%M:%S 2>/dev/null || date -u -r $END_TIMESTAMP_BUFFERED +%Y-%m-%dT%H:%M:%S) \
  --period ${CLOUDWATCH_PERIOD} \
  --statistics Maximum Average \
  --region ${REGION} \
  --output table 2>/dev/null || echo " (No data)"

echo_info ""
echo_info "Echo Worker Lambda:"
aws cloudwatch get-metric-statistics \
  --namespace AWS/Lambda \
  --metric-name ConcurrentExecutions \
  --dimensions Name=FunctionName,Value=laco-${ENVIRONMENT}-chatbot-echo-worker \
  --start-time $(date -u -d @$START_TIMESTAMP +%Y-%m-%dT%H:%M:%S 2>/dev/null || date -u -r $START_TIMESTAMP +%Y-%m-%dT%H:%M:%S) \
  --end-time $(date -u -d @$END_TIMESTAMP_BUFFERED +%Y-%m-%dT%H:%M:%S 2>/dev/null || date -u -r $END_TIMESTAMP_BUFFERED +%Y-%m-%dT%H:%M:%S) \
  --period ${CLOUDWATCH_PERIOD} \
  --statistics Maximum Average \
  --region ${REGION} \
  --output table 2>/dev/null || echo " (No data)"

echo_info ""
echo_info "6. Throttles"
echo_info "----------------------------------------"

echo_info "Router Throttles:"
aws cloudwatch get-metric-statistics \
  --namespace AWS/Lambda \
  --metric-name Throttles \
  --dimensions Name=FunctionName,Value=laco-${ENVIRONMENT}-slack-router \
  --start-time $(date -u -d @$START_TIMESTAMP +%Y-%m-%dT%H:%M:%S 2>/dev/null || date -u -r $START_TIMESTAMP +%Y-%m-%dT%H:%M:%S) \
  --end-time $(date -u -d @$END_TIMESTAMP_BUFFERED +%Y-%m-%dT%H:%M:%S 2>/dev/null || date -u -r $END_TIMESTAMP_BUFFERED +%Y-%m-%dT%H:%M:%S) \
  --period ${CLOUDWATCH_PERIOD} \
  --statistics Sum \
  --region ${REGION} \
  --output table 2>/dev/null || echo " (No data)"

echo_info ""
echo_info "Worker Throttles:"
aws cloudwatch get-metric-statistics \
  --namespace AWS/Lambda \
  --metric-name Throttles \
  --dimensions Name=FunctionName,Value=laco-${ENVIRONMENT}-chatbot-echo-worker \
  --start-time $(date -u -d @$START_TIMESTAMP +%Y-%m-%dT%H:%M:%S 2>/dev/null || date -u -r $START_TIMESTAMP +%Y-%m-%dT%H:%M:%S) \
  --end-time $(date -u -d @$END_TIMESTAMP_BUFFERED +%Y-%m-%dT%H:%M:%S 2>/dev/null || date -u -r $END_TIMESTAMP_BUFFERED +%Y-%m-%dT%H:%M:%S) \
  --period ${CLOUDWATCH_PERIOD} \
  --statistics Sum \
  --region ${REGION} \
  --output table 2>/dev/null || echo " (No data)"

echo_info ""
echo_info "========================================"
echo_info "SQS Metrics"
echo_info "========================================"

# 7. SQS Queue Metrics
echo_info ""
echo_info "7. SQS Queue Age"
echo_info "----------------------------------------"

aws cloudwatch get-metric-statistics \
  --namespace AWS/SQS \
  --metric-name ApproximateAgeOfOldestMessage \
  --dimensions Name=QueueName,Value=laco-${ENVIRONMENT}-chatbot-echo \
  --start-time $(date -u -d @$START_TIMESTAMP +%Y-%m-%dT%H:%M:%S 2>/dev/null || date -u -r $START_TIMESTAMP +%Y-%m-%dT%H:%M:%S) \
  --end-time $(date -u -d @$END_TIMESTAMP_BUFFERED +%Y-%m-%dT%H:%M:%S 2>/dev/null || date -u -r $END_TIMESTAMP_BUFFERED +%Y-%m-%dT%H:%M:%S) \
  --period ${CLOUDWATCH_PERIOD} \
  --statistics Average Maximum \
  --region ${REGION} \
  --output table 2>/dev/null || echo " (No data)"

echo_info ""
echo_info "8. SQS Queue Depth"
echo_info "----------------------------------------"

aws cloudwatch get-metric-statistics \
  --namespace AWS/SQS \
  --metric-name ApproximateNumberOfMessagesVisible \
  --dimensions Name=QueueName,Value=laco-${ENVIRONMENT}-chatbot-echo \
  --start-time $(date -u -d @$START_TIMESTAMP +%Y-%m-%dT%H:%M:%S 2>/dev/null || date -u -r $START_TIMESTAMP +%Y-%m-%dT%H:%M:%S) \
  --end-time $(date -u -d @$END_TIMESTAMP_BUFFERED +%Y-%m-%dT%H:%M:%S 2>/dev/null || date -u -r $END_TIMESTAMP_BUFFERED +%Y-%m-%dT%H:%M:%S) \
  --period ${CLOUDWATCH_PERIOD} \
  --statistics Average Maximum \
  --region ${REGION} \
  --output table 2>/dev/null || echo " (No data)"

fi # End of quiet mode skip

echo_info ""
echo_info "========================================"
echo_info "Component-Level Breakdown"
echo_info "========================================"
echo_info ""
echo_info "Latency breakdown from Performance metrics (Section 0):"
echo_info ""
# Metric names below match the aliases produced by the Section 0 Insights query
# (avg_queue_wait_ms, avg_worker_ms, avg_sync_response_ms, avg_async_response_ms).
echo_info "┌──────────────────────────────────────────────────────────┐"
echo_info "│ Component Flow │ Metric │"
echo_info "├──────────────────────────────────────────────────────────┤"
echo_info "│ 1. API Gateway → Router Lambda │ See section 1 │"
echo_info "│ 2. Router → EventBridge → SQS │ avg_queue_wait_ms │"
echo_info "│ 3. Worker Lambda Processing │ avg_worker_ms │"
echo_info "│ ├─ Sync Response │ avg_sync_response_ms │"
echo_info "│ └─ Async Response │ avg_async_response_ms │"
echo_info "│ Total E2E (API Gateway → Done) │ avg_e2e_ms │"
echo_info "└──────────────────────────────────────────────────────────┘"
echo_info ""
echo_info "Formula:"
echo_info " totalE2eMs = workerDurationMs + queueWaitMs"
echo_info " queueWaitMs = Router processing + EventBridge + SQS polling"
echo_info ""
echo_info "========================================"
echo_info "Analysis Complete"
echo_info "========================================"
echo_info ""
echo_info "Summary: Component performance analyzed with structured metrics"
echo_info ""
echo_info "Key Metrics to Check:"
echo_info " 1. 
E2E P95 < 3000ms (Total user experience)" +echo_info " 2. Queue Wait avg < 1000ms (Router + EventBridge + SQS)" +echo_info " 3. Worker P95 < 2500ms (Command processing)" +echo_info " 4. Sync Response < 500ms (First Slack response)" +echo_info " 5. No throttles (Concurrency OK)" +echo_info " 6. Error rate < 1% (System stable)" +echo_info "" diff --git a/applications/chatops/slack-bot/performance-tests/artillery-config.yml b/applications/chatops/slack-bot/performance-tests/artillery-config.yml new file mode 100644 index 0000000..f78ba54 --- /dev/null +++ b/applications/chatops/slack-bot/performance-tests/artillery-config.yml @@ -0,0 +1,88 @@ +config: + target: "{{ $processEnvironment.API_GATEWAY_URL }}" + phases: + # Warm-up phase - ensure Lambdas are warm + - duration: 60 + arrivalRate: 2 + name: "Warm-up (2 req/s)" + + # Gradual ramp-up to test scaling + - duration: 180 + arrivalRate: 5 + rampTo: 20 + name: "Ramp-up (5 -> 20 req/s)" + + # Sustained load - test steady state performance + - duration: 300 + arrivalRate: 20 + name: "Sustained load (20 req/s)" + + # Approach API Gateway limit + - duration: 120 + arrivalRate: 20 + rampTo: 45 + name: "High load (20 -> 45 req/s)" + + # Cool-down + - duration: 60 + arrivalRate: 5 + name: "Cool-down (5 req/s)" + + processor: "./slack-signature-processor.js" + + plugins: + metrics-by-endpoint: + stripQueryString: true + metricsNamespace: "slack_bot_perf_test" + + # Performance thresholds + ensure: + maxErrorRate: 1 # Max 1% error rate + p95: 3000 # 95th percentile under 3 seconds + p99: 5000 # 99th percentile under 5 seconds + +scenarios: + # Weighted distribution matching realistic usage + - name: "Echo Command (Fast)" + weight: 40 + flow: + - post: + url: "/" + headers: + Content-Type: "application/x-www-form-urlencoded" + beforeRequest: "generateSlackSignature" + body: "token=test&team_id=T123&team_domain=test&channel_id=C123&channel_name=general&user_id=U{{ $randomNumber(1000, 9999) 
}}&user_name=testuser&command=/echo&text=performance test {{ $randomNumber(1, 10000) }}&response_url=https://hooks.slack.com/commands/T123/456/token&trigger_id=123.456.abc" + afterResponse: "captureMetrics" + + - name: "Status Command (Medium)" + weight: 30 + flow: + - post: + url: "/" + headers: + Content-Type: "application/x-www-form-urlencoded" + beforeRequest: "generateSlackSignature" + body: "token=test&team_id=T123&team_domain=test&channel_id=C123&channel_name=general&user_id=U{{ $randomNumber(1000, 9999) }}&user_name=testuser&command=/status&text=&response_url=https://hooks.slack.com/commands/T123/456/token&trigger_id=123.456.abc" + afterResponse: "captureMetrics" + + - name: "Deploy Command (Slow)" + weight: 20 + flow: + - post: + url: "/" + headers: + Content-Type: "application/x-www-form-urlencoded" + beforeRequest: "generateSlackSignature" + body: "token=test&team_id=T123&team_domain=test&channel_id=C123&channel_name=general&user_id=U{{ $randomNumber(1000, 9999) }}&user_name=testuser&command=/deploy&text=app-v{{ $randomNumber(1, 100) }}.0&response_url=https://hooks.slack.com/commands/T123/456/token&trigger_id=123.456.abc" + afterResponse: "captureMetrics" + + - name: "Build Command (Slow)" + weight: 10 + flow: + - post: + url: "/" + headers: + Content-Type: "application/x-www-form-urlencoded" + beforeRequest: "generateSlackSignature" + body: "token=test&team_id=T123&team_domain=test&channel_id=C123&channel_name=general&user_id=U{{ $randomNumber(1000, 9999) }}&user_name=testuser&command=/build&text=main&response_url=https://hooks.slack.com/commands/T123/456/token&trigger_id=123.456.abc" + afterResponse: "captureMetrics" diff --git a/applications/chatops/slack-bot/performance-tests/artillery-echo-light.yml b/applications/chatops/slack-bot/performance-tests/artillery-echo-light.yml new file mode 100644 index 0000000..22063ae --- /dev/null +++ b/applications/chatops/slack-bot/performance-tests/artillery-echo-light.yml @@ -0,0 +1,65 @@ +config: + target: "{{ 
$processEnvironment.API_GATEWAY_URL }}" + + # HTTP timeout configuration (increased to handle high load) + http: + timeout: 30 # 30 seconds timeout (default: 10s) + pool: 50 # Connection pool size + + # Enable console reporting + engines: + socketio: + maxPoolSize: 50 + + phases: + # Warm-up phase + - duration: 30 + arrivalRate: 2 + name: "Warm-up (2 req/s)" + + # Gradual ramp-up (API Gateway 제한 고려) + - duration: 120 + arrivalRate: 5 + rampTo: 20 + name: "Ramp-up (5 -> 20 req/s)" + + # Sustained load (안전한 범위) + - duration: 180 + arrivalRate: 20 + name: "Sustained load (20 req/s)" + + # Medium-high load (제한의 80%) + - duration: 60 + arrivalRate: 20 + rampTo: 40 + name: "High load (20 -> 40 req/s)" + + # Cool-down + - duration: 30 + arrivalRate: 5 + name: "Cool-down (5 req/s)" + + processor: "./slack-signature-processor.js" + + plugins: + metrics-by-endpoint: + stripQueryString: true + metricsNamespace: "slack_bot_echo_light" + + # Performance thresholds for echo command + ensure: + maxErrorRate: 1 # Max 1% error rate + p95: 2000 # 95th percentile under 2s + p99: 3000 # 99th percentile under 3s + +scenarios: + # Only echo command - API Gateway 제한 고려 + - name: "Echo Command Performance Test (Light)" + flow: + - post: + url: "/" + headers: + Content-Type: "application/x-www-form-urlencoded" + beforeRequest: "generateSlackSignature" + body: "" # Body will be generated in beforeRequest hook + afterResponse: "captureMetrics" diff --git a/applications/chatops/slack-bot/performance-tests/artillery-echo-minimal.yml b/applications/chatops/slack-bot/performance-tests/artillery-echo-minimal.yml new file mode 100644 index 0000000..5ffb318 --- /dev/null +++ b/applications/chatops/slack-bot/performance-tests/artillery-echo-minimal.yml @@ -0,0 +1,49 @@ +config: + target: "{{ $processEnvironment.API_GATEWAY_URL }}" + + phases: + # Cold start 측정 (1 request) + - duration: 5 + arrivalRate: 1 + name: "Cold start check" + + # Warm-up (통계적으로 유의미한 샘플: 50개) + - duration: 25 + arrivalRate: 2 
+ name: "Warm state baseline (2 req/s)" + + # Peak load test (짧게 부하 확인: 30개) + - duration: 10 + arrivalRate: 3 + rampTo: 6 + name: "Peak load spike (3 -> 6 req/s)" + + # Sustained performance (안정 상태 확인: 40개) + - duration: 20 + arrivalRate: 2 + name: "Sustained performance (2 req/s)" + + processor: "./slack-signature-processor.js" + + plugins: + metrics-by-endpoint: + stripQueryString: true + metricsNamespace: "slack_bot_echo_minimal" + + # Performance thresholds + ensure: + maxErrorRate: 1 # Max 1% error rate + p95: 3000 # 95th percentile under 3s + p99: 4000 # 99th percentile under 4s + +scenarios: + # Minimal echo command test - 모든 메트릭 확인 가능 + - name: "Echo Command Minimal Test" + flow: + - post: + url: "/" + headers: + Content-Type: "application/x-www-form-urlencoded" + beforeRequest: "generateSlackSignature" + body: "" # Body will be generated in beforeRequest hook + afterResponse: "captureMetrics" diff --git a/applications/chatops/slack-bot/performance-tests/artillery-echo-only.yml b/applications/chatops/slack-bot/performance-tests/artillery-echo-only.yml new file mode 100644 index 0000000..4c8c1b3 --- /dev/null +++ b/applications/chatops/slack-bot/performance-tests/artillery-echo-only.yml @@ -0,0 +1,54 @@ +config: + target: "{{ $processEnvironment.API_GATEWAY_URL }}" + phases: + # Warm-up phase + - duration: 30 + arrivalRate: 2 + name: "Warm-up (2 req/s)" + + # Gradual ramp-up + - duration: 120 + arrivalRate: 5 + rampTo: 30 + name: "Ramp-up (5 -> 30 req/s)" + + # Sustained load + - duration: 180 + arrivalRate: 30 + name: "Sustained load (30 req/s)" + + # High load test + - duration: 60 + arrivalRate: 30 + rampTo: 50 + name: "High load (30 -> 50 req/s)" + + # Cool-down + - duration: 30 + arrivalRate: 5 + name: "Cool-down (5 req/s)" + + processor: "./slack-signature-processor.js" + + plugins: + metrics-by-endpoint: + stripQueryString: true + metricsNamespace: "slack_bot_echo_test" + + # Performance thresholds for echo command + ensure: + maxErrorRate: 1 # Max 
1% error rate + p95: 2000 # Echo should be faster - 95th percentile under 2s + p99: 3000 # 99th percentile under 3s + +scenarios: + # Only echo command - simplest, fastest processing + - name: "Echo Command Performance Test" + flow: + - post: + url: "/" + headers: + Content-Type: "application/x-www-form-urlencoded" + beforeRequest: "generateSlackSignature" + body: "token=test&team_id=T123&team_domain=test&channel_id=C123&channel_name=general&user_id=U{{ $randomNumber(1000, 9999) }}&user_name=testuser{{ $randomNumber(1, 100) }}&command=/echo&text=performance test message {{ $randomNumber(1, 100000) }}&response_url=https://hooks.slack.com/commands/T123/456/token&trigger_id=123.456.abc" + afterResponse: "captureMetrics" diff --git a/applications/chatops/slack-bot/performance-tests/capture-report.js b/applications/chatops/slack-bot/performance-tests/capture-report.js new file mode 100644 index 0000000..1efeba7 --- /dev/null +++ b/applications/chatops/slack-bot/performance-tests/capture-report.js @@ -0,0 +1,91 @@ +#!/usr/bin/env node +/** + * Capture performance report HTML as a screenshot + * Usage: node capture-report.js [html-file] [output-file] + * Example: node capture-report.js results/test-20260102-155018.html results/test-20260102-155018.png + */ + +const fs = require('fs'); +const path = require('path'); + +async function captureReport() { + // Get arguments + const htmlFile = process.argv[2]; + const outputFile = process.argv[3]; + + if (!htmlFile) { + console.error('❌ Usage: node capture-report.js [output-file]'); + console.error('Example: node capture-report.js results/test-20260102-155018.html'); + process.exit(1); + } + + // Resolve paths + const htmlPath = path.resolve(htmlFile); + const outPath = outputFile + ? 
path.resolve(outputFile) + : htmlPath.replace(/\.html$/, '.png'); + + // Check if HTML file exists + if (!fs.existsSync(htmlPath)) { + console.error(`❌ HTML file not found: ${htmlPath}`); + process.exit(1); + } + + try { + // Dynamically import puppeteer + let puppeteer; + try { + puppeteer = await import('puppeteer'); + } catch (e) { + console.error('❌ Puppeteer not installed. Install with:'); + console.error(' npm install puppeteer --save-dev'); + console.error(' Or: yarn add -D puppeteer'); + process.exit(1); + } + + console.log(`📸 Capturing HTML report: ${htmlPath}`); + + const browser = await puppeteer.default.launch({ + headless: 'new', + args: ['--no-sandbox', '--disable-setuid-sandbox'] + }); + + const page = await browser.newPage(); + + // Set viewport to capture full width + await page.setViewport({ + width: 1400, + height: 900, + deviceScaleFactor: 2 // 2x for better quality + }); + + // Load HTML file + await page.goto(`file://${htmlPath}`, { + waitUntil: 'networkidle0' + }); + + // Get full page height + const fullHeight = await page.evaluate(() => { + return document.documentElement.scrollHeight; + }); + + // Screenshot full page + await page.screenshot({ + path: outPath, + fullPage: true, + type: 'png' + }); + + await browser.close(); + + const fileSize = (fs.statSync(outPath).size / 1024).toFixed(1); + console.log(`✅ Screenshot saved: ${outPath} (${fileSize}KB)`); + console.log(` Dimensions: 1400x${fullHeight}px @ 2x scale`); + + } catch (error) { + console.error('❌ Failed to capture report:', error.message); + process.exit(1); + } +} + +captureReport(); diff --git a/applications/chatops/slack-bot/performance-tests/render-report.js b/applications/chatops/slack-bot/performance-tests/render-report.js new file mode 100644 index 0000000..4de5f71 --- /dev/null +++ b/applications/chatops/slack-bot/performance-tests/render-report.js @@ -0,0 +1,1652 @@ +#!/usr/bin/env node +"use strict"; + +const fs = require("fs"); +const path = require("path"); + 
+function die(message) { + console.error(message); + process.exit(1); +} + +const inputPath = process.argv[2]; +if (!inputPath) { + die("Usage: node render-report.js [output.html]"); +} + +if (!fs.existsSync(inputPath)) { + die(`Input not found: ${inputPath}`); +} + +let outputPath = process.argv[3]; +if (!outputPath) { + outputPath = inputPath.endsWith(".json") + ? inputPath.slice(0, -5) + ".html" + : inputPath + ".html"; +} + +// Load .metrics.json file if exists +let metricsData = null; +if (inputPath.endsWith(".json") && !inputPath.includes(".metrics.json")) { + const metricsPath = inputPath.slice(0, -5) + ".metrics.json"; + if (fs.existsSync(metricsPath)) { + try { + metricsData = JSON.parse(fs.readFileSync(metricsPath, "utf8")); + console.log(`✓ Loaded metrics from: ${metricsPath}`); + } catch (err) { + console.warn(`Warning: Failed to parse metrics: ${err.message}`); + } + } +} + +let raw; +try { + raw = JSON.parse(fs.readFileSync(inputPath, "utf8")); +} catch (err) { + die(`Failed to parse JSON: ${err.message}`); +} + +const aggregate = raw.aggregate || {}; +const counters = aggregate.counters || {}; +const summaries = aggregate.summaries || {}; +const histograms = aggregate.histograms || {}; +const intermediate = Array.isArray(raw.intermediate) ? raw.intermediate : []; + +const responseSummary = summaries["http.response_time"] || {}; +const sessionSummary = summaries["vusers.session_length"] || {}; +const responseHistogram = histograms["http.response_time"] || {}; + +const toNumber = (value) => (Number.isFinite(value) ? value : 0); + +const totalRequests = toNumber(counters["http.requests"]); +const totalResponses = toNumber(counters["http.responses"]); +const errorCount = toNumber(counters["errors.total"]); +const durationMs = toNumber(sessionSummary.max); +const avgRps = durationMs ? 
totalRequests / (durationMs / 1000) : 0; + +const httpCodes = {}; +const otherCodes = {}; +Object.entries(counters).forEach(([key, value]) => { + const match = key.match(/(?:^|\\.)codes\\.(\\d+)$/); + if (!match) return; + const code = match[1]; + if (key.startsWith("http.codes.")) { + httpCodes[code] = (httpCodes[code] || 0) + toNumber(value); + } else { + otherCodes[code] = (otherCodes[code] || 0) + toNumber(value); + } +}); + +const codesSource = Object.keys(httpCodes).length ? httpCodes : otherCodes; +const codes = Object.entries(codesSource) + .map(([code, count]) => ({ code, count })) + .sort((a, b) => Number(a.code) - Number(b.code)); + +const errorMap = {}; +Object.entries(counters).forEach(([key, value]) => { + // Only count top-level errors.* to avoid duplicates + // Artillery creates both "errors.ETIMEDOUT" and "scenario.endpoint.errors.ETIMEDOUT" + if (!key.startsWith("errors.")) return; + const label = key.slice("errors.".length); + if (label === "total") return; + errorMap[label] = toNumber(value); +}); + +const errors = Object.entries(errorMap) + .map(([label, count]) => ({ label, count })) + .sort((a, b) => b.count - a.count); + +const summary = { + requests: totalRequests, + responses: totalResponses, + errors: errorCount, + errorRate: totalRequests ? (errorCount / totalRequests) * 100 : 0, + durationMs, + avgRps, + min: toNumber(responseSummary.min), + max: toNumber(responseSummary.max), + mean: toNumber(responseSummary.mean), + median: toNumber(responseSummary.median || responseSummary.p50), + p95: toNumber(responseSummary.p95), + p99: toNumber(responseSummary.p99), + cloudwatch: metricsData ? metricsData.cloudwatch : null, + testInfo: metricsData ? 
{ + timestamp: metricsData.timestamp, + environment: metricsData.environment, + testFile: metricsData.testFile, + timeRange: metricsData.timeRange + } : null, +}; + +const vusers = { + created: toNumber(counters["vusers.created"]), + completed: toNumber(counters["vusers.completed"]), + failed: toNumber(counters["vusers.failed"]), + scenarios: Object.entries(counters) + .filter(([key]) => key.startsWith("vusers.created_by_name.")) + .map(([key, value]) => ({ + name: key.replace("vusers.created_by_name.", ""), + count: toNumber(value), + })) + .sort((a, b) => b.count - a.count), +}; + +summary.vusersCreated = vusers.created; +summary.vusersCompleted = vusers.completed; +summary.vusersFailed = vusers.failed; + +const endpoints = Object.entries(summaries) + .filter(([key]) => key.includes(".response_time.") && !key.startsWith("http.")) + .map(([key, stats]) => { + const label = key.split(".response_time.")[1] || key; + const histogram = histograms[key] || {}; + return { + label, + min: toNumber(stats.min), + max: toNumber(stats.max), + mean: toNumber(stats.mean), + median: toNumber(stats.median || stats.p50), + p95: toNumber(stats.p95 || histogram.p95), + p99: toNumber(stats.p99 || histogram.p99), + count: toNumber(stats.count), + }; + }) + .sort((a, b) => b.p95 - a.p95); + +const rawAggregate = { + counters, + summaries, + histograms, + rates: aggregate.rates || {}, + firstCounterAt: aggregate.firstCounterAt, + lastCounterAt: aggregate.lastCounterAt, + firstHistogramAt: aggregate.firstHistogramAt, + lastHistogramAt: aggregate.lastHistogramAt, + firstMetricAt: aggregate.firstMetricAt, + lastMetricAt: aggregate.lastMetricAt, + period: aggregate.period, +}; + +const series = intermediate + .map((item, index) => { + const stats = (item.summaries || {})["http.response_time"] || {}; + const rates = item.rates || {}; + const ts = item.period || item.firstMetricAt || index; + return { + ts, + median: toNumber(stats.median || stats.p50), + p95: toNumber(stats.p95), + p99: 
toNumber(stats.p99), + rps: toNumber(rates["http.request_rate"]), + }; + }) + .filter((point) => point.ts !== undefined); + +const percentileKeys = ["p50", "p75", "p90", "p95", "p99", "p999"]; +const percentiles = percentileKeys + .map((key) => ({ + label: key.toUpperCase(), + value: toNumber(responseHistogram[key]), + })) + .filter((item) => item.value > 0); + +const reportData = { + meta: { + source: path.basename(inputPath), + generatedAt: new Date().toISOString(), + }, + summary, + series, + percentiles, + codes, + errors, + endpoints, + scenarios: vusers.scenarios, + rawAggregate, +}; + +const html = ` + + + + + Artillery E2E Report + + + + + + +
+

Artillery E2E Report

+
+
+
+ + +

1. Test Configuration

+ +
+
+
+
Environment
+
-
+
+
+
Test Started
+
-
+
+
+
Test Duration
+
+
+
+
Test File
+
-
+
+
+
+
+
Total VUsers
+
+
+
+
Total Requests
+
+
+
+
Avg Request Rate
+
+
+
+
+ +

2. Summary

+ + +

Throughput

+
+
+
E2E Throughput
+
-
+
requests/sec
+
+
+
Router RPS
+
-
+
invocations/sec
+
+
+
Worker RPS
+
-
+
invocations/sec
+
+
+
Total Processed
+
-
+
E2E requests
+
+
+ + +

Requests & Errors

+
+
+
Total Requests
+
+
+
+
Successful Responses
+
+
+
+
VUsers Completed
+
+
+ 0 failed +
+
+
+
Error Rate
+
+
+
+
+ +

3. Client-Side Metrics

+
+ Measurement Scope: HTTP request sent from client → Response received by client
+ Includes: Network latency + API Gateway processing + Lambda cold start + Execution time +
+ +
+
+
Median Latency
+
+
+
+
P95 Latency
+
+
+
+
P99 Latency
+
+
+
+
Max Latency
+
+
+
+ +
+
+

Client Response Time (HTTP Round-Trip)

+
+ + + +
+ +
+
+

Request Rate (req/s)

+ +
+
+

Latency Percentiles (ms)

+ +
+
+

Status Codes

+
No status codes recorded.
+ +
+
+

Error Types

+
No errors recorded.
+ +
+
+ +
+
+

Endpoint Latency (ms)

+ + + + + + + + + + + +
EndpointMedianP95P99Count
+
+
+

Scenario VUsers

+ + + + + + + + +
ScenarioCreated
+
+
+ + + + +

4. Raw Data (Evidence)

+
+
+ Counters +

+        
+
+ Summaries +

+        
+
+ Histograms +

+        
+
+ Rates +

+        
+
+ Timing Metadata +

+        
+
+
+ Charts are rendered in-browser using Chart.js (CDN). +
+
+ + + + + +`; + +fs.writeFileSync(outputPath, html, "utf8"); +console.log(`Report written to ${outputPath}`); diff --git a/applications/chatops/slack-bot/performance-tests/slack-signature-processor.js b/applications/chatops/slack-bot/performance-tests/slack-signature-processor.js new file mode 100644 index 0000000..df7fc9a --- /dev/null +++ b/applications/chatops/slack-bot/performance-tests/slack-signature-processor.js @@ -0,0 +1,133 @@ +const crypto = require('crypto'); +const { SSMClient, GetParameterCommand } = require('@aws-sdk/client-ssm'); + +let slackSigningSecret = null; +let isFetchingSecret = false; +let secretPromise = null; + +/** + * Fetch Slack signing secret from AWS SSM Parameter Store + * Cached after first retrieval for performance + */ +async function getSlackSigningSecret() { + // Return cached secret if available + if (slackSigningSecret) { + return slackSigningSecret; + } + + // If already fetching, wait for that promise + if (isFetchingSecret && secretPromise) { + return secretPromise; + } + + // Start fetching + isFetchingSecret = true; + secretPromise = (async () => { + try { + const environment = process.env.ENVIRONMENT || 'plt'; + const region = process.env.AWS_REGION || 'ca-central-1'; + const parameterName = `/laco/${environment}/aws/secrets/slack/signing-secret`; + + console.log(`Fetching Slack signing secret from SSM: ${parameterName}`); + + const ssm = new SSMClient({ region }); + const response = await ssm.send(new GetParameterCommand({ + Name: parameterName, + WithDecryption: true + })); + + slackSigningSecret = response.Parameter.Value; + console.log('✓ Slack signing secret retrieved successfully'); + return slackSigningSecret; + } catch (error) { + console.error('✗ Failed to fetch Slack signing secret from SSM:', error.message); + console.error(' Make sure you have AWS credentials configured and access to Parameter Store'); + throw error; + } finally { + isFetchingSecret = false; + } + })(); + + return secretPromise; +} + +/** + 
* Generate Slack request signature + * See: https://api.slack.com/authentication/verifying-requests-from-slack + * + * IMPORTANT: Must be called AFTER Artillery processes templates + * This is a workaround since Artillery evaluates templates after beforeRequest hooks + */ +function generateSlackSignature(requestParams, context, ee, next) { + // Generate random values first (to replace Artillery templates) + const userId = crypto.randomInt(1000, 10000); // 1000–9999 inclusive + const userName = crypto.randomInt(1, 101); // 1–100 inclusive + const messageId = crypto.randomInt(1, 100001); // 1–100000 inclusive + + // Build body with actual values (not templates) + // Use special response_url for performance tests (will be mocked in Lambda) + const body = `token=test&team_id=T123&team_domain=test&channel_id=C123&channel_name=general&user_id=U${userId}&user_name=testuser${userName}&command=/echo&text=performance test message ${messageId}&response_url=https://hooks.slack.com/test/perf-test-mock&trigger_id=123.456.abc`; + + // Replace requestParams.body with our pre-evaluated body + requestParams.body = body; + + const timestamp = Math.floor(Date.now() / 1000); + + getSlackSigningSecret() + .then(secret => { + // Create signature base string (must match exactly what Slack/Lambda expects) + const sigBasestring = `v0:${timestamp}:${body}`; + + // Generate HMAC signature + const signature = 'v0=' + crypto + .createHmac('sha256', secret) + .update(sigBasestring) + .digest('hex'); + + // Add headers to request (case-sensitive for API Gateway) + requestParams.headers['X-Slack-Request-Timestamp'] = timestamp.toString(); + requestParams.headers['X-Slack-Signature'] = signature; + + // Debug log for first few requests + if (context.vars.$loopCount === undefined || context.vars.$loopCount < 3) { + console.log(`[DEBUG] Signature generated for timestamp ${timestamp}`); + console.log(`[DEBUG] Body length: ${body.length}`); + console.log(`[DEBUG] Body preview: ${body.substring(0, 
100)}...`); + console.log(`[DEBUG] Signature: ${signature.substring(0, 20)}...`); + } + + return next(); + }) + .catch(err => { + console.error('Failed to generate signature:', err); + return next(err); + }); +} + +/** + * Capture response metrics for analysis + */ +function captureMetrics(requestParams, response, context, ee, next) { + const statusCode = response.statusCode; + const responseTime = response.timings.phases.total; + + // Log slow responses + if (responseTime > 3000) { + console.log(`⚠ Slow response: ${responseTime}ms (status: ${statusCode})`); + } + + // Log errors + if (statusCode >= 400) { + console.log(`✗ Error response: ${statusCode} (time: ${responseTime}ms)`); + if (response.body) { + console.log(` Body: ${response.body.substring(0, 200)}`); + } + } + + return next(); +} + +module.exports = { + generateSlackSignature, + captureMetrics +}; diff --git a/applications/chatops/slack-bot/performance-tests/test-curl.sh b/applications/chatops/slack-bot/performance-tests/test-curl.sh new file mode 100755 index 0000000..375d7ed --- /dev/null +++ b/applications/chatops/slack-bot/performance-tests/test-curl.sh @@ -0,0 +1,93 @@ +#!/bin/bash +# Test Slack signature with curl + +set -e + +# Check if API Gateway URL is provided +if [ -z "$1" ]; then + echo "Usage: $0 " + echo "" + echo "Example:" + echo " $0 https://xxxxxx.execute-api.ca-central-1.amazonaws.com/prod/slack" + echo "" + exit 1 +fi + +API_GATEWAY_URL="$1" +ENVIRONMENT="${ENVIRONMENT:-plt}" + +echo "========================================" +echo "Slack Signature Test with curl" +echo "========================================" +echo "" + +# Get signing secret from SSM +echo "Fetching Slack signing secret..." 
+SIGNING_SECRET=$(aws ssm get-parameter \ + --name "/laco/${ENVIRONMENT}/aws/secrets/slack/signing-secret" \ + --with-decryption \ + --query 'Parameter.Value' \ + --output text \ + --region ca-central-1) + +if [ -z "$SIGNING_SECRET" ]; then + echo "ERROR: Could not fetch signing secret" + exit 1 +fi + +echo "✓ Signing secret retrieved" +echo "" + +# Prepare request +TIMESTAMP=$(date +%s) +BODY="token=test&team_id=T123&team_domain=test&channel_id=C123&channel_name=general&user_id=U1234&user_name=testuser&command=/echo&text=test message&response_url=https://hooks.slack.com/test&trigger_id=123.456" + +echo "Request details:" +echo " Timestamp: $TIMESTAMP" +echo " Body: ${BODY:0:80}..." +echo "" + +# Generate signature +SIG_BASESTRING="v0:${TIMESTAMP}:${BODY}" +SIGNATURE="v0=$(echo -n "$SIG_BASESTRING" | openssl dgst -sha256 -hmac "$SIGNING_SECRET" | awk '{print $2}')" + +echo "Generated signature: ${SIGNATURE:0:30}..." +echo "" + +# Make request +echo "Sending request to: ${API_GATEWAY_URL}" +echo "" + +# Use --data-raw to prevent curl from encoding the body +RESPONSE=$(curl -s -w "\nHTTP_STATUS:%{http_code}" \ + -X POST "${API_GATEWAY_URL}" \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -H "X-Slack-Request-Timestamp: ${TIMESTAMP}" \ + -H "X-Slack-Signature: ${SIGNATURE}" \ + --data-raw "$BODY") + +HTTP_STATUS=$(echo "$RESPONSE" | grep "HTTP_STATUS:" | cut -d: -f2) +BODY_RESPONSE=$(echo "$RESPONSE" | sed '/HTTP_STATUS:/d') + +echo "========================================" +echo "Response:" +echo "========================================" +echo "Status: $HTTP_STATUS" +echo "Body: $BODY_RESPONSE" +echo "" + +if [ "$HTTP_STATUS" == "200" ]; then + echo "✓ SUCCESS - Signature validation passed!" 
+ exit 0 +elif [ "$HTTP_STATUS" == "401" ] || [ "$HTTP_STATUS" == "403" ]; then + echo "✗ FAILED - Signature validation failed (401/403)" + echo "" + echo "Debugging info:" + echo " - Check that signing secret is correct in SSM" + echo " - Verify timestamp is not too old (within 5 minutes)" + echo " - Ensure body format matches exactly what Lambda expects" + exit 1 +else + echo "✗ FAILED - Unexpected status code: $HTTP_STATUS" + exit 1 +fi diff --git a/applications/chatops/slack-bot/performance-tests/test-signature.js b/applications/chatops/slack-bot/performance-tests/test-signature.js new file mode 100644 index 0000000..9c13fa9 --- /dev/null +++ b/applications/chatops/slack-bot/performance-tests/test-signature.js @@ -0,0 +1,47 @@ +// Test signature generation locally +const crypto = require('crypto'); + +// Simulated signing secret (replace with actual for testing) +const signingSecret = 'test-secret-replace-with-real'; + +// Simulate Artillery request +const timestamp = Math.floor(Date.now() / 1000); +const body = "token=test&team_id=T123&team_domain=test&channel_id=C123&channel_name=general&user_id=U1234&user_name=testuser1&command=/echo&text=performance test message 12345&response_url=https://hooks.slack.com/commands/T123/456/token&trigger_id=123.456.abc"; + +console.log('='.repeat(60)); +console.log('Testing Slack Signature Generation'); +console.log('='.repeat(60)); +console.log('\n1. Request Details:'); +console.log(' Timestamp:', timestamp); +console.log(' Body length:', body.length); +console.log(' Body:', body.substring(0, 100) + '...'); + +// Generate signature +const sigBasestring = `v0:${timestamp}:${body}`; +console.log('\n2. Signature Base String:'); +console.log(' ', sigBasestring.substring(0, 100) + '...'); + +const signature = 'v0=' + crypto + .createHmac('sha256', signingSecret) + .update(sigBasestring) + .digest('hex'); + +console.log('\n3. Generated Signature:'); +console.log(' ', signature); + +console.log('\n4. 
Headers to send:'); +console.log(' X-Slack-Request-Timestamp:', timestamp); +console.log(' X-Slack-Signature:', signature); + +console.log('\n5. Test with curl:'); +console.log(` +curl -X POST https://YOUR_API_GATEWAY_URL/slack \\ + -H "Content-Type: application/x-www-form-urlencoded" \\ + -H "X-Slack-Request-Timestamp: ${timestamp}" \\ + -H "X-Slack-Signature: ${signature}" \\ + -d '${body}' +`); + +console.log('\n' + '='.repeat(60)); +console.log('Note: Replace signingSecret with actual secret from SSM'); +console.log('='.repeat(60)); diff --git a/applications/chatops/slack-bot/performance-tests/view-results.sh b/applications/chatops/slack-bot/performance-tests/view-results.sh new file mode 100755 index 0000000..e5d5e44 --- /dev/null +++ b/applications/chatops/slack-bot/performance-tests/view-results.sh @@ -0,0 +1,140 @@ +#!/bin/bash +# View Artillery test results in terminal + +# metrics.json 파일 제외하고 실제 Artillery 테스트 결과만 찾기 +LATEST_JSON=$(ls -t results/*-test-*.json 2>/dev/null | grep -v '\.metrics\.json' | head -n1) + +if [ -z "$LATEST_JSON" ]; then + echo "No test results found" + exit 1 +fi + +# 대응하는 metrics.json 파일 확인 +METRICS_JSON="${LATEST_JSON%.json}.metrics.json" +HAS_METRICS=0 +if [ -f "$METRICS_JSON" ]; then + HAS_METRICS=1 +fi + +echo "========================================" +echo "Performance Test Results" +echo "========================================" +echo "File: $LATEST_JSON" +if [ $HAS_METRICS -eq 1 ]; then + echo "Metrics: $METRICS_JSON (CloudWatch + E2E Data)" +fi +echo "" + +if ! command -v jq &> /dev/null; then + echo "⚠️ jq not installed. Showing raw summary..." 
+ echo "" + cat "$LATEST_JSON" | grep -A 50 "aggregate" +else + echo "Summary Statistics:" + echo "----------------------------------------" + + # Overall stats + echo "" + echo "📊 Request Statistics:" + jq -r '.aggregate.counters | to_entries[] | " \(.key): \(.value)"' "$LATEST_JSON" + + echo "" + echo "⏱️ Response Time (ms):" + jq -r '.aggregate.summaries["http.response_time"] | + " Min: \(.min)", + " Max: \(.max)", + " Median: \(.median)", + " P95: \(.p95)", + " P99: \(.p99)"' "$LATEST_JSON" + + echo "" + echo "🎯 Latency Percentiles (ms):" + jq -r '.aggregate.histograms["http.response_time"] | to_entries[] | + select(.key | tonumber? != null) | + " P\(.key): \(.value)"' "$LATEST_JSON" | sort -t: -k1 -V | head -10 + + echo "" + echo "📈 Throughput:" + DURATION=$(jq -r '.aggregate.summaries["vusers.session_length"].max // 0' "$LATEST_JSON") + REQUESTS=$(jq -r '.aggregate.counters["http.requests"] // 0' "$LATEST_JSON") + if [ "$DURATION" != "0" ]; then + RPS=$(echo "scale=2; $REQUESTS / ($DURATION / 1000)" | bc) + echo " Requests: $REQUESTS" + echo " Duration: ${DURATION}ms" + echo " Avg RPS: $RPS req/s" + fi + + echo "" + echo "❌ Error Codes:" + jq -r '.aggregate.codes // {} | to_entries[] | " \(.key): \(.value)"' "$LATEST_JSON" + + echo "" + echo "🔍 Errors:" + ERROR_COUNT=$(jq -r '.aggregate.counters["errors.total"] // 0' "$LATEST_JSON") + if [ "$ERROR_COUNT" != "0" ]; then + jq -r '.aggregate.errors // {} | to_entries[] | " \(.key): \(.value)"' "$LATEST_JSON" + else + echo " No errors" + fi + + echo "" + echo "========================================" + + # Check thresholds + echo "" + echo "Threshold Check:" + echo "----------------------------------------" + + P95=$(jq -r '.aggregate.summaries["http.response_time"].p95 // 0' "$LATEST_JSON") + P99=$(jq -r '.aggregate.summaries["http.response_time"].p99 // 0' "$LATEST_JSON") + ERROR_RATE=$(echo "scale=4; $ERROR_COUNT * 100 / $REQUESTS" | bc 2>/dev/null || echo "0") + + echo " P95 < 2000ms: $P95 ms $([ $(echo 
"$P95 < 2000" | bc) -eq 1 ] && echo "✓" || echo "✗")" + echo " P99 < 3000ms: $P99 ms $([ $(echo "$P99 < 3000" | bc) -eq 1 ] && echo "✓" || echo "✗")" + echo " Error < 1%: ${ERROR_RATE}% $([ $(echo "$ERROR_RATE < 1" | bc) -eq 1 ] && echo "✓" || echo "✗")" + + # CloudWatch Metrics가 있으면 표시 + if [ $HAS_METRICS -eq 1 ]; then + echo "" + echo "========================================" + echo "CloudWatch & E2E Metrics" + echo "========================================" + + echo "" + echo "📊 Router Lambda (API Gateway → Router):" + echo "----------------------------------------" + jq -r '.cloudwatch.router | " Invocations: \(.invocations)\n Avg: \(.avg_ms) ms\n P50: \(.p50_ms) ms\n P95: \(.p95_ms) ms\n P99: \(.p99_ms) ms\n Max: \(.max_ms) ms"' "$METRICS_JSON" + + echo "" + echo "⚙️ Worker Lambda (EventBridge → SQS → Worker):" + echo "----------------------------------------" + jq -r '.cloudwatch.worker | " Invocations: \(.invocations)\n Avg: \(.avg_ms) ms\n P50: \(.p50_ms) ms\n P95: \(.p95_ms) ms\n P99: \(.p99_ms) ms\n Max: \(.max_ms) ms"' "$METRICS_JSON" + + echo "" + echo "🔄 End-to-End (API Gateway → Worker Lambda Completion):" + echo "----------------------------------------" + E2E_DATA=$(jq -r '.cloudwatch.e2e // {} | length' "$METRICS_JSON") + if [ "$E2E_DATA" -eq 0 ]; then + echo " ⚠️ No E2E data available (Router not propagating correlation IDs)" + else + jq -r '.cloudwatch.e2e | " Invocations: \(.invocations)\n Avg: \(.avg_ms) ms\n P50: \(.p50_ms) ms\n P95: \(.p95_ms) ms\n P99: \(.p99_ms) ms\n Max: \(.max_ms) ms"' "$METRICS_JSON" + fi + + echo "" + echo "❌ Errors:" + echo "----------------------------------------" + jq -r '.cloudwatch.errors | " Router: \(.router)\n Worker: \(.worker)"' "$METRICS_JSON" + + echo "" + echo "⏱️ Test Time Range:" + echo "----------------------------------------" + jq -r '.timeRange | " Start: \((.startMs / 1000 | floor) | todate)\n End: \((.endMs / 1000 | floor) | todate)\n Duration: \(.durationMs) ms (\((.durationMs / 1000) | floor) 
sec)"' "$METRICS_JSON" + fi + +fi + +echo "" +echo "========================================" +echo "" +echo "💡 Tip: Use 'make perf-test-analyze-test' to update CloudWatch metrics" +echo "" diff --git a/applications/chatops/slack-bot/src/router/index.ts b/applications/chatops/slack-bot/src/router/index.ts index 5dd21c3..3f6097c 100644 --- a/applications/chatops/slack-bot/src/router/index.ts +++ b/applications/chatops/slack-bot/src/router/index.ts @@ -18,26 +18,79 @@ const eventBridgeClient = new EventBridgeClient({ }) }); +/** + * Log router performance metrics for monitoring and analysis + */ +function logRouterMetrics(params: { + statusCode: number; + duration: number; + correlationId?: string; + command?: string; + errorType?: string; + errorMessage?: string; +}) { + const { statusCode, duration, correlationId, command, errorType, errorMessage } = params; + const success = statusCode >= 200 && statusCode < 300; + + logger.info('Router performance metrics', { + correlationId, + command, + statusCode, + duration, + success, + ...(errorType && { errorType }), + ...(errorMessage && { errorMessage }) + }); +} + export async function handler(event: APIGatewayProxyEvent): Promise { + const apiGatewayStartTime = Date.now(); + logger.info('Router Lambda invoked', { path: event.path, - httpMethod: event.httpMethod + httpMethod: event.httpMethod, + startTime: apiGatewayStartTime }); try { + // Debug: Log request details for signature troubleshooting + const timestamp = event.headers['x-slack-request-timestamp'] || event.headers['X-Slack-Request-Timestamp'] || ''; + const signature = event.headers['x-slack-signature'] || event.headers['X-Slack-Signature'] || ''; + + logger.info('Request signature details', { + timestamp, + signaturePreview: signature.substring(0, 20) + '...', + bodyLength: (event.body || '').length, + bodyPreview: (event.body || '').substring(0, 100), + headerKeys: Object.keys(event.headers) + }); + // 1. 
Verify Slack signature const signingSecret = await getSlackSigningSecret(); const isValid = await verifySlackSignature( signingSecret, { - 'x-slack-request-timestamp': event.headers['x-slack-request-timestamp'] || event.headers['X-Slack-Request-Timestamp'] || '', - 'x-slack-signature': event.headers['x-slack-signature'] || event.headers['X-Slack-Signature'] || '' + 'x-slack-request-timestamp': timestamp, + 'x-slack-signature': signature }, event.body || '' ); if (!isValid) { - logger.warn('Invalid Slack signature'); + const duration = Date.now() - apiGatewayStartTime; + logger.warn('Invalid Slack signature', { + timestamp, + signaturePreview: signature.substring(0, 20) + '...' + }); + + logRouterMetrics({ + statusCode: 401, + duration, + correlationId: timestamp, + errorType: 'AuthenticationError', + errorMessage: 'Invalid Slack signature' + }); + return { statusCode: 401, body: JSON.stringify({ error: 'Invalid signature' }) @@ -46,6 +99,9 @@ export async function handler(event: APIGatewayProxyEvent): Promise { logger.info('Echo worker invoked', { recordCount: event.Records.length, @@ -59,9 +77,6 @@ export async function handler(event: SQSEvent): Promise { duration: syncDuration, }); - // Simulate some async work - await new Promise((resolve) => setTimeout(resolve, 2000)); - // Send asynchronous response const asyncStart = Date.now(); await sendSlackResponse(message.response_url, { @@ -95,9 +110,25 @@ export async function handler(event: SQSEvent): Promise { subsegment?.close(); const totalDuration = Date.now() - startTime; + const e2eDuration = message.api_gateway_start_time + ? Date.now() - message.api_gateway_start_time + : undefined; + + // Log structured performance metrics for CloudWatch Insights analysis + logWorkerMetrics({ + correlationId, + command: message.command, + totalE2eMs: e2eDuration, + workerDurationMs: totalDuration, + queueWaitMs: e2eDuration ? 
Math.max(0, e2eDuration - totalDuration) : undefined, + syncResponseMs: syncDuration, + asyncResponseMs: asyncDuration, + success: true + }); messageLogger.info('Echo command processed successfully', { totalDuration, + e2eDuration, }); } catch (error) { subsegment?.addError(error as Error); @@ -106,12 +137,22 @@ export async function handler(event: SQSEvent): Promise { } } catch (error) { const duration = Date.now() - startTime; + const err = error as Error; - messageLogger.error('Failed to process echo command', error as Error, { + messageLogger.error('Failed to process echo command', err, { messageId: record.messageId, duration, }); + // Log performance metrics even for failures + logWorkerMetrics({ + correlationId, + workerDurationMs: duration, + success: false, + errorType: err.name, + errorMessage: err.message + }); + // Add to failed items for retry batchItemFailures.push({ itemIdentifier: record.messageId }); }