diff --git a/.gitignore b/.gitignore index 5b5487a3d5..7a973e3116 100644 --- a/.gitignore +++ b/.gitignore @@ -12,8 +12,20 @@ package-lock.json # Content generation /content/influxdb*/**/api/**/*.html +/content/influxdb*/**/api/**/*.md !api-docs/**/.config.yml /api-docs/redoc-static.html* + +# API documentation generation (generated by api-docs/scripts/) +/content/influxdb/*/api/** +/content/influxdb3/*/api/** +/content/influxdb3/*/reference/api/** +/static/openapi + +# Exception: hand-crafted API conceptual pages (not generated) +!/content/influxdb3/*/api/administration/ +!/content/influxdb3/*/api/administration/_index.md + /helper-scripts/output/* /telegraf-build !telegraf-build/templates @@ -38,6 +50,8 @@ tmp # TypeScript build output **/dist/ +# Exception: include compiled API doc scripts for easier use +!api-docs/scripts/dist/ **/dist-lambda/ # User context files for AI assistant tools diff --git a/api-docs/README.md b/api-docs/README.md index 3e59d120cf..3c35387d15 100755 --- a/api-docs/README.md +++ b/api-docs/README.md @@ -48,6 +48,7 @@ ``` 3. To generate the HTML files for local testing, follow the instructions to [generate API docs locally](#generate-api-docs-locally). + 4. To commit your updated spec files, push your branch to `influxdata/docs-v2`, and create a PR against the `master` branch. ## Update API docs for an InfluxDB OSS release @@ -106,8 +107,8 @@ # Copy the old version directory to a directory for the new version: cp -r v2.2 v2.3 ``` - -8. In your editor, update custom content files in NEW_VERSION/content. + +8. In your editor, update custom content files in NEW\_VERSION/content. 9. Enter the following commands into your terminal to fetch and process the contracts: @@ -117,6 +118,7 @@ ``` 10. To generate the HTML files for local testing, follow the instructions to [generate API docs locally](#generate-api-docs-locally). + 11. 
To commit your updated spec files, push your branch to `influxdata/docs-v2`, and create a PR against the `master` branch. ## Update API docs for OSS spec changes between releases @@ -142,6 +144,8 @@ Follow these steps to update OSS API docs between version releases--for example, git cherry-pick [COMMIT_SHAs] git push -f origin docs-release/influxdb-oss + ``` + 4. Go into your `docs-v2` directory and create a branch for your changes--for example: ```sh @@ -165,6 +169,7 @@ Follow these steps to update OSS API docs between version releases--for example, ``` 7. To generate the HTML files for local testing, follow the instructions to [generate API docs locally](#generate-api-docs-locally). + 8. To commit your updated spec files, push your branch to `influxdata/docs-v2`, and create a PR against the `master` branch. ## Generate InfluxDB API docs @@ -197,7 +202,7 @@ The script uses `npx` to download and execute the Redocly CLI. If `npx` returns errors, [download](https://nodejs.org/en/) and run a recent version of the Node.js installer for your OS. -2. To generate API docs for _all_ InfluxDB versions in `./openapi`, enter the following command into your terminal: +2. To generate API docs for *all* InfluxDB versions in `./openapi`, enter the following command into your terminal: ```sh sh generate-api-docs.sh @@ -239,9 +244,9 @@ We regenerate API reference docs from `influxdata/openapi` ### InfluxDB OSS v2 version - Given that - `influxdata/openapi` **master** may contain OSS spec changes not implemented - in the current OSS release, we (Docs team) maintain a release branch, `influxdata/openapi` +Given that +`influxdata/openapi` **master** may contain OSS spec changes not implemented +in the current OSS release, we (Docs team) maintain a release branch, `influxdata/openapi` **docs-release/influxdb-oss**, used to generate OSS reference docs. 
### How to find the API spec used by an InfluxDB OSS version @@ -249,7 +254,7 @@ We regenerate API reference docs from `influxdata/openapi` `influxdata/openapi` does not version the InfluxData API. To find the `influxdata/openapi` commit SHA used in a specific version of InfluxDB OSS, see `/scripts/fetch-swagger.sh` in `influxdata/influxdb`--for example, -for the `influxdata/openapi` commit used in OSS v2.2.0, see https://github.com/influxdata/influxdb/blob/v2.2.0/scripts/fetch-swagger.sh#L13=. +for the `influxdata/openapi` commit used in OSS v2.2.0, see . For convenience, we tag `influxdata/influxdb` (OSS) release points in `influxdata/openapi` as `influxdb-oss-v[OSS_VERSION]`. See . @@ -281,16 +286,17 @@ To add new YAML files for other nodes in the contracts, follow these steps: `@redocly/cli` also provides some [built-in decorators](https://redocly.com/docs/cli/decorators/) that you can configure in `.redocly` without having to write JavaScript. + ### How to add tag content or describe a group of paths In API reference docs, we use OpenAPI `tags` elements for navigation, the `x-traitTag` vendor extension for providing custom content, and the `x-tagGroups` vendor extension for grouping tags in navigation. 
-| Example | OpenAPI field | | -|:-------------------------------------------------------------------------------------------------------|-------------------------------------------------------|--------------------------------------------| -| [Add supplementary documentation](https://docs.influxdata.com/influxdb/cloud/api/#tag/Quick-start) | `tags: [ { name: 'Quick start', x-traitTag: true } ]` | [Source](https://github.com/influxdata/openapi/master/src/cloud/tags.yml) | -| Group tags in navigation | `x-tagGroups: [ { name: 'All endpoints', tags: [...], ...} ]` | [Source](https://github.com/influxdata/docs-v2/blob/da6c2e467de7212fc2197dfe0b87f0f0296688ee/api-docs/cloud-iox/content/tag-groups.yml)) | +| Example | OpenAPI field | | +| :------------------------------------------------------------------------------------------------- | ------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| [Add supplementary documentation](https://docs.influxdata.com/influxdb/cloud/api/#tag/Quick-start) | `tags: [ { name: 'Quick start', x-traitTag: true } ]` | [Source](https://github.com/influxdata/openapi/master/src/cloud/tags.yml) | +| Group tags in navigation | `x-tagGroups: [ { name: 'All endpoints', tags: [...], ...} ]` | [Source](https://github.com/influxdata/docs-v2/blob/da6c2e467de7212fc2197dfe0b87f0f0296688ee/api-docs/cloud-iox/content/tag-groups.yml)) | #### Add and update x-tagGroups @@ -302,6 +308,47 @@ those tags. If you assign an empty array(`[]`) to the `All endpoints` x-tagGroup in `PLATFORM/content/tag-groups.yml`, the decorator replaces the empty array with the list of tags from all Operations in the spec. +## Documentation links in OpenAPI specs + +Use the `/influxdb/version/` placeholder when including InfluxDB links in OpenAPI spec description and summary fields. 
+The build process automatically transforms these placeholders to product-specific paths based on the spec file location. + +### Writing links + +```yaml +# In api-docs/influxdb3/core/openapi/ref.yml +info: + description: | + See [authentication](/influxdb/version/api/authentication/) for details. + Related: [tokens](/influxdb/version/admin/tokens/) +``` + +After build, these become: + +- `/influxdb3/core/api/authentication/` +- `/influxdb3/core/admin/tokens/` + +### How it works + +The product path is derived from the spec file location: + +- `api-docs/influxdb3/core/...` → `/influxdb3/core` +- `api-docs/influxdb3/enterprise/...` → `/influxdb3/enterprise` +- `api-docs/influxdb/v2/...` → `/influxdb/v2` + +Only `description` and `summary` fields are transformed. +Explicit cross-product links (e.g., `/telegraf/v1/plugins/`) remain unchanged. + +### Link validation + +Run with the `--validate-links` flag to check for broken links: + +```bash +yarn build:api-docs --validate-links +``` + +This validates that transformed links point to existing Hugo content files and warns about any broken links. + ## How to test your spec or API reference changes You can use `getswagger.sh` to fetch contracts from any URL. 
diff --git a/api-docs/getswagger.sh b/api-docs/getswagger.sh index 1ff077a45f..300aaa4b6a 100755 --- a/api-docs/getswagger.sh +++ b/api-docs/getswagger.sh @@ -62,8 +62,13 @@ function showHelp { subcommand=$1 case "$subcommand" in - cloud-dedicated-v2|cloud-dedicated-management|cloud-serverless-v2|clustered-management|clustered-v2|cloud-v2|v2|v1-compat|core-v3|enterprise-v3|all) + cloud-dedicated-v2|cloud-dedicated-management|cloud-serverless-v2|clustered-management|clustered-v2|cloud-v2|v2|v1-compat|core-v3|enterprise-v3|influxdb3_core|influxdb3_enterprise|all) product=$1 + # Map alternative product names to canonical names + case "$product" in + influxdb3_core) product="core-v3" ;; + influxdb3_enterprise) product="enterprise-v3" ;; + esac shift while getopts ":o:b:BhV" opt; do @@ -150,19 +155,7 @@ function postProcess() { } function updateCloudDedicatedManagement { - outFile="influxdb3/cloud-dedicated/management/openapi.yml" - if [[ -z "$baseUrl" ]]; - then - echo "Using existing $outFile" - else - # Clone influxdata/granite and fetch the latest openapi.yaml file. - echo "Fetching the latest openapi.yaml file from influxdata/granite" - tmp_dir=$(mktemp -d) - git clone --depth 1 --branch main https://github.com/influxdata/granite.git "$tmp_dir" - cp "$tmp_dir/openapi.yaml" "$outFile" - rm -rf "$tmp_dir" - fi - postProcess $outFile 'influxdb3/cloud-dedicated/.config.yml' management@0 + bundleManagementWithOverlay "cloud-dedicated" } function updateCloudDedicatedV2 { @@ -188,19 +181,7 @@ function updateCloudServerlessV2 { } function updateClusteredManagement { - outFile="influxdb3/clustered/management/openapi.yml" - if [[ -z "$baseUrl" ]]; - then - echo "Using existing $outFile" - else - # Clone influxdata/granite and fetch the latest openapi.yaml file. 
- echo "Fetching the latest openapi.yaml file from influxdata/granite" - tmp_dir=$(mktemp -d) - git clone --depth 1 --branch main https://github.com/influxdata/granite.git "$tmp_dir" - cp "$tmp_dir/openapi.yaml" "$outFile" - rm -rf "$tmp_dir" - fi - postProcess $outFile 'influxdb3/clustered/.config.yml' management@0 + bundleManagementWithOverlay "clustered" } function updateClusteredV2 { @@ -214,28 +195,59 @@ function updateClusteredV2 { postProcess $outFile 'influxdb3/clustered/.config.yml' v2@2 } +# Bundle shared base spec with product-specific overlay +# Usage: bundleWithOverlay +# Example: bundleWithOverlay "core" "v3" +function bundleWithOverlay { + local product=$1 + local apiVersion=$2 + + local baseFile="influxdb3/shared/${apiVersion}/base.yml" + local overlayFile="influxdb3/${product}/${apiVersion}/overlay.yml" + local outFile="influxdb3/${product}/${apiVersion}/ref.yml" + local configFile="influxdb3/${product}/.config.yml" + + echo "Bundling ${product} ${apiVersion} with overlay..." 
+ + # Apply overlay to base spec (run from project root for node_modules access) + local scriptDir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + local projectRoot=$(dirname "$scriptDir") + + (cd "$projectRoot" && node api-docs/scripts/apply-overlay.js "api-docs/$baseFile" "api-docs/$overlayFile" -o "api-docs/$outFile") + + # Apply Redocly decorators (info, servers, tag-groups from content/) + postProcess "$outFile" "$configFile" "${apiVersion}@3" +} + +# Bundle shared management base spec with product-specific overlay +# Usage: bundleManagementWithOverlay +# Example: bundleManagementWithOverlay "clustered" +function bundleManagementWithOverlay { + local product=$1 + + local baseFile="influxdb3/shared/management/base.yml" + local overlayFile="influxdb3/${product}/management/overlay.yml" + local outFile="influxdb3/${product}/management/openapi.yml" + local configFile="influxdb3/${product}/.config.yml" + + echo "Bundling ${product} management with overlay..." + + # Apply overlay to base spec (run from project root for node_modules access) + local scriptDir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + local projectRoot=$(dirname "$scriptDir") + + (cd "$projectRoot" && node api-docs/scripts/apply-overlay.js "api-docs/$baseFile" "api-docs/$overlayFile" -o "api-docs/$outFile") + + # Apply Redocly decorators + postProcess "$outFile" "$configFile" "management@0" +} + function updateCoreV3 { - outFile="influxdb3/core/v3/ref.yml" - if [[ -z "$baseUrl" ]]; - then - echo "Using existing $outFile" - else - local url="${baseUrl}/TO_BE_DECIDED" - curl $UPDATE_OPTIONS $url -o $outFile - fi - postProcess $outFile 'influxdb3/core/.config.yml' v3@3 + bundleWithOverlay "core" "v3" } function updateEnterpriseV3 { - outFile="influxdb3/enterprise/v3/ref.yml" - if [[ -z "$baseUrl" ]]; - then - echo "Using existing $outFile" - else - local url="${baseUrl}/TO_BE_DECIDED" - curl $UPDATE_OPTIONS $url -o $outFile - fi - postProcess $outFile 
'influxdb3/enterprise/.config.yml' v3@3 + bundleWithOverlay "enterprise" "v3" } function updateOSSV2 { diff --git a/api-docs/influxdb/cloud/v2/ref.yml b/api-docs/influxdb/cloud/v2/ref.yml index 365e6d7db0..925601e9cf 100644 --- a/api-docs/influxdb/cloud/v2/ref.yml +++ b/api-docs/influxdb/cloud/v2/ref.yml @@ -126,7 +126,7 @@ tags: | Header | Value type | Description | |:------------------------ |:--------------------- |:-------------------------------------------| | `Accept` | string | The content type that the client can understand. | - | `Authorization` | string | The authorization scheme and credential. | + | `Authorization` | string | The [authorization scheme and credential](/influxdb/version/api/authentication/). | | `Content-Length` | integer | The size of the entity-body, in bytes, sent to the database. | | `Content-Type` | string | The format of the data in the request body. | name: Headers diff --git a/api-docs/influxdb/v2/v2/ref.yml b/api-docs/influxdb/v2/v2/ref.yml index 778aceaccc..1c8347ac30 100644 --- a/api-docs/influxdb/v2/v2/ref.yml +++ b/api-docs/influxdb/v2/v2/ref.yml @@ -142,7 +142,7 @@ tags: | Header | Value type | Description | |:------------------------ |:--------------------- |:-------------------------------------------| | `Accept` | string | The content type that the client can understand. | - | `Authorization` | string | The authorization scheme and credential. | + | `Authorization` | string | The [authorization scheme and credential](/influxdb/version/api/authentication/). | | `Content-Length` | integer | The size of the entity-body, in bytes, sent to the database. | | `Content-Type` | string | The format of the data in the request body. 
| name: Headers diff --git a/api-docs/influxdb3/cloud-dedicated/management/content/info.yml b/api-docs/influxdb3/cloud-dedicated/management/content/info.yml deleted file mode 100644 index b18e6956fd..0000000000 --- a/api-docs/influxdb3/cloud-dedicated/management/content/info.yml +++ /dev/null @@ -1,15 +0,0 @@ -title: InfluxDB 3 Cloud Dedicated Management API -x-influxdata-short-title: Management API -description: | - The Management API for InfluxDB 3 Cloud Dedicated provides a programmatic interface for managing a Cloud Dedicated cluster. - The Management API lets you integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application. - - This documentation is generated from the - InfluxDB OpenAPI specification. -license: - name: MIT - url: 'https://opensource.org/licenses/MIT' -contact: - name: InfluxData - url: https://www.influxdata.com - email: support@influxdata.com \ No newline at end of file diff --git a/api-docs/influxdb3/cloud-dedicated/management/content/servers.yml b/api-docs/influxdb3/cloud-dedicated/management/content/servers.yml deleted file mode 100644 index 6e97ac2807..0000000000 --- a/api-docs/influxdb3/cloud-dedicated/management/content/servers.yml +++ /dev/null @@ -1,8 +0,0 @@ -- url: 'https://{baseurl}/api/v0' - description: InfluxDB 3 Cloud Dedicated Management API URL - variables: - baseurl: - enum: - - 'console.influxdata.com' - default: 'console.influxdata.com' - description: InfluxDB 3 Cloud Dedicated Console URL diff --git a/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml b/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml deleted file mode 100644 index 57e8c8484c..0000000000 --- a/api-docs/influxdb3/cloud-dedicated/management/content/tag-groups.yml +++ /dev/null @@ -1,6 +0,0 @@ -- name: Using the Management API - tags: - - Authentication - - Quickstart -- name: All endpoints - tags: [] diff --git 
a/api-docs/influxdb3/cloud-dedicated/management/openapi.yml b/api-docs/influxdb3/cloud-dedicated/management/openapi.yml index a74165c29d..5099d8f854 100644 --- a/api-docs/influxdb3/cloud-dedicated/management/openapi.yml +++ b/api-docs/influxdb3/cloud-dedicated/management/openapi.yml @@ -974,11 +974,11 @@ paths: permissions: $ref: '#/components/schemas/DatabaseTokenPermissions' createdAt: - $ref: '#/components/schemas/DatabaseTokenCreatedAt' + $ref: '#/components/schemas/DateTimeRfc3339' expiresAt: - $ref: '#/components/schemas/DatabaseTokenExpiresAt' + $ref: '#/components/schemas/DateTimeRfc3339' revokedAt: - $ref: '#/components/schemas/DatabaseTokenRevokedAt' + $ref: '#/components/schemas/DateTimeRfc3339' required: - accountId - clusterId @@ -1083,7 +1083,7 @@ paths: permissions: $ref: '#/components/schemas/DatabaseTokenPermissions' expiresAt: - $ref: '#/components/schemas/DatabaseTokenExpiresAt' + $ref: '#/components/schemas/DateTimeRfc3339' required: - description examples: @@ -1130,13 +1130,13 @@ paths: permissions: $ref: '#/components/schemas/DatabaseTokenPermissions' createdAt: - $ref: '#/components/schemas/DatabaseTokenCreatedAt' + $ref: '#/components/schemas/DateTimeRfc3339' accessToken: $ref: '#/components/schemas/DatabaseTokenAccessToken' expiresAt: - $ref: '#/components/schemas/DatabaseTokenExpiresAt' + $ref: '#/components/schemas/DateTimeRfc3339' revokedAt: - $ref: '#/components/schemas/DatabaseTokenRevokedAt' + $ref: '#/components/schemas/DateTimeRfc3339' required: - accountId - clusterId @@ -1279,11 +1279,11 @@ paths: permissions: $ref: '#/components/schemas/DatabaseTokenPermissions' createdAt: - $ref: '#/components/schemas/DatabaseTokenCreatedAt' + $ref: '#/components/schemas/DateTimeRfc3339' expiresAt: - $ref: '#/components/schemas/DatabaseTokenExpiresAt' + $ref: '#/components/schemas/DateTimeRfc3339' revokedAt: - $ref: '#/components/schemas/DatabaseTokenRevokedAt' + $ref: '#/components/schemas/DateTimeRfc3339' required: - accountId - clusterId @@ 
-1440,11 +1440,11 @@ paths: permissions: $ref: '#/components/schemas/DatabaseTokenPermissions' createdAt: - $ref: '#/components/schemas/DatabaseTokenCreatedAt' + $ref: '#/components/schemas/DateTimeRfc3339' expiresAt: - $ref: '#/components/schemas/DatabaseTokenExpiresAt' + $ref: '#/components/schemas/DateTimeRfc3339' revokedAt: - $ref: '#/components/schemas/DatabaseTokenRevokedAt' + $ref: '#/components/schemas/DateTimeRfc3339' required: - accountId - clusterId @@ -1653,7 +1653,7 @@ components: minLength: 1 ClusterDatabaseRetentionPeriod: description: | - The retention period of the [cluster database](/influxdb3/cloud-dedicated/admin/databases/) in nanoseconds, if applicable + The retention period of the [cluster database](/influxdb/version/admin/databases/) in nanoseconds, if applicable If the retention period is not set or is set to 0, the database will have infinite retention type: integer @@ -1683,11 +1683,11 @@ components: minimum: 1 ClusterDatabasePartitionTemplate: description: | - A template for [partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/) a cluster database. + A template for [partitioning](/influxdb/version/admin/custom-partitions/) a cluster database. - Each partition template part is evaluated in sequence. - The outputs from each part are concatenated with the - `|` delimiter to form the final partition key. + Each template part is evaluated in sequence, concatenating the final + partition key from the output of each part, delimited by the partition + key delimiter `|`. 
For example, using the partition template below: @@ -1727,7 +1727,7 @@ components: * `time=2023-01-01, a=` -> `2023|#|!|!` * `time=2023-01-01, c=` -> `2023|!|!|` - When using the default [partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/) template (YYYY-MM-DD) there is no + When using the default [partitioning](/influxdb/version/admin/custom-partitions/) template (YYYY-MM-DD) there is no encoding necessary, as the derived partition key contains a single part, and no reserved characters. [`TemplatePart::Bucket`] parts by definition will always be within the part length limit and contain no restricted characters @@ -1829,7 +1829,7 @@ components: tagName: c numberOfBuckets: 10 ClusterDatabaseTableName: - description: The name of the [cluster database](/influxdb3/cloud-dedicated/admin/databases/) table + description: The name of the [cluster database](/influxdb/version/admin/databases/) table type: string examples: - TableOne @@ -1842,15 +1842,15 @@ components: - Limited Access Token - Full Access Token DatabaseTokenResourceAllDatabases: - description: A resource value for a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) permission that refers to all databases + description: A resource value for a [database token](/influxdb/version/admin/tokens/database/) permission that refers to all databases type: string enum: - '*' DatabaseTokenPermissionAction: - description: The action the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) permission allows + description: The action the [database token](/influxdb/version/admin/tokens/database/) permission allows type: string DatabaseTokenPermissionResource: - description: The resource the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) permission applies to + description: The resource the [database token](/influxdb/version/admin/tokens/database/) permission applies to anyOf: - $ref: '#/components/schemas/ClusterDatabaseName' - $ref: 
'#/components/schemas/DatabaseTokenResourceAllDatabases' @@ -1874,7 +1874,7 @@ components: - action: write resource: '*' DatabaseTokenPermissions: - description: The list of permissions the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) allows + description: The list of permissions the [database token](/influxdb/version/admin/tokens/database/) allows type: array items: $ref: '#/components/schemas/DatabaseTokenPermission' @@ -1887,7 +1887,7 @@ components: resource: '*' DatabaseTokenCreatedAt: description: | - The date and time that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) was created + The date and time that the [database token](/influxdb/version/admin/tokens/database/) was created Uses RFC3339 format $ref: '#/components/schemas/DateTimeRfc3339' diff --git a/api-docs/influxdb3/cloud-dedicated/management/overlay.yml b/api-docs/influxdb3/cloud-dedicated/management/overlay.yml new file mode 100644 index 0000000000..b53eb114fe --- /dev/null +++ b/api-docs/influxdb3/cloud-dedicated/management/overlay.yml @@ -0,0 +1,1617 @@ +overlay: 1.0.0 +info: + title: Cloud Dedicated Management API overlay + version: 1.0.0 +actions: + - target: $.info + update: + title: InfluxDB 3 Cloud Dedicated Management API + description: | + The Management API for InfluxDB 3 Cloud Dedicated provides a programmatic interface for managing a Cloud Dedicated cluster. + The Management API lets you integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application. + + This documentation is generated from the + InfluxDB OpenAPI specification. 
+ version: '' + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + name: InfluxData + url: https://www.influxdata.com + email: support@influxdata.com + - target: $.servers + update: + - url: https://{baseurl}/api/v0 + description: InfluxDB 3 Cloud Dedicated Management API URL + variables: + baseurl: + enum: + - console.influxdata.com + default: console.influxdata.com + description: InfluxDB 3 Cloud Dedicated Console URL + - target: $.tags + update: + - name: Authentication + x-traitTag: true + description: | + With InfluxDB 3 Cloud Dedicated, the InfluxDB Management API endpoints require the following credentials: + + - `ACCOUNT_ID`: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the cluster belongs to. To view account ID and cluster ID, [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json). + - `CLUSTER_ID`: The ID of the [cluster](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that you want to manage. To view account ID and cluster ID, [list cluster details](/influxdb3/cloud-dedicated/admin/clusters/list/#detailed-output-in-json). + - `Authorization MANAGEMENT_TOKEN`: the `Authorization` HTTP header with a [management token](/influxdb3/cloud-dedicated/admin/tokens/management/). + + See how to [create a management token](/influxdb3/cloud-dedicated/admin/tokens/management/). + + By default, management tokens in InfluxDB 3 are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your InfluxDB cluster. However, for automation purposes, you can manually create management tokens that authenticate directly with your InfluxDB cluster and do not require human interaction with your identity provider. 
+ + - name: Database tokens + description: Manage database read/write tokens for a cluster + - name: Databases + description: Manage databases for a cluster + - name: Quickstart + x-traitTag: true + description: | + The following example script shows how to use `curl` to make database and token management requests: + + ```shell + #!/bin/bash + + # Usage: + # Note the leading space in the command below to keep secrets out of the shell history + # + # ``` + # MANAGEMENT_TOKEN= ACCOUNT_ID= CLUSTER_ID= ./scripts/test_http_api_v0_endpoints.sh + # ``` + + # Env var validation + if [ -z "${MANAGEMENT_TOKEN}" ]; then + echo " + [Error]: ❌ + \$MANAGEMENT_TOKEN env var is required. + " + exit 1 + fi + + if [ -z "${ACCOUNT_ID}" ]; then + echo " + [Error]: ❌ + \$ACCOUNT_ID env var is required. + " + exit 1 + fi + + if [ -z "${CLUSTER_ID}" ]; then + echo " + [Error]: ❌ + \$CLUSTER_ID env var is required. + " + exit 1 + fi + + HOST="https://console.influxdata.com" + + # Database request functions + list_databases () { + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + create_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "name": "'$databaseName'", + "maxTables": 75, + "maxColumnsPerTable": 90, + "retentionPeriod": 600000000000, + "partitionTemplate": [ + { + "type": "tag", + "value": "abc" + }, + { + "type": "bucket", + "value": { + "tagName": "def", + "numberOfBuckets": 5 + } + } + ] + }' \ + ) + echo "$response" + } + + update_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location 
"$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases/$databaseName" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "maxTables": 150, + "maxColumnsPerTable": 180, + "retentionPeriod": 1200000000000 + }' \ + ) + echo "$response" + } + + delete_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases/$databaseName" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + # Token request functions + list_tokens () { + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + create_token () { + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "my test token", + "permissions": [ + { + "action": "write", + "resource": "database_one" + }, + { + "action": "read", + "resource": "database_two" + } + ] + }' \ + ) + echo "$response" + } + + get_token () { + local token_id=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens/$tokenId" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + update_token () { + local token_id=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens/$tokenId" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: 
application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "my updated test token", + "permissions": [ + { + "action": "database_one", + "resource": "read" + } + ] + }' \ + ) + echo "$response" + } + + delete_token () { + local token_id=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens/$tokenId" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + + # Test database endpoints + databaseName="test_database_$RANDOM" + + printf "\n🏗️ Creating database... 🏗️\n\n" + response="$(create_database $databaseName)" + echo $response | jq + printf "\n🏗️ Creating database successful 🏗️\n\n" + + printf "\n⬆️ Updating database... ⬆️\n\n" + response="$(update_database $databaseName)" + echo $response | jq + printf "\n⬆️ Updating database successful ⬆️\n\n" + + printf "\n⬇️ Listing databases... ⬇️\n\n" + response="$(list_databases)" + echo $response | jq + printf "\n⬇️ Listing databases successful ⬇️\n\n" + + printf "\n🗑️ Deleting database... 🗑️\n\n" + response="$(delete_database $databaseName)" + echo $response | jq + printf "\n🗑️ Deleting database successful 🗑️\n\n" + + + # Test token endpoints + printf "\n🏗️ Creating token... 🏗️\n\n" + response="$(create_token)" + echo $response | jq + tokenId=$(echo $response | jq '.id') + printf "\n🏗️ Creating token successful 🏗️\n\n" + + printf "\n⬇️ Getting token... ⬇️\n\n" + response="$(get_token $tokenId)" + echo $response | jq + printf "\n⬇️ Getting token successful ⬇️\n\n" + + printf "\n⬆️ Updating token... ⬆️\n\n" + response="$(update_token $tokenId)" + echo $response | jq + printf "\n⬆️ Updating token successful ⬆️\n\n" + + printf "\n📋 Listing tokens... 📋\n\n" + response="$(list_tokens)" + echo $response | jq + printf "\n📋 Listing tokens successful 📋\n\n" + + printf "\n🗑️ Deleting token... 
🗑️\n\n" + response="$(delete_token $tokenId)" + echo $response | jq + printf "\n🗑️ Deleting token successful 🗑️\n\n" + ``` + - name: Tables + description: Manage tables in a database + - target: $.paths['/accounts/{accountId}/clusters/{clusterId}/databases'] + update: + get: + operationId: GetClusterDatabases + summary: Get all databases for a cluster + parameters: + - name: accountId + in: path + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to get the [databases](/influxdb3/cloud-dedicated/admin/databases/) for + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: clusterId + in: path + description: The ID of the cluster to get the [databases](/influxdb3/cloud-dedicated/admin/databases/) for + required: true + schema: + $ref: '#/components/schemas/UuidV4' + responses: + '200': + description: The cluster databases were successfully retrieved + content: + application/json: + schema: + type: array + items: + type: object + properties: + accountId: + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to + $ref: '#/components/schemas/UuidV4' + clusterId: + description: The ID of the cluster that the database belongs to + $ref: '#/components/schemas/UuidV4' + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - accountId + - clusterId + - name + - maxTables + - maxColumnsPerTable + - retentionPeriod + example: + - accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + name: DatabaseOne + 
maxTables: 500 + maxColumnsPerTable: 200 + retentionPeriod: 0 + - accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + name: DatabaseTwo + maxTables: 100 + maxColumnsPerTable: 50 + retentionPeriod: 300000000000 + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: '' + lang: Shell + source: | + HOST="https://console.influxdata.com" + + list_databases () { + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + tags: + - Databases + post: + operationId: CreateClusterDatabase + summary: Create a database + description: | + Create a database for a cluster. + + The database name must be unique within the cluster. + + **Default maximum number of columns**: 250 + **Default maximum number of tables**: 500 + + The retention period is specified in nanoseconds. For example, to set a retention period of 1 hour, use `3600000000000`. + + InfluxDB Cloud Dedicated lets you define a [custom partitioning](/influxdb3/cloud-dedicated/admin/custom-partitions/) strategy for each database and table. + A _partition_ is a logical grouping of data stored in [Apache Parquet](https://parquet.apache.org/). + By default, data is partitioned by day, + but, depending on your schema and workload, customizing the partitioning + strategy can improve query performance. 
+ + To use custom partitioning, you define a [partition template](/influxdb3/cloud-dedicated/admin/custom-partitions/partition-templates/). + If a table doesn't have a custom partition template, it inherits the database's template. + The partition template is set at the time of database creation and cannot be changed later. + For more information, see [Custom partitions](/influxdb3/cloud-dedicated/admin/custom-partitions/). + tags: + - Databases + parameters: + - name: accountId + in: path + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to create the database for + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: clusterId + in: path + description: The ID of the cluster to create the database for + required: true + schema: + $ref: '#/components/schemas/UuidV4' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - name + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + name: DatabaseOne + allFields: + summary: All Fields + value: + name: DatabaseTwo + maxTables: 100 + maxColumnsPerTable: 50 + retentionPeriod: 300000000000 + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + responses: + '200': + description: The cluster database was successfully created + content: + application/json: + schema: + type: object + properties: + accountId: + description: The ID of the 
[account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to + $ref: '#/components/schemas/UuidV4' + clusterId: + description: The ID of the cluster that the database belongs to + $ref: '#/components/schemas/UuidV4' + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - accountId + - clusterId + - name + - maxTables + - maxColumnsPerTable + - retentionPeriod + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 200 + retentionPeriod: 0 + allFields: + summary: All Fields + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + name: DatabaseTwo + maxTables: 100 + maxColumnsPerTable: 50 + retentionPeriod: 300000000000 + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: a + - type: tag + value: c + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://console.influxdata.com" + + create_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location 
"$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "name": "'$databaseName'", + "maxTables": 75, + "maxColumnsPerTable": 90, + "retentionPeriod": 600000000000, + "partitionTemplate": [ + { + "type": "tag", + "value": "abc" + }, + { + "type": "bucket", + "value": { + "tagName": "def", + "numberOfBuckets": 5 + } + } + ] + }' \ + ) + echo "$response" + } + - target: $.paths['/accounts/{accountId}/clusters/{clusterId}/databases/{databaseName}'] + update: + patch: + operationId: UpdateClusterDatabase + summary: Update a database + tags: + - Databases + parameters: + - name: accountId + in: path + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: clusterId + in: path + description: The ID of the cluster that the database belongs to + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: databaseName + in: path + description: The name of the database to update + required: true + schema: + $ref: '#/components/schemas/ClusterDatabaseName' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + minProperties: 1 + examples: + allFields: + summary: Update All Fields + value: + maxTables: 300 + maxColumnsPerTable: 150 + retentionPeriod: 600000000000 + maxTablesOnly: + summary: Update Max Tables Only + value: + maxTables: 300 + maxColumnsPerTableOnly: + summary: Update Max Columns Per Table Only + value: + maxColumnsPerTable: 150 + 
retentionPeriodOnly: + summary: Update Retention Period Only + value: + retentionPeriod: 600000000000 + responses: + '200': + description: The cluster database was successfully updated. + content: + application/json: + schema: + type: object + properties: + accountId: + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to + $ref: '#/components/schemas/UuidV4' + clusterId: + description: The ID of the cluster that the database belongs to + $ref: '#/components/schemas/UuidV4' + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + required: + - accountId + - clusterId + - maxTables + - maxColumnsPerTable + - retentionPeriod + - name + examples: + allFields: + summary: Update All Fields + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + name: DatabaseOne + maxTables: 300 + maxColumnsPerTable: 150 + retentionPeriod: 600000000000 + maxTablesOnly: + summary: Update Max Tables Only + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + name: DatabaseOne + maxTables: 300 + maxColumnsPerTable: 200 + retentionPeriod: 0 + maxColumnsPerTableOnly: + summary: Update Max Columns Per Table Only + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 150 + retentionPeriod: 0 + retentionPeriodOnly: + summary: Update Retention Period Only + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 200 + 
retentionPeriod: 600000000000 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://console.influxdata.com" + + update_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases/$databaseName" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "maxTables": 150, + "maxColumnsPerTable": 180, + "retentionPeriod": 1200000000000 + }' \ + ) + echo "$response" + } + delete: + operationId: DeleteClusterDatabase + summary: Delete a database + tags: + - Databases + parameters: + - name: accountId + in: path + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database belongs to + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: clusterId + in: path + description: The ID of the cluster that the database belongs to + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: databaseName + in: path + description: The name of the database to delete + required: true + schema: + $ref: '#/components/schemas/ClusterDatabaseName' + responses: + '204': + description: The cluster database was successfully deleted + $ref: '#/components/responses/NoContent' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: 
Shell + source: | + HOST="https://console.influxdata.com" + + delete_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/databases/$databaseName" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + - target: $.paths['/accounts/{accountId}/clusters/{clusterId}/databases/{databaseName}/tables'] + update: + post: + operationId: CreateClusterDatabaseTable + summary: Create a database table + description: | + Create a table. The database must already exist. With InfluxDB Cloud Dedicated, tables and measurements are synonymous. + + Typically, tables are created automatically on write using the measurement name + specified in line protocol written to InfluxDB. + However, to apply a [custom partition template](/influxdb3/cloud-dedicated/admin/custom-partitions/) + to a table, you must manually [create the table with custom partitioning](/influxdb3/cloud-dedicated/admin/tables/#create-a-table-with-custom-partitioning) before you write any data to it. + + Partitioning defaults to `%Y-%m-%d` (daily). + When a partition template is applied to a database, it becomes the default template + for all tables in that database, but can be overridden when creating a + table. 
+ tags: + - Tables + parameters: + - name: accountId + in: path + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to create the database table for + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: clusterId + in: path + description: The ID of the cluster to create the database table for + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: databaseName + in: path + description: The name of the database to create the database table for + required: true + schema: + $ref: '#/components/schemas/ClusterDatabaseName' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseTableName' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - name + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + name: TableOne + allFields: + summary: All Fields + value: + name: TableTwo + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + responses: + '200': + description: The cluster database table was successfully created + content: + application/json: + schema: + type: object + properties: + accountId: + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the database table belongs to + $ref: '#/components/schemas/UuidV4' + clusterId: + description: The ID of the cluster that the database table belongs to + $ref: '#/components/schemas/UuidV4' + databaseName: + description: The name of the database that the database table belongs to + $ref: '#/components/schemas/ClusterDatabaseName' + name: + description: The name of the database table + $ref: '#/components/schemas/ClusterDatabaseTableName' + 
partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - accountId + - clusterId + - databaseName + - name + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + databaseName: DatabaseOne + name: TableOne + allFields: + summary: All Fields + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + databaseName: DatabaseOne + name: TableTwo + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: a + - type: tag + value: c + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: '#/components/responses/InternalServerError' + - target: $.paths['/accounts/{accountId}/clusters/{clusterId}/tokens'] + update: + get: + operationId: GetDatabaseTokens + summary: Get all database tokens for a cluster + tags: + - Database tokens + parameters: + - name: accountId + in: path + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to get the [database tokens](/influxdb3/cloud-dedicated/admin/tokens/database/) for + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: clusterId + in: path + description: The ID of the cluster to get the [database tokens](/influxdb3/cloud-dedicated/admin/tokens/database/) for + required: true + schema: + $ref: '#/components/schemas/UuidV4' + responses: + '200': + description: The database tokens were successfully retrieved + content: + application/json: + schema: + type: array + items: + type: object + properties: + accountId: + description: The ID of 
the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to + $ref: '#/components/schemas/UuidV4' + clusterId: + description: The ID of the cluster that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to + $ref: '#/components/schemas/UuidV4' + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - accountId + - clusterId + - id + - description + - permissions + - createdAt + example: + - accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + - accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + id: 66666666-6666-4666-8666-666666666666 + description: Full Access Token + permissions: + - action: write + resource: '*' + createdAt: '2024-03-02T04:20:19.000Z' + - accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + id: 77777777-7777-4777-8777-777777777777 + description: No Access Token + permissions: [] + createdAt: '2024-03-02T04:20:19.000Z' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: 
'#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://console.influxdata.com" + + list_tokens () { + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + post: + operationId: CreateDatabaseToken + summary: Create a database token + tags: + - Database tokens + description: | + Create a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) for a cluster. + + The token returned on the `accessToken` property in the response can be used to authenticate query and write requests to the cluster. + + ### Notable behaviors + + - InfluxDB might take some time--from a few seconds to a few minutes--to activate and synchronize new tokens. If a new database token doesn't immediately work (you receive a `401 Unauthorized` error) for querying or writing, wait and then try your request again. + + - Token strings are viewable _only_ on token creation and aren't stored by InfluxDB; you can't recover a lost token. + + #### Store secure tokens in a secret store + + We recommend storing database tokens in a **secure secret store**. + For example, see how to [authenticate Telegraf using tokens in your OS secret store](https://github.com/influxdata/telegraf/tree/master/plugins/secretstores/os). + + If you lose a token, [delete the token from InfluxDB](/influxdb3/cloud-dedicated/admin/tokens/database/delete/) and create a new one. 
+ parameters: + - name: accountId + in: path + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) to create the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) for + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: clusterId + in: path + description: The ID of the cluster to create the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) for + required: true + schema: + $ref: '#/components/schemas/UuidV4' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + required: + - description + examples: + limitedAccessToken: + summary: Limited Access Token + value: + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + fullAccessToken: + summary: Full Access Token + value: + description: Full Access Token + permissions: + - action: write + resource: '*' + noAccessToken: + summary: No Access Token + value: + description: No Access Token + permissions: [] + responses: + '200': + description: The database token was successfully created + content: + application/json: + schema: + type: object + properties: + accountId: + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to + $ref: '#/components/schemas/UuidV4' + clusterId: + description: The ID of the cluster that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to + $ref: '#/components/schemas/UuidV4' + id: + description: The ID of the database token + $ref: 
'#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + accessToken: + $ref: '#/components/schemas/DatabaseTokenAccessToken' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - accountId + - clusterId + - id + - description + - permissions + - createdAt + - accessToken + examples: + limitedAccessToken: + summary: Limited Access Token + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + accessToken: apiv1_5555555555555555555555555555555555555555555555555555555555555555 + fullAccessToken: + summary: Full Access Token + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + id: 66666666-6666-4666-8666-666666666666 + description: Full Access Token + permissions: + - action: write + resource: '*' + createdAt: '2024-03-02T04:20:19.000Z' + accessToken: apiv1_6666666666666666666666666666666666666666666666666666666666666666 + noAccessToken: + summary: No Access Token + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + id: 66666666-6666-4666-8666-666666666666 + description: No Access Token + permissions: [] + createdAt: '2024-03-02T04:20:19.000Z' + accessToken: apiv1_7777777777777777777777777777777777777777777777777777777777777777 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: 
'#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://console.influxdata.com" + + create_token () { + local description=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "'$description'", + "permissions": [ + { + "action": "read", + "resource": "DatabaseOne" + }, + { + "action": "write", + "resource": "DatabaseTwo" + } + ] + }' \ + ) + echo "$response" + } + - target: $.paths['/accounts/{accountId}/clusters/{clusterId}/tokens/{tokenId}'] + update: + get: + operationId: GetDatabaseToken + summary: Get a database token + description: | + Retrieve metadata details for a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/). + + #### Store secure tokens in a secret store + + We recommend storing database tokens in a **secure secret store**. + + Token strings are viewable _only_ on token creation and aren't stored by InfluxDB; you can't recover a lost token. 
+ tags: + - Database tokens + parameters: + - name: accountId + in: path + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: clusterId + in: path + description: The ID of the cluster that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: tokenId + in: path + description: The ID of the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) to get + required: true + schema: + $ref: '#/components/schemas/UuidV4' + responses: + '200': + description: The database token was successfully retrieved. + content: + application/json: + schema: + type: object + properties: + accountId: + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to + $ref: '#/components/schemas/UuidV4' + clusterId: + description: The ID of the cluster that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to + $ref: '#/components/schemas/UuidV4' + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - accountId + - clusterId + - id + - description + - permissions + - createdAt + examples: + limitedAccessToken: + summary: Limited Access Token + value: + accountId: 
11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + fullAccessToken: + summary: Full Access Token + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + id: 66666666-6666-4666-8666-666666666666 + description: Full Access Token + permissions: + - action: write + resource: '*' + createdAt: '2024-03-02T04:20:19.000Z' + noAccessToken: + summary: No Access Token + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + id: 77777777-7777-4777-8777-777777777777 + description: No Access Token + permissions: [] + createdAt: '2024-03-02T04:20:19.000Z' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://console.influxdata.com" + + get_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens/$tokenId" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + patch: + operationId: UpdateDatabaseToken + summary: Update a database token + description: | + Update the description and permissions of a [database token](/influxdb3/cloud-dedicated/admin/tokens/database/). 
+ tags: + - Database tokens + parameters: + - name: accountId + in: path + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: clusterId + in: path + description: The ID of the cluster that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: tokenId + in: path + description: The ID of the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) to update + schema: + $ref: '#/components/schemas/UuidV4' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + minProperties: 1 + examples: + allFields: + summary: Update All Fields + value: + description: Updated Limited Access Token + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - action: write + resource: DatabaseThree + descriptionOnly: + summary: Update Description Only + value: + description: Updated Limited Access Token + permissionsOnly: + summary: Update Permissions Only + value: + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - action: write + resource: DatabaseThree + removeAllPermissions: + summary: Remove All Permissions + value: + permissions: [] + responses: + '200': + description: The database token was successfully updated + content: + application/json: + schema: + type: object + properties: + accountId: + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database 
token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to + $ref: '#/components/schemas/UuidV4' + clusterId: + description: The ID of the cluster that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to + $ref: '#/components/schemas/UuidV4' + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - accountId + - clusterId + - id + - description + - permissions + - createdAt + examples: + allFields: + summary: Update All Fields + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + id: 55555555-5555-4555-8555-555555555555 + description: Updated Limited Access Token + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - action: write + resource: DatabaseThree + createdAt: '2023-12-21T17:32:28.000Z' + descriptionOnly: + summary: Update Description Only + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + id: 55555555-5555-4555-8555-555555555555 + description: Updated Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + permissionsOnly: + summary: Update Permissions Only + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - action: write + resource: 
DatabaseThree + createdAt: '2023-12-21T17:32:28.000Z' + removeAllPermissions: + summary: Remove All Permissions + value: + accountId: 11111111-1111-4111-8111-111111111111 + clusterId: 33333333-3333-4333-8333-333333333333 + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: [] + createdAt: '2023-12-21T17:32:28.000Z' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://console.influxdata.com" + + update_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens/$tokenId" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "Updated Limited Access Token", + "permissions": [ + { + "action": "write", + "resource": "DatabaseOne" + }, + { + "action": "read", + "resource": "DatabaseTwo" + }, + { + "action": "write", + "resource": "DatabaseThree" + } + ] + }' \ + ) + echo "$response" + } + delete: + operationId: DeleteDatabaseToken + summary: Delete a database token + tags: + - Database tokens + parameters: + - name: accountId + in: path + description: The ID of the [account](/influxdb3/cloud-dedicated/get-started/setup/#request-an-influxdb-cloud-dedicated-cluster) that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to + required: true + schema: + $ref: '#/components/schemas/UuidV4' + - name: clusterId + in: path + description: The ID of the cluster that the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) belongs to + required: 
true + schema: + $ref: '#/components/schemas/UuidV4' + - name: tokenId + in: path + description: The ID of the [database token](/influxdb3/cloud-dedicated/admin/tokens/database/) to delete + required: true + schema: + $ref: '#/components/schemas/UuidV4' + responses: + '204': + description: The database token was successfully deleted + $ref: '#/components/responses/NoContent' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://console.influxdata.com" + + delete_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/accounts/$ACCOUNT_ID/clusters/$CLUSTER_ID/tokens/$tokenId" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } diff --git a/api-docs/influxdb3/cloud-dedicated/v2/ref.yml b/api-docs/influxdb3/cloud-dedicated/v2/ref.yml index f4b3e76fe5..556963d06e 100644 --- a/api-docs/influxdb3/cloud-dedicated/v2/ref.yml +++ b/api-docs/influxdb3/cloud-dedicated/v2/ref.yml @@ -98,7 +98,7 @@ tags: | Header | Value type | Description | |:------------------------ |:--------------------- |:-------------------------------------------| | `Accept` | string | The content type that the client can understand. | - | `Authorization` | string | The authorization scheme and credential. | + | `Authorization` | string | The [authorization scheme and credential](/influxdb/version/api/authentication/). | | `Content-Length` | integer | The size of the entity-body, in bytes, sent to the database. | | `Content-Type` | string | The format of the data in the request body. 
| name: Headers diff --git a/api-docs/influxdb3/cloud-serverless/v2/ref.yml b/api-docs/influxdb3/cloud-serverless/v2/ref.yml index 3b8ac502e0..154ecab861 100644 --- a/api-docs/influxdb3/cloud-serverless/v2/ref.yml +++ b/api-docs/influxdb3/cloud-serverless/v2/ref.yml @@ -159,7 +159,7 @@ tags: | Header | Value type | Description | |:------------------------ |:--------------------- |:-------------------------------------------| | `Accept` | string | The content type that the client can understand. | - | `Authorization` | string | The authorization scheme and credential. | + | `Authorization` | string | The [authorization scheme and credential](/influxdb/version/api/authentication/). | | `Content-Length` | integer | The size of the entity-body, in bytes. | | `Content-Type` | string | The format of the data in the request body. | name: Headers diff --git a/api-docs/influxdb3/clustered/management/content/info.yml b/api-docs/influxdb3/clustered/management/content/info.yml deleted file mode 100644 index 0d324fadb5..0000000000 --- a/api-docs/influxdb3/clustered/management/content/info.yml +++ /dev/null @@ -1,15 +0,0 @@ -title: InfluxDB 3 Clustered Management API -x-influxdata-short-title: Management API -description: | - The Management API for InfluxDB 3 Clustered provides a programmatic interface for managing an InfluxDB 3 cluster. - The Management API lets you integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application. - - This documentation is generated from the - InfluxDB 3 Management API OpenAPI specification. 
-license: - name: MIT - url: 'https://opensource.org/licenses/MIT' -contact: - name: InfluxData - url: https://www.influxdata.com - email: support@influxdata.com \ No newline at end of file diff --git a/api-docs/influxdb3/clustered/management/content/servers.yml b/api-docs/influxdb3/clustered/management/content/servers.yml deleted file mode 100644 index edec580b8a..0000000000 --- a/api-docs/influxdb3/clustered/management/content/servers.yml +++ /dev/null @@ -1,8 +0,0 @@ -- url: 'https://{baseurl}/api/v0' - description: InfluxDB 3 Clustered Management API URL - variables: - baseurl: - enum: - - 'console.influxdata.com' - default: 'console.influxdata.com' - description: InfluxDB 3 Clustered Console URL diff --git a/api-docs/influxdb3/clustered/management/content/tag-groups.yml b/api-docs/influxdb3/clustered/management/content/tag-groups.yml deleted file mode 100644 index 57e8c8484c..0000000000 --- a/api-docs/influxdb3/clustered/management/content/tag-groups.yml +++ /dev/null @@ -1,6 +0,0 @@ -- name: Using the Management API - tags: - - Authentication - - Quickstart -- name: All endpoints - tags: [] diff --git a/api-docs/influxdb3/clustered/management/openapi.yml b/api-docs/influxdb3/clustered/management/openapi.yml index 410d10fc8f..2eee26276d 100644 --- a/api-docs/influxdb3/clustered/management/openapi.yml +++ b/api-docs/influxdb3/clustered/management/openapi.yml @@ -790,11 +790,11 @@ paths: permissions: $ref: '#/components/schemas/DatabaseTokenPermissions' createdAt: - $ref: '#/components/schemas/DatabaseTokenCreatedAt' + $ref: '#/components/schemas/DateTimeRfc3339' expiresAt: - $ref: '#/components/schemas/DatabaseTokenExpiresAt' + $ref: '#/components/schemas/DateTimeRfc3339' revokedAt: - $ref: '#/components/schemas/DatabaseTokenRevokedAt' + $ref: '#/components/schemas/DateTimeRfc3339' required: - id - description @@ -878,7 +878,7 @@ paths: permissions: $ref: '#/components/schemas/DatabaseTokenPermissions' expiresAt: - $ref: 
'#/components/schemas/DatabaseTokenExpiresAt' + $ref: '#/components/schemas/DateTimeRfc3339' required: - description examples: @@ -919,13 +919,13 @@ paths: permissions: $ref: '#/components/schemas/DatabaseTokenPermissions' createdAt: - $ref: '#/components/schemas/DatabaseTokenCreatedAt' + $ref: '#/components/schemas/DateTimeRfc3339' accessToken: $ref: '#/components/schemas/DatabaseTokenAccessToken' expiresAt: - $ref: '#/components/schemas/DatabaseTokenExpiresAt' + $ref: '#/components/schemas/DateTimeRfc3339' revokedAt: - $ref: '#/components/schemas/DatabaseTokenRevokedAt' + $ref: '#/components/schemas/DateTimeRfc3339' required: - id - description @@ -1034,11 +1034,11 @@ paths: permissions: $ref: '#/components/schemas/DatabaseTokenPermissions' createdAt: - $ref: '#/components/schemas/DatabaseTokenCreatedAt' + $ref: '#/components/schemas/DateTimeRfc3339' expiresAt: - $ref: '#/components/schemas/DatabaseTokenExpiresAt' + $ref: '#/components/schemas/DateTimeRfc3339' revokedAt: - $ref: '#/components/schemas/DatabaseTokenRevokedAt' + $ref: '#/components/schemas/DateTimeRfc3339' required: - id - description @@ -1168,11 +1168,11 @@ paths: permissions: $ref: '#/components/schemas/DatabaseTokenPermissions' createdAt: - $ref: '#/components/schemas/DatabaseTokenCreatedAt' + $ref: '#/components/schemas/DateTimeRfc3339' expiresAt: - $ref: '#/components/schemas/DatabaseTokenExpiresAt' + $ref: '#/components/schemas/DateTimeRfc3339' revokedAt: - $ref: '#/components/schemas/DatabaseTokenRevokedAt' + $ref: '#/components/schemas/DateTimeRfc3339' required: - id - description @@ -1359,7 +1359,7 @@ components: minLength: 1 ClusterDatabaseRetentionPeriod: description: | - The retention period of the [cluster database](/influxdb3/clustered/admin/databases/) in nanoseconds, if applicable + The retention period of the [cluster database](/influxdb/version/admin/databases/) in nanoseconds, if applicable If the retention period is not set or is set to 0, the database will have infinite 
retention type: integer @@ -1389,9 +1389,9 @@ components: minimum: 1 ClusterDatabasePartitionTemplate: description: | - A template for [partitioning](/influxdb3/clustered/admin/custom-partitions/) a cluster database. + A template for [partitioning](/influxdb/version/admin/custom-partitions/) a cluster database. - Each template part is evaluated in sequence, concatinating the final + Each template part is evaluated in sequence, concatenating the final partition key from the output of each part, delimited by the partition key delimiter `|`. @@ -1433,7 +1433,7 @@ components: * `time=2023-01-01, a=` -> `2023|#|!|!` * `time=2023-01-01, c=` -> `2023|!|!|` - When using the default [partitioning](/influxdb3/clustered/admin/custom-partitions/) template (YYYY-MM-DD) there is no + When using the default [partitioning](/influxdb/version/admin/custom-partitions/) template (YYYY-MM-DD) there is no encoding necessary, as the derived partition key contains a single part, and no reserved characters. [`TemplatePart::Bucket`] parts by definition will always be within the part length limit and contain no restricted characters @@ -1535,7 +1535,7 @@ components: tagName: c numberOfBuckets: 10 ClusterDatabaseTableName: - description: The name of the [cluster database](/influxdb3/clustered/admin/databases/) table + description: The name of the [cluster database](/influxdb/version/admin/databases/) table type: string examples: - TableOne @@ -1548,15 +1548,15 @@ components: - Limited Access Token - Full Access Token DatabaseTokenResourceAllDatabases: - description: A resource value for a [database token](/influxdb3/clustered/admin/tokens/database/) permission that refers to all databases + description: A resource value for a [database token](/influxdb/version/admin/tokens/database/) permission that refers to all databases type: string enum: - '*' DatabaseTokenPermissionAction: - description: The action the [database token](/influxdb3/clustered/admin/tokens/database/) permission allows + 
description: The action the [database token](/influxdb/version/admin/tokens/database/) permission allows type: string DatabaseTokenPermissionResource: - description: The resource the [database token](/influxdb3/clustered/admin/tokens/database/) permission applies to + description: The resource the [database token](/influxdb/version/admin/tokens/database/) permission applies to anyOf: - $ref: '#/components/schemas/ClusterDatabaseName' - $ref: '#/components/schemas/DatabaseTokenResourceAllDatabases' @@ -1580,7 +1580,7 @@ components: - action: write resource: '*' DatabaseTokenPermissions: - description: The list of permissions the [database token](/influxdb3/clustered/admin/tokens/database/) allows + description: The list of permissions the [database token](/influxdb/version/admin/tokens/database/) allows type: array items: $ref: '#/components/schemas/DatabaseTokenPermission' @@ -1593,7 +1593,7 @@ components: resource: '*' DatabaseTokenCreatedAt: description: | - The date and time that the [database token](/influxdb3/clustered/admin/tokens/database/) was created + The date and time that the [database token](/influxdb/version/admin/tokens/database/) was created Uses RFC3339 format $ref: '#/components/schemas/DateTimeRfc3339' diff --git a/api-docs/influxdb3/clustered/management/overlay.yml b/api-docs/influxdb3/clustered/management/overlay.yml new file mode 100644 index 0000000000..75927fc2e8 --- /dev/null +++ b/api-docs/influxdb3/clustered/management/overlay.yml @@ -0,0 +1,1323 @@ +overlay: 1.0.0 +info: + title: Clustered Management API overlay + version: 1.0.0 +actions: + - target: $.info + update: + title: InfluxDB 3 Clustered Management API + description: | + The Management API for InfluxDB 3 Clustered provides a programmatic interface for managing an InfluxDB 3 cluster. + The Management API lets you integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application. 
+ + This documentation is generated from the + InfluxDB 3 Management API OpenAPI specification. + version: '' + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + name: InfluxData + url: https://www.influxdata.com + email: support@influxdata.com + - target: $.servers + update: + - url: https://{baseurl}/api/v0 + description: InfluxDB 3 Clustered Management API URL + variables: + baseurl: + enum: + - console.influxdata.com + default: console.influxdata.com + description: InfluxDB 3 Clustered Console URL + - target: $.tags + update: + - name: Authentication + x-traitTag: true + description: | + With InfluxDB 3 Clustered, InfluxDB Management API endpoints require the following credential: + + - `Authorization MANAGEMENT_TOKEN`: the `Authorization` HTTP header with a [management token](/influxdb3/clustered/admin/tokens/management/). + + See how to [create a management token](/influxdb3/clustered/admin/tokens/management/). + + By default, management tokens in InfluxDB 3 are short-lived tokens issued by an OAuth2 identity provider that grant a specific user administrative access to your InfluxDB cluster. However, for automation purposes, you can manually create management tokens that authenticate directly with your InfluxDB cluster and do not require human interaction with your identity provider. + - name: Database tokens + description: Manage database read/write tokens for a cluster + - name: Databases + description: Manage databases for a cluster + - name: Quickstart + x-traitTag: true + description: | + The following example script shows how to use `curl` to make database and token management requests: + + ```shell + #!/bin/bash + + # Usage: + # Note the leading space in the command below to keep secrets out of the shell history + # + # ``` + # MANAGEMENT_TOKEN= ./scripts/test_http_api_v0_endpoints.sh + # ``` + + # Env var validation + if [ -z "${MANAGEMENT_TOKEN}" ]; then + echo " + [Error]: ❌ + \$MANAGEMENT_TOKEN env var is required. 
+ " + exit 1 + fi + + HOST="https://cluster-host.com" + + # Database request functions + list_databases () { + local response=$( \ + curl \ + --location "$HOST/api/v0/databases" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + create_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "name": "'$databaseName'", + "maxTables": 75, + "maxColumnsPerTable": 90, + "retentionPeriod": 600000000000, + "partitionTemplate": [ + { + "type": "tag", + "value": "abc" + }, + { + "type": "bucket", + "value": { + "tagName": "def", + "numberOfBuckets": 5 + } + } + ] + }' \ + ) + echo "$response" + } + + update_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases/$databaseName" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "maxTables": 150, + "maxColumnsPerTable": 180, + "retentionPeriod": 1200000000000 + }' \ + ) + echo "$response" + } + + delete_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases/$databaseName" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + # Token request functions + list_tokens () { + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + create_token () { + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + 
--header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "my test token", + "permissions": [ + { + "action": "write", + "resource": "database_one" + }, + { + "action": "read", + "resource": "database_two" + } + ] + }' \ + ) + echo "$response" + } + + get_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + update_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "my updated test token", + "permissions": [ + { + "action": "read", + "resource": "database_one" + } + ] + }' \ + ) + echo "$response" + } + + delete_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + + + # Test database endpoints + databaseName="test_database_$RANDOM" + + printf "\n🏗️ Creating database... 🏗️\n\n" + response="$(create_database $databaseName)" + echo $response | jq + printf "\n🏗️ Creating database successful 🏗️\n\n" + + printf "\n⬆️ Updating database... ⬆️\n\n" + response="$(update_database $databaseName)" + echo $response | jq + printf "\n⬆️ Updating database successful ⬆️\n\n" + + printf "\n⬇️ Listing databases... ⬇️\n\n" + response="$(list_databases)" + echo $response | jq + printf "\n⬇️ Listing databases successful ⬇️\n\n" + + printf "\n🗑️ Deleting database... 
🗑️\n\n" + response="$(delete_database $databaseName)" + echo $response | jq + printf "\n🗑️ Deleting database successful 🗑️\n\n" + + + # Test token endpoints + printf "\n🏗️ Creating token... 🏗️\n\n" + response="$(create_token)" + echo $response | jq + tokenId=$(echo $response | jq '.id') + printf "\n🏗️ Creating token successful 🏗️\n\n" + + printf "\n⬇️ Getting token... ⬇️\n\n" + response="$(get_token $tokenId)" + echo $response | jq + printf "\n⬇️ Getting token successful ⬇️\n\n" + + printf "\n⬆️ Updating token... ⬆️\n\n" + response="$(update_token $tokenId)" + echo $response | jq + printf "\n⬆️ Updating token successful ⬆️\n\n" + + printf "\n📋 Listing tokens... 📋\n\n" + response="$(list_tokens)" + echo $response | jq + printf "\n📋 Listing tokens successful 📋\n\n" + + printf "\n🗑️ Deleting token... 🗑️\n\n" + response="$(delete_token $tokenId)" + echo $response | jq + printf "\n🗑️ Deleting token successful 🗑️\n\n" + ``` + - name: Tables + description: Manage tables in a database + - target: $.paths['/databases'] + update: + get: + operationId: GetClusterDatabases + summary: Get all databases for a cluster + responses: + '200': + description: The cluster databases were successfully retrieved + content: + application/json: + schema: + type: array + items: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - name + - maxTables + - maxColumnsPerTable + - retentionPeriod + example: + - name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 200 + retentionPeriod: 0 + - name: DatabaseTwo + maxTables: 100 + maxColumnsPerTable: 50 + retentionPeriod: 300000000000 + partitionTemplate: + - type: time + value: 
'%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: '' + lang: Shell + source: | + HOST="https://cluster-host.com" + + list_databases () { + local response=$( \ + curl \ + --location "$HOST/api/v0/databases" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + tags: + - Databases + post: + operationId: CreateClusterDatabase + summary: Create a database + tags: + - Databases + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - name + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + name: DatabaseOne + allFields: + summary: All Fields + value: + name: DatabaseTwo + maxTables: 100 + maxColumnsPerTable: 50 + retentionPeriod: 300000000000 + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + responses: + '200': + description: The cluster database was successfully created + content: + application/json: + schema: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: 
'#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - name + - maxTables + - maxColumnsPerTable + - retentionPeriod + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 200 + retentionPeriod: 0 + allFields: + summary: All Fields + value: + name: DatabaseTwo + maxTables: 100 + maxColumnsPerTable: 50 + retentionPeriod: 300000000000 + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: a + - type: tag + value: c + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + create_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "name": "'$databaseName'", + "maxTables": 75, + "maxColumnsPerTable": 90, + "retentionPeriod": 600000000000, + "partitionTemplate": [ + { + "type": "tag", + "value": "abc" + }, + { + "type": "bucket", + "value": { + "tagName": "def", + "numberOfBuckets": 5 + } + } + ] + }' \ + ) + echo "$response" + } + - target: $.paths['/databases/{databaseName}'] + update: + patch: + operationId: UpdateClusterDatabase + summary: Update a database + tags: + - Databases + 
parameters: + - name: databaseName + in: path + description: The name of the database to update + required: true + schema: + $ref: '#/components/schemas/ClusterDatabaseName' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + minProperties: 1 + examples: + allFields: + summary: Update All Fields + value: + maxTables: 300 + maxColumnsPerTable: 150 + retentionPeriod: 600000000000 + maxTablesOnly: + summary: Update Max Tables Only + value: + maxTables: 300 + maxColumnsPerTableOnly: + summary: Update Max Columns Per Table Only + value: + maxColumnsPerTable: 150 + retentionPeriodOnly: + summary: Update Retention Period Only + value: + retentionPeriod: 600000000000 + responses: + '200': + description: The cluster database was successfully updated. 
+ content: + application/json: + schema: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseName' + maxTables: + $ref: '#/components/schemas/ClusterDatabaseMaxTables' + maxColumnsPerTable: + $ref: '#/components/schemas/ClusterDatabaseMaxColumnsPerTable' + retentionPeriod: + $ref: '#/components/schemas/ClusterDatabaseRetentionPeriod' + required: + - maxTables + - maxColumnsPerTable + - retentionPeriod + - name + examples: + allFields: + summary: Update All Fields + value: + name: DatabaseOne + maxTables: 300 + maxColumnsPerTable: 150 + retentionPeriod: 600000000000 + maxTablesOnly: + summary: Update Max Tables Only + value: + name: DatabaseOne + maxTables: 300 + maxColumnsPerTable: 200 + retentionPeriod: 0 + maxColumnsPerTableOnly: + summary: Update Max Columns Per Table Only + value: + name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 150 + retentionPeriod: 0 + retentionPeriodOnly: + summary: Update Retention Period Only + value: + name: DatabaseOne + maxTables: 500 + maxColumnsPerTable: 200 + retentionPeriod: 600000000000 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + update_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases/$databaseName" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "maxTables": 150, + "maxColumnsPerTable": 180, + "retentionPeriod": 1200000000000 + }' \ + ) + echo "$response" + } + delete: + operationId: DeleteClusterDatabase + summary: Delete a database + tags: + - Databases + parameters: + - name: 
databaseName + in: path + description: The name of the database to delete + required: true + schema: + $ref: '#/components/schemas/ClusterDatabaseName' + responses: + '204': + description: The cluster database was successfully deleted + $ref: '#/components/responses/NoContent' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + delete_database () { + local databaseName=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/databases/$databaseName" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + - target: $.paths['/databases/{databaseName}/tables'] + update: + post: + operationId: CreateClusterDatabaseTable + summary: Create a database table + tags: + - Tables + parameters: + - name: databaseName + in: path + description: The name of the database to create the database table for + required: true + schema: + $ref: '#/components/schemas/ClusterDatabaseName' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + name: + $ref: '#/components/schemas/ClusterDatabaseTableName' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - name + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + name: TableOne + allFields: + summary: All Fields + value: + name: TableTwo + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + responses: + '200': + description: The cluster database table was successfully created + content: + 
application/json: + schema: + type: object + properties: + databaseName: + description: The name of the database that the database table belongs to + $ref: '#/components/schemas/ClusterDatabaseName' + name: + description: The name of the database table + $ref: '#/components/schemas/ClusterDatabaseTableName' + partitionTemplate: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplate' + required: + - databaseName + - name + examples: + requiredFieldsOnly: + summary: Required Fields Only + value: + databaseName: DatabaseOne + name: TableOne + allFields: + summary: All Fields + value: + databaseName: DatabaseOne + name: TableTwo + partitionTemplate: + - type: time + value: '%Y' + - type: tag + value: a + - type: tag + value: c + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: '#/components/responses/InternalServerError' + - target: $.paths['/tokens'] + update: + get: + operationId: GetDatabaseTokens + summary: Get all database tokens for a cluster + tags: + - Database tokens + responses: + '200': + description: The database tokens were successfully retrieved + content: + application/json: + schema: + type: array + items: + type: object + properties: + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - id + - description + - permissions + - createdAt + example: + - id: 
55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + - id: 66666666-6666-4666-8666-666666666666 + description: Full Access Token + permissions: + - action: write + resource: '*' + createdAt: '2024-03-02T04:20:19.000Z' + - id: 77777777-7777-4777-8777-777777777777 + description: No Access Token + permissions: [] + createdAt: '2024-03-02T04:20:19.000Z' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + list_tokens () { + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + post: + operationId: CreateDatabaseToken + summary: Create a database token + tags: + - Database tokens + description: | + Create a [database token](/influxdb3/clustered/admin/tokens/database/) for a cluster. + + The token returned on the `accessToken` property in the response can be used to authenticate query and write requests to the cluster. + + ### Notable behaviors + + - InfluxDB might take some time--from a few seconds to a few minutes--to activate and synchronize new tokens. If a new database token doesn't immediately work (you receive a `401 Unauthorized` error) for querying or writing, wait and then try your request again. + + - Token strings are viewable _only_ on token creation and aren't stored by InfluxDB; you can't recover a lost token. + + #### Store secure tokens in a secret store + + We recommend storing database tokens in a **secure secret store**. 
+ For example, see how to [authenticate Telegraf using tokens in your OS secret store](https://github.com/influxdata/telegraf/tree/master/plugins/secretstores/os). + + If you lose a token, [delete the token from InfluxDB](/influxdb3/clustered/admin/tokens/database/delete/) and create a new one. + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + required: + - description + examples: + limitedAccessToken: + summary: Limited Access Token + value: + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + fullAccessToken: + summary: Full Access Token + value: + description: Full Access Token + permissions: + - action: write + resource: '*' + noAccessToken: + summary: No Access Token + value: + description: No Access Token + permissions: [] + responses: + '200': + description: The database token was successfully created + content: + application/json: + schema: + type: object + properties: + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + accessToken: + $ref: '#/components/schemas/DatabaseTokenAccessToken' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - id + - description + - permissions + - createdAt + - accessToken + examples: + limitedAccessToken: + summary: Limited Access Token + value: + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: 
read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + accessToken: apiv1_5555555555555555555555555555555555555555555555555555555555555555 + fullAccessToken: + summary: Full Access Token + value: + id: 66666666-6666-4666-8666-666666666666 + description: Full Access Token + permissions: + - action: write + resource: '*' + createdAt: '2024-03-02T04:20:19.000Z' + accessToken: apiv1_6666666666666666666666666666666666666666666666666666666666666666 + noAccessToken: + summary: No Access Token + value: + id: 66666666-6666-4666-8666-666666666666 + description: No Access Token + permissions: [] + createdAt: '2024-03-02T04:20:19.000Z' + accessToken: apiv1_7777777777777777777777777777777777777777777777777777777777777777 + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + create_token () { + local description=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens" \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "'$description'", + "permissions": [ + { + "action": "read", + "resource": "DatabaseOne" + }, + { + "action": "write", + "resource": "DatabaseTwo" + } + ] + }' \ + ) + echo "$response" + } + - target: $.paths['/tokens/{tokenId}'] + update: + get: + operationId: GetDatabaseToken + summary: Get a database token + tags: + - Database tokens + parameters: + - name: tokenId + in: path + description: The ID of the [database token](/influxdb3/clustered/admin/tokens/database/) to get + required: true + schema: + $ref: 
'#/components/schemas/UuidV4' + responses: + '200': + description: The database token was successfully retrieved. + content: + application/json: + schema: + type: object + properties: + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - id + - description + - permissions + - createdAt + examples: + limitedAccessToken: + summary: Limited Access Token + value: + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + fullAccessToken: + summary: Full Access Token + value: + id: 66666666-6666-4666-8666-666666666666 + description: Full Access Token + permissions: + - action: write + resource: '*' + createdAt: '2024-03-02T04:20:19.000Z' + noAccessToken: + summary: No Access Token + value: + id: 77777777-7777-4777-8777-777777777777 + description: No Access Token + permissions: [] + createdAt: '2024-03-02T04:20:19.000Z' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + get_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } + patch: + 
operationId: UpdateDatabaseToken + summary: Update a database token + tags: + - Database tokens + parameters: + - name: tokenId + in: path + description: The ID of the [database token](/influxdb3/clustered/admin/tokens/database/) to update + required: true + schema: + $ref: '#/components/schemas/UuidV4' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + minProperties: 1 + examples: + allFields: + summary: Update All Fields + value: + description: Updated Limited Access Token + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - action: write + resource: DatabaseThree + descriptionOnly: + summary: Update Description Only + value: + description: Updated Limited Access Token + permissionsOnly: + summary: Update Permissions Only + value: + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - action: write + resource: DatabaseThree + removeAllPermissions: + summary: Remove All Permissions + value: + permissions: [] + responses: + '200': + description: The database token was successfully updated + content: + application/json: + schema: + type: object + properties: + id: + description: The ID of the database token + $ref: '#/components/schemas/UuidV4' + description: + $ref: '#/components/schemas/DatabaseTokenDescription' + permissions: + $ref: '#/components/schemas/DatabaseTokenPermissions' + createdAt: + $ref: '#/components/schemas/DatabaseTokenCreatedAt' + expiresAt: + $ref: '#/components/schemas/DatabaseTokenExpiresAt' + revokedAt: + $ref: '#/components/schemas/DatabaseTokenRevokedAt' + required: + - id + - description + - permissions + - createdAt + examples: + allFields: + summary: Update All Fields + value: + id: 55555555-5555-4555-8555-555555555555 + description: Updated Limited 
Access Token + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - action: write + resource: DatabaseThree + createdAt: '2023-12-21T17:32:28.000Z' + descriptionOnly: + summary: Update Description Only + value: + id: 55555555-5555-4555-8555-555555555555 + description: Updated Limited Access Token + permissions: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + createdAt: '2023-12-21T17:32:28.000Z' + permissionsOnly: + summary: Update Permissions Only + value: + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: + - action: write + resource: DatabaseOne + - action: read + resource: DatabaseTwo + - action: write + resource: DatabaseThree + createdAt: '2023-12-21T17:32:28.000Z' + removeAllPermissions: + summary: Remove All Permissions + value: + id: 55555555-5555-4555-8555-555555555555 + description: Limited Access Token + permissions: [] + createdAt: '2023-12-21T17:32:28.000Z' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '409': + $ref: '#/components/responses/Conflict' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + update_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --request PATCH \ + --header "Accept: application/json" \ + --header 'Content-Type: application/json' \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + --data '{ + "description": "Updated Limited Access Token", + "permissions": [ + { + "action": "write", + "resource": "DatabaseOne" + }, + { + "action": "read", + "resource": "DatabaseTwo" + }, + { + "action": "write", + "resource": "DatabaseThree" + } + ] + }' \ + ) + echo "$response" + } 
+ delete: + operationId: DeleteDatabaseToken + summary: Delete a database token + tags: + - Database tokens + parameters: + - name: tokenId + in: path + description: The ID of the [database token](/influxdb3/clustered/admin/tokens/database/) to delete + required: true + schema: + $ref: '#/components/schemas/UuidV4' + responses: + '204': + description: The database token was successfully deleted + $ref: '#/components/responses/NoContent' + '400': + $ref: '#/components/responses/BadRequest' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + '404': + $ref: '#/components/responses/NotFound' + '500': + $ref: '#/components/responses/InternalServerError' + x-codeSamples: + - label: cURL + lang: Shell + source: | + HOST="https://cluster-host.com" + + delete_token () { + local tokenId=$1 + local response=$( \ + curl \ + --location "$HOST/api/v0/tokens/$tokenId" \ + --request DELETE \ + --header "Accept: application/json" \ + --header "Authorization: Bearer $MANAGEMENT_TOKEN" \ + ) + echo "$response" + } diff --git a/api-docs/influxdb3/clustered/v2/ref.yml b/api-docs/influxdb3/clustered/v2/ref.yml index a93a582f1f..c9a5931973 100644 --- a/api-docs/influxdb3/clustered/v2/ref.yml +++ b/api-docs/influxdb3/clustered/v2/ref.yml @@ -98,7 +98,7 @@ tags: | Header | Value type | Description | |:------------------------ |:--------------------- |:-------------------------------------------| | `Accept` | string | The content type that the client can understand. | - | `Authorization` | string | The authorization scheme and credential. | + | `Authorization` | string | The [authorization scheme and credential](/influxdb/version/api/authentication/). | | `Content-Length` | integer | The size of the entity-body, in bytes, sent to the database. | | `Content-Type` | string | The format of the data in the request body. 
| name: Headers diff --git a/api-docs/influxdb3/core/v3/content/info.yml b/api-docs/influxdb3/core/v3/content/info.yml deleted file mode 100644 index 34e55186eb..0000000000 --- a/api-docs/influxdb3/core/v3/content/info.yml +++ /dev/null @@ -1,34 +0,0 @@ -title: InfluxDB 3 Core API Service -x-influxdata-short-title: InfluxDB 3 API -x-influxdata-version-matrix: - v1: Compatibility layer for InfluxDB 1.x clients (supported) - v2: Compatibility layer for InfluxDB 2.x clients (supported) - v3: Native API for InfluxDB 3.x (current) -x-influxdata-short-description: The InfluxDB 3 HTTP API provides a programmatic interface for interactions with InfluxDB, including writing, querying, and processing data, and managing an InfluxDB 3 instance. -description: | - The InfluxDB HTTP API for InfluxDB 3 Core provides a programmatic interface for - interacting with InfluxDB 3 Core databases and resources. - Use this API to: - - - Write data to InfluxDB 3 Core databases - - Query data using SQL or InfluxQL - - Process data using Processing engine plugins - - Manage databases, tables, and Processing engine triggers - - Perform administrative tasks and access system information - - The API includes endpoints under the following paths: - - `/api/v3`: InfluxDB 3 Core native endpoints - - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - - -license: - name: MIT - url: 'https://opensource.org/licenses/MIT' -contact: - name: InfluxData - url: https://www.influxdata.com - email: support@influxdata.com \ No newline at end of file diff --git a/api-docs/influxdb3/core/v3/content/servers.yml b/api-docs/influxdb3/core/v3/content/servers.yml deleted file mode 100644 index 213371401c..0000000000 --- a/api-docs/influxdb3/core/v3/content/servers.yml +++ /dev/null @@ -1,8 +0,0 @@ -- url: https://{baseurl} - description: InfluxDB 3 Core API URL - variables: - baseurl: - enum: - - 'localhost:8181' - 
default: 'localhost:8181' - description: InfluxDB 3 Core URL diff --git a/api-docs/influxdb3/core/v3/content/tag-groups.yml b/api-docs/influxdb3/core/v3/content/tag-groups.yml deleted file mode 100644 index 364d5e7940..0000000000 --- a/api-docs/influxdb3/core/v3/content/tag-groups.yml +++ /dev/null @@ -1,15 +0,0 @@ -- name: Using the InfluxDB HTTP API - tags: - - Quick start - - Authentication - - Cache data - - Common parameters - - Response codes - - Compatibility endpoints - - Database - - Processing engine - - Server information - - Table - - Token - - Query data - - Write data diff --git a/api-docs/influxdb3/core/v3/overlay.yml b/api-docs/influxdb3/core/v3/overlay.yml new file mode 100644 index 0000000000..94b8ffe7c1 --- /dev/null +++ b/api-docs/influxdb3/core/v3/overlay.yml @@ -0,0 +1,70 @@ +overlay: 1.0.0 +info: + title: Core product overlay + version: 1.0.0 + +actions: + # Override info section + - target: $.info + update: + title: InfluxDB 3 Core API Service + x-influxdata-short-title: InfluxDB 3 API + x-influxdata-version-matrix: + v1: Compatibility layer for InfluxDB 1.x clients (supported) + v2: Compatibility layer for InfluxDB 2.x clients (supported) + v3: Native API for InfluxDB 3.x (current) + x-influxdata-short-description: The InfluxDB 3 HTTP API provides a programmatic interface for interactions with InfluxDB, including writing, querying, and processing data, and managing an InfluxDB 3 instance. + description: | + The InfluxDB HTTP API for InfluxDB 3 Core provides a programmatic interface for + interacting with InfluxDB 3 Core databases and resources. 
+ Use this API to: + + - Write data to InfluxDB 3 Core databases + - Query data using SQL or InfluxQL + - Process data using Processing engine plugins + - Manage databases, tables, and Processing engine triggers + - Perform administrative tasks and access system information + + The API includes endpoints under the following paths: + - `/api/v3`: InfluxDB 3 Core native endpoints + - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients + - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients + + + license: + name: MIT + url: 'https://opensource.org/licenses/MIT' + contact: + name: InfluxData + url: https://www.influxdata.com + email: support@influxdata.com + + # Override servers[0].description + - target: $.servers[0].description + update: InfluxDB 3 Core API URL + + # Override servers[0].variables.baseurl.description + - target: $.servers[0].variables.baseurl.description + update: InfluxDB 3 Core URL + + # Set tag groups for navigation + - target: $.x-tagGroups + update: + - name: Using the InfluxDB HTTP API + tags: + - Quick start + - Authentication + - Cache data + - Common parameters + - Response codes + - Compatibility endpoints + - Database + - Processing engine + - Server information + - Table + - Token + - Query data + - Write data diff --git a/api-docs/influxdb3/core/v3/ref.yml b/api-docs/influxdb3/core/v3/ref.yml index 5b00fa7ae5..a2a0720b27 100644 --- a/api-docs/influxdb3/core/v3/ref.yml +++ b/api-docs/influxdb3/core/v3/ref.yml @@ -21,7 +21,7 @@ info: This documentation is generated from the [InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/). 
--> - version: '3.7.0' + version: 3.7.0 license: name: MIT url: https://opensource.org/licenses/MIT @@ -29,32 +29,31 @@ info: name: InfluxData url: https://www.influxdata.com email: support@influxdata.com + x-influxdata-short-title: InfluxDB 3 API + x-influxdata-version-matrix: + v1: Compatibility layer for InfluxDB 1.x clients (supported) + v2: Compatibility layer for InfluxDB 2.x clients (supported) + v3: Native API for InfluxDB 3.x (current) + x-influxdata-short-description: The InfluxDB 3 HTTP API provides a programmatic interface for interactions with InfluxDB, including writing, querying, and processing data, and managing an InfluxDB 3 instance. servers: - - url: https://{baseurl} - description: InfluxDB 3 Core API URL - variables: - baseurl: - enum: - - localhost:8181 - default: localhost:8181 - description: InfluxDB 3 Core URL + - url: / security: - BearerAuthentication: [] - - TokenAuthentication: [] - - BasicAuthentication: [] - - QuerystringAuthentication: [] tags: + - name: Auth token + description: Manage tokens for authentication and authorization - name: Authentication description: | Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: | Authentication scheme | Works with | - |:-------------------|:-----------| - | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | - | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | - | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | - | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | - + |:----------------------|:-----------| + | Bearer authentication | All endpoints | + | Token authentication | v1 and v2 compatibility endpoints (`/write`, `/query`, `/api/v2/write`) | + | Basic authentication | v1 compatibility endpoints (`/write`, `/query`) | + | Querystring authentication | v1 compatibility endpoints 
(`/write`, `/query`) | + + See the **Security Schemes** section below for details on each authentication method. x-traitTag: true - name: Cache data description: | @@ -87,35 +86,8 @@ tags: #### Related guides - - [Manage the Distinct Value Cache](/influxdb3/core/admin/distinct-value-cache/) - - [Manage the Last Value Cache](/influxdb3/core/admin/last-value-cache/) - - name: Compatibility endpoints - description: | - InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. - - ### Write data using v1- or v2-compatible endpoints - - - [`/api/v2/write` endpoint](#operation/PostV2Write) - for InfluxDB v2 clients and when you bring existing InfluxDB v2 write workloads to InfluxDB 3. - - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 write workloads to InfluxDB 3. - - For new workloads, use the [`/api/v3/write_lp` endpoint](#operation/PostWriteLP). - - All endpoints accept the same line protocol format. - - ### Query data - - Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads using InfluxQL. - - For new workloads, use one of the following: - - - HTTP [`/api/v3/query_sql` endpoint](#operation/GetExecuteQuerySQL) for new query workloads using SQL. - - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using InfluxQL. - - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using Flight APIs, see [InfluxDB 3 client libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). - - ### Server information - - Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x clients. 
+ - [Manage the Distinct Value Cache](/influxdb/version/admin/distinct-value-cache/) + - [Manage the Last Value Cache](/influxdb/version/admin/last-value-cache/) - name: Database description: Manage databases - description: | @@ -139,19 +111,33 @@ tags: | Header | Value type | Description | |:------------------------ |:--------------------- |:-------------------------------------------| | `Accept` | string | The content type that the client can understand. | - | `Authorization` | string | The authorization scheme and credential. | + | `Authorization` | string | The [authorization scheme and credential](/influxdb/version/api/authentication/). | | `Content-Length` | integer | The size of the entity-body, in bytes. | | `Content-Type` | string | The format of the data in the request body. | name: Headers and parameters x-traitTag: true + - name: Migrate from InfluxDB v1 or v2 + x-traitTag: true + description: | + Migrate your existing InfluxDB v1 or v2 workloads to InfluxDB 3. + + InfluxDB 3 provides compatibility endpoints that work with InfluxDB 1.x and 2.x client libraries and tools. + Operations marked with v1 or v2 badges are compatible with the respective InfluxDB version. + + ### Migration guides + + - [Migrate from InfluxDB v1](/influxdb/version/guides/migrate/influxdb-1x/) - For users migrating from InfluxDB 1.x + - [Migrate from InfluxDB v2](/influxdb/version/guides/migrate/influxdb-2x/) - For users migrating from InfluxDB 2.x or Cloud + - [Use compatibility APIs to write data](/influxdb/version/write-data/http-api/compatibility-apis/) - v1 and v2 write endpoints + - [Use the v1 HTTP query API](/influxdb/version/query-data/execute-queries/influxdb-v1-api/) - InfluxQL queries via HTTP - name: Processing engine description: | Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. 
- InfluxDB 3 Core provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. + InfluxDB 3 provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. Use Processing engine plugins and triggers to run code and perform tasks for different database events. - To get started with the processing engine, see the [Processing engine and Python plugins](/influxdb3/core/processing-engine/) guide. + To get started with the processing engine, see the [Processing engine and Python plugins](/influxdb/version/processing-engine/) guide. - name: Query data description: Query data using SQL or InfluxQL - name: Quick start @@ -195,14 +181,12 @@ tags: {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"} ``` - For more information about using InfluxDB 3 Core, see the [Get started](/influxdb3/core/get-started/) guide. + For more information about using InfluxDB 3, see the [Get started](/influxdb/version/get-started/) guide. x-traitTag: true - name: Server information description: Retrieve server metrics, status, and version information - name: Table description: Manage table schemas and data - - name: Token - description: Manage tokens for authentication and authorization - name: Write data description: | Write data to InfluxDB 3 using line protocol format. @@ -229,17 +213,13 @@ paths: post: operationId: PostV1Write summary: Write line protocol (v1-compatible) + x-compatibility-version: v1 description: | Writes line protocol to the specified database. - - This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. 
- - Use this endpoint to send data in [line protocol](https://docs.influxdata.com/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. - Use query parameters to specify options for writing data. - - #### Related - - - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) + Use this endpoint to send data in [line protocol](/influxdb/version/reference/syntax/line-protocol/) format to InfluxDB. + externalDocs: + description: Use compatibility APIs to write data + url: /influxdb/version/write-data/http-api/compatibility-apis/ parameters: - $ref: '#/components/parameters/dbWriteParam' - $ref: '#/components/parameters/compatibilityPrecisionParam' @@ -306,7 +286,7 @@ paths: The response body: - indicates if a partial write occurred or all data was rejected. - - contains details about the [rejected points](/influxdb3/core/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + - contains details about the [rejected points](/influxdb/version/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. content: application/json: examples: @@ -342,27 +322,27 @@ paths: description: Access denied. '413': description: Request entity too large. + security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] tags: - - Compatibility endpoints - Write data x-influxdata-guides: - title: Use compatibility APIs to write data - href: /influxdb3/core/write-data/http-api/compatibility-apis/ + href: /influxdb/version/write-data/http-api/compatibility-apis/ /api/v2/write: post: operationId: PostV2Write summary: Write line protocol (v2-compatible) + x-compatibility-version: v2 description: | Writes line protocol to the specified database. 
- - This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. - - Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. - Use query parameters to specify options for writing data. - - #### Related - - - [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) + Use this endpoint to send data in [line protocol](/influxdb/version/reference/syntax/line-protocol/) format to InfluxDB. + externalDocs: + description: Use compatibility APIs to write data + url: /influxdb/version/write-data/http-api/compatibility-apis/ parameters: - name: Content-Type in: header @@ -435,12 +415,14 @@ paths: description: Access denied. '413': description: Request entity too large. + security: + - BearerAuthentication: [] + - TokenAuthentication: [] tags: - - Compatibility endpoints - Write data x-influxdata-guides: - title: Use compatibility APIs to write data - href: /influxdb3/core/write-data/http-api/compatibility-apis/ + href: /influxdb/version/write-data/http-api/compatibility-apis/ /api/v3/write_lp: post: operationId: PostWriteLP @@ -448,10 +430,10 @@ paths: description: | Writes line protocol to the specified database. - This is the native InfluxDB 3 Core write endpoint that provides enhanced control + This is the native InfluxDB 3 write endpoint that provides enhanced control over write behavior with advanced parameters for high-performance and fault-tolerant operations. - Use this endpoint to send data in [line protocol](/influxdb3/core/reference/syntax/line-protocol/) format to InfluxDB. + Use this endpoint to send data in [line protocol](/influxdb/version/reference/syntax/line-protocol/) format to InfluxDB. Use query parameters to specify options for writing data. 
#### Features @@ -472,7 +454,7 @@ paths: #### Related - - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/core/write-data/http-api/v3-write-lp/) + - [Use the InfluxDB v3 write_lp API to write data](/influxdb/version/write-data/http-api/v3-write-lp/) parameters: - $ref: '#/components/parameters/dbWriteParam' - $ref: '#/components/parameters/accept_partial' @@ -735,15 +717,13 @@ paths: get: operationId: GetV1ExecuteQuery summary: Execute InfluxQL query (v1-compatible) + x-compatibility-version: v1 description: | Executes an InfluxQL query to retrieve data from the specified database. - - This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. - Use query parameters to specify the database and the InfluxQL query. - - #### Related - - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) + Compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + externalDocs: + description: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + url: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ parameters: - name: Accept in: header @@ -797,8 +777,8 @@ paths: type: string - name: epoch description: | - Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the specified precision - instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with nanosecond precision. + Formats timestamps as [unix (epoch) timestamps](/influxdb/version/reference/glossary/#unix-timestamp) with the specified precision + instead of [RFC3339 timestamps](/influxdb/version/reference/glossary/#rfc3339-timestamp) with nanosecond precision. in: query schema: $ref: '#/components/schemas/EpochCompatibility' @@ -859,21 +839,24 @@ paths: description: Method not allowed. '422': description: Unprocessable entity. 
+ security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] tags: - Query data - - Compatibility endpoints x-influxdata-guides: - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ + href: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ post: operationId: PostExecuteV1Query summary: Execute InfluxQL query (v1-compatible) - description: | - Executes an InfluxQL query to retrieve data from the specified database. - - #### Related - - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) + x-compatibility-version: v1 + description: Executes an InfluxQL query to retrieve data from the specified database. + externalDocs: + description: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + url: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ requestBody: content: application/json: @@ -907,8 +890,8 @@ paths: - `u` or `µ` for microseconds - `ns` for nanoseconds - Formats timestamps as [unix (epoch) timestamps](/influxdb3/core/reference/glossary/#unix-timestamp) with the specified precision - instead of [RFC3339 timestamps](/influxdb3/core/reference/glossary/#rfc3339-timestamp) with nanosecond precision. + Formats timestamps as [unix (epoch) timestamps](/influxdb/version/reference/glossary/#unix-timestamp) with the specified precision + instead of [RFC3339 timestamps](/influxdb/version/reference/glossary/#rfc3339-timestamp) with nanosecond precision. enum: - ns - u @@ -978,12 +961,16 @@ paths: description: Method not allowed. '422': description: Unprocessable entity. 
+ security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] tags: - Query data - - Compatibility endpoints x-influxdata-guides: - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/core/query-data/execute-queries/influxdb-v1-api/ + href: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ /health: get: operationId: GetHealth @@ -999,7 +986,8 @@ paths: /api/v1/health: get: operationId: GetHealthV1 - summary: Health check (v1) + summary: Health check (v1-compatible) + x-compatibility-version: v1 description: Checks the status of the service. responses: '200': @@ -1008,7 +996,6 @@ paths: description: Service is unavailable. tags: - Server information - - Compatibility endpoints /ping: get: operationId: GetPing @@ -1196,7 +1183,6 @@ paths: description: Creates a distinct cache for a table. tags: - Cache data - - Table requestBody: required: true content: @@ -1242,7 +1228,6 @@ paths: description: Cache not found. tags: - Cache data - - Table /api/v3/configure/last_cache: post: operationId: PostConfigureLastCache @@ -1267,7 +1252,6 @@ paths: description: Cache already exists. tags: - Cache data - - Table delete: operationId: DeleteConfigureLastCache summary: Delete last cache @@ -1297,7 +1281,6 @@ paths: description: Cache not found. 
tags: - Cache data - - Table /api/v3/configure/processing_engine_trigger: post: operationId: PostConfigureProcessingEngineTrigger @@ -1307,7 +1290,7 @@ paths: ### Related guides - - [Processing engine and Python plugins](/influxdb3/core/plugins/) + - [Processing engine and Python plugins](/influxdb/version/plugins/) requestBody: required: true content: @@ -1523,7 +1506,7 @@ paths: ### Related guides - - [Processing engine and Python plugins](/influxdb3/core/plugins/) + - [Processing engine and Python plugins](/influxdb/version/plugins/) parameters: - $ref: '#/components/parameters/ContentType' requestBody: @@ -1573,7 +1556,7 @@ paths: ### Related - - [Processing engine and Python plugins](/influxdb3/core/plugins/) + - [Processing engine and Python plugins](/influxdb/version/plugins/) - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) parameters: - $ref: '#/components/parameters/ContentType' @@ -1737,9 +1720,11 @@ paths: $ref: '#/components/schemas/AdminTokenObject' '401': $ref: '#/components/responses/Unauthorized' + security: + - BearerAuthentication: [] + - {} tags: - - Authentication - - Token + - Auth token /api/v3/configure/token/admin/regenerate: post: operationId: PostRegenerateAdminToken @@ -1758,7 +1743,7 @@ paths: $ref: '#/components/responses/Unauthorized' tags: - Authentication - - Token + - Auth token /api/v3/configure/token: delete: operationId: DeleteToken @@ -1782,7 +1767,7 @@ paths: description: Token not found. tags: - Authentication - - Token + - Auth token /api/v3/configure/token/named_admin: post: operationId: PostCreateNamedAdminToken @@ -1813,7 +1798,7 @@ paths: description: A token with this name already exists. 
tags: - Authentication - - Token + - Auth token /api/v3/plugins/files: put: operationId: PutPluginFile @@ -2074,8 +2059,8 @@ components: #### Related - - [Use the HTTP API and client libraries to write data](/influxdb3/core/write-data/api-client-libraries/) - - [Data durability](/influxdb3/core/reference/internals/durability/) + - [Use the HTTP API and client libraries to write data](/influxdb/version/write-data/api-client-libraries/) + - [Data durability](/influxdb/version/reference/internals/durability/) PrecisionWriteCompatibility: enum: - ms @@ -2507,6 +2492,36 @@ components: description: The current status of the license. example: active description: Response schema for license information. + ResourceTokenObject: + type: object + properties: + token_name: + type: string + permissions: + type: array + items: + type: object + properties: + resource_type: + type: string + enum: + - system + - db + resource_identifier: + type: array + items: + type: string + actions: + type: array + items: + type: string + enum: + - read + - write + expiry_secs: + type: integer + description: The expiration time in seconds. + description: Response schema for resource token creation. responses: Unauthorized: description: Unauthorized access. 
@@ -2572,13 +2587,13 @@ components: Replace the following: - - **`DATABASE_NAME`**: your InfluxDB 3 Core database + - **`DATABASE_NAME`**: your InfluxDB 3 database - **`AUTH_TOKEN`**: an admin token #### Related guides - - [Authenticate v1 API requests](/influxdb3/core/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/core/admin/tokens/) + - [Authenticate v1 API requests](/influxdb/version/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb/version/admin/tokens/) QuerystringAuthentication: type: apiKey in: query @@ -2608,7 +2623,7 @@ components: Replace the following: - - **`DATABASE_NAME`**: your InfluxDB 3 Core database + - **`DATABASE_NAME`**: your InfluxDB 3 database - **`AUTH_TOKEN`**: an admin token ```bash @@ -2629,12 +2644,12 @@ components: Replace the following: - **`DATABASE_NAME`**: the database to query - - **`AUTH_TOKEN`**: an [admin token](/influxdb3/core/admin/tokens/) + - **`AUTH_TOKEN`**: an [admin token](/influxdb/version/admin/tokens/) #### Related guides - - [Authenticate v1 API requests](/influxdb3/core/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/core/admin/tokens/) + - [Authenticate v1 API requests](/influxdb/version/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb/version/admin/tokens/) BearerAuthentication: type: http scheme: bearer @@ -2693,7 +2708,7 @@ components: ### Related guides - - [Manage tokens](/influxdb3/core/admin/tokens/) + - [Manage tokens](/influxdb/version/admin/tokens/) in: header name: Authorization type: apiKey diff --git a/api-docs/influxdb3/enterprise/v3/content/info.yml b/api-docs/influxdb3/enterprise/v3/content/info.yml deleted file mode 100644 index e4ec8ef609..0000000000 --- a/api-docs/influxdb3/enterprise/v3/content/info.yml +++ /dev/null @@ -1,34 +0,0 @@ -title: InfluxDB 3 Enterprise API Service -x-influxdata-short-title: InfluxDB 3 API -x-influxdata-version-matrix: - v1: Compatibility layer for InfluxDB 1.x clients (supported) - v2: Compatibility layer for InfluxDB 
2.x clients (supported) - v3: Native API for InfluxDB 3.x (current) -x-influxdata-short-description: The InfluxDB 3 HTTP API provides a programmatic interface for interactions with InfluxDB, including writing, querying, and processing data, and managing an InfluxDB 3 instance. -description: | - The InfluxDB HTTP API for InfluxDB 3 Enterprise provides a programmatic interface for - interacting with InfluxDB 3 Enterprise databases and resources. - Use this API to: - - - Write data to InfluxDB 3 Enterprise databases - - Query data using SQL or InfluxQL - - Process data using Processing engine plugins - - Manage databases, tables, and Processing engine triggers - - Perform administrative tasks and access system information - - The API includes endpoints under the following paths: - - `/api/v3`: InfluxDB 3 Enterprise native endpoints - - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients - - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients - - -license: - name: MIT - url: 'https://opensource.org/licenses/MIT' -contact: - name: InfluxData - url: https://www.influxdata.com - email: support@influxdata.com \ No newline at end of file diff --git a/api-docs/influxdb3/enterprise/v3/content/servers.yml b/api-docs/influxdb3/enterprise/v3/content/servers.yml deleted file mode 100644 index 29f1a6e695..0000000000 --- a/api-docs/influxdb3/enterprise/v3/content/servers.yml +++ /dev/null @@ -1,8 +0,0 @@ -- url: https://{baseurl} - description: InfluxDB 3 Enterprise API URL - variables: - baseurl: - enum: - - 'localhost:8181' - default: 'localhost:8181' - description: InfluxDB 3 Enterprise URL diff --git a/api-docs/influxdb3/enterprise/v3/content/tag-groups.yml b/api-docs/influxdb3/enterprise/v3/content/tag-groups.yml deleted file mode 100644 index 364d5e7940..0000000000 --- a/api-docs/influxdb3/enterprise/v3/content/tag-groups.yml +++ /dev/null @@ -1,15 +0,0 @@ -- name: Using the InfluxDB HTTP API - tags: - - Quick start - - Authentication 
- - Cache data - - Common parameters - - Response codes - - Compatibility endpoints - - Database - - Processing engine - - Server information - - Table - - Token - - Query data - - Write data diff --git a/api-docs/influxdb3/enterprise/v3/overlay.yml b/api-docs/influxdb3/enterprise/v3/overlay.yml new file mode 100644 index 0000000000..b276b8011d --- /dev/null +++ b/api-docs/influxdb3/enterprise/v3/overlay.yml @@ -0,0 +1,175 @@ +overlay: 1.0.0 +info: + title: Enterprise product overlay + version: 1.0.0 + +actions: + # Override info section + - target: $.info + update: + title: InfluxDB 3 Enterprise API Service + x-influxdata-short-title: InfluxDB 3 API + x-influxdata-version-matrix: + v1: Compatibility layer for InfluxDB 1.x clients (supported) + v2: Compatibility layer for InfluxDB 2.x clients (supported) + v3: Native API for InfluxDB 3.x (current) + x-influxdata-short-description: The InfluxDB 3 HTTP API provides a programmatic interface for interactions with InfluxDB, including writing, querying, and processing data, and managing an InfluxDB 3 instance. + description: | + The InfluxDB HTTP API for InfluxDB 3 Enterprise provides a programmatic interface for + interacting with InfluxDB 3 Enterprise databases and resources. 
+ Use this API to: + + - Write data to InfluxDB 3 Enterprise databases + - Query data using SQL or InfluxQL + - Process data using Processing engine plugins + - Manage databases, tables, and Processing engine triggers + - Perform administrative tasks and access system information + + The API includes endpoints under the following paths: + - `/api/v3`: InfluxDB 3 Enterprise native endpoints + - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients + - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients + + + license: + name: MIT + url: 'https://opensource.org/licenses/MIT' + contact: + name: InfluxData + url: https://www.influxdata.com + email: support@influxdata.com + + # Override servers[0].description + - target: $.servers[0].description + update: InfluxDB 3 Enterprise API URL + + # Override servers[0].variables.baseurl.description + - target: $.servers[0].variables.baseurl.description + update: InfluxDB 3 Enterprise URL + + # Set tag groups for navigation + - target: $.x-tagGroups + update: + - name: Using the InfluxDB HTTP API + tags: + - Quick start + - Authentication + - Cache data + - Common parameters + - Response codes + - Compatibility endpoints + - Database + - Processing engine + - Server information + - Table + - Token + - Query data + - Write data + + # Enterprise-only: Add PATCH method to /api/v3/configure/table + - target: $.paths['/api/v3/configure/table'].patch + update: + operationId: PatchConfigureTable + summary: Update a table + description: | + Updates table configuration, such as retention period. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateTableRequest' + responses: + '200': + description: Success. The table has been updated. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Table not found. 
+ tags: + - Table + + # Enterprise-only: Add /api/v3/configure/database/{db} path + - target: $.paths['/api/v3/configure/database/{db}'] + update: + patch: + operationId: PatchConfigureDatabase + summary: Update a database + description: | + Updates database configuration, such as retention period. + parameters: + - name: db + in: path + required: true + schema: + type: string + description: The name of the database to update. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UpdateDatabaseRequest' + responses: + '200': + description: Success. The database has been updated. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. + tags: + - Database + + # Enterprise-only: Add /api/v3/show/license path + - target: $.paths['/api/v3/show/license'] + update: + get: + operationId: GetShowLicense + summary: Show license information + description: | + Retrieves information about the current InfluxDB 3 Enterprise license. + responses: + '200': + description: Success. The response body contains license information. + content: + application/json: + schema: + $ref: '#/components/schemas/LicenseResponse' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + tags: + - Server information + + # Enterprise-only: Add /api/v3/configure/enterprise/token path + - target: $.paths['/api/v3/configure/enterprise/token'] + update: + post: + operationId: PostCreateResourceToken + summary: Create a resource token + description: | + Creates a resource (fine-grained permissions) token. + A resource token is a token that has access to specific resources in the system. + This endpoint is only available in InfluxDB 3 Enterprise. + responses: + '201': + description: | + Success. The resource token has been created. + The response body contains the token string and metadata. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ResourceTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Authentication + - Auth token diff --git a/api-docs/influxdb3/enterprise/v3/ref.yml b/api-docs/influxdb3/enterprise/v3/ref.yml index a7ab0c8f57..5d2b67fb2b 100644 --- a/api-docs/influxdb3/enterprise/v3/ref.yml +++ b/api-docs/influxdb3/enterprise/v3/ref.yml @@ -21,7 +21,7 @@ info: This documentation is generated from the [InfluxDB OpenAPI specification](https://raw.githubusercontent.com/influxdata/). --> - version: '3.7.0' + version: 3.7.0 license: name: MIT url: https://opensource.org/licenses/MIT @@ -29,32 +29,31 @@ info: name: InfluxData url: https://www.influxdata.com email: support@influxdata.com + x-influxdata-short-title: InfluxDB 3 API + x-influxdata-version-matrix: + v1: Compatibility layer for InfluxDB 1.x clients (supported) + v2: Compatibility layer for InfluxDB 2.x clients (supported) + v3: Native API for InfluxDB 3.x (current) + x-influxdata-short-description: The InfluxDB 3 HTTP API provides a programmatic interface for interactions with InfluxDB, including writing, querying, and processing data, and managing an InfluxDB 3 instance. 
servers: - - url: https://{baseurl} - description: InfluxDB 3 Enterprise API URL - variables: - baseurl: - enum: - - localhost:8181 - default: localhost:8181 - description: InfluxDB 3 Enterprise URL + - url: / security: - BearerAuthentication: [] - - TokenAuthentication: [] - - BasicAuthentication: [] - - QuerystringAuthentication: [] tags: + - name: Auth token + description: Manage tokens for authentication and authorization - name: Authentication description: | Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: | Authentication scheme | Works with | - |:-------------------|:-----------| - | [Bearer authentication](#section/Authentication/BearerAuthentication) | All endpoints | - | [Token authentication](#section/Authentication/TokenAuthentication) | v1, v2 endpoints | - | [Basic authentication](#section/Authentication/BasicAuthentication) | v1 endpoints | - | [Querystring authentication](#section/Authentication/QuerystringAuthentication) | v1 endpoints | - + |:----------------------|:-----------| + | Bearer authentication | All endpoints | + | Token authentication | v1 and v2 compatibility endpoints (`/write`, `/query`, `/api/v2/write`) | + | Basic authentication | v1 compatibility endpoints (`/write`, `/query`) | + | Querystring authentication | v1 compatibility endpoints (`/write`, `/query`) | + + See the **Security Schemes** section below for details on each authentication method. x-traitTag: true - name: Cache data description: | @@ -87,35 +86,8 @@ tags: #### Related guides - - [Manage the Distinct Value Cache](/influxdb3/enterprise/admin/distinct-value-cache/) - - [Manage the Last Value Cache](/influxdb3/enterprise/admin/last-value-cache/) - - name: Compatibility endpoints - description: | - InfluxDB 3 provides compatibility endpoints for InfluxDB 1.x and InfluxDB 2.x workloads and clients. 
- - ### Write data using v1- or v2-compatible endpoints - - - [`/api/v2/write` endpoint](#operation/PostV2Write) - for InfluxDB v2 clients and when you bring existing InfluxDB v2 write workloads to InfluxDB 3. - - [`/write` endpoint](#operation/PostV1Write) for InfluxDB v1 clients and when you bring existing InfluxDB v1 write workloads to InfluxDB 3. - - For new workloads, use the [`/api/v3/write_lp` endpoint](#operation/PostWriteLP). - - All endpoints accept the same line protocol format. - - ### Query data - - Use the HTTP [`/query`](#operation/GetV1ExecuteQuery) endpoint for InfluxDB v1 clients and v1 query workloads using InfluxQL. - - For new workloads, use one of the following: - - - HTTP [`/api/v3/query_sql` endpoint](#operation/GetExecuteQuerySQL) for new query workloads using SQL. - - HTTP [`/api/v3/query_influxql` endpoint](#operation/GetExecuteInfluxQLQuery) for new query workloads using InfluxQL. - - Flight SQL and InfluxDB 3 _Flight+gRPC_ APIs for querying with SQL or InfluxQL. For more information about using Flight APIs, see [InfluxDB 3 client libraries](https://github.com/InfluxCommunity?q=influxdb3&type=public&language=&sort=). - - ### Server information - - Server information endpoints such as `/health` and `metrics` are compatible with InfluxDB 1.x and InfluxDB 2.x clients. + - [Manage the Distinct Value Cache](/influxdb/version/admin/distinct-value-cache/) + - [Manage the Last Value Cache](/influxdb/version/admin/last-value-cache/) - name: Database description: Manage databases - description: | @@ -139,19 +111,33 @@ tags: | Header | Value type | Description | |:------------------------ |:--------------------- |:-------------------------------------------| | `Accept` | string | The content type that the client can understand. | - | `Authorization` | string | The authorization scheme and credential. | + | `Authorization` | string | The [authorization scheme and credential](/influxdb/version/api/authentication/). 
| | `Content-Length` | integer | The size of the entity-body, in bytes. | | `Content-Type` | string | The format of the data in the request body. | name: Headers and parameters x-traitTag: true + - name: Migrate from InfluxDB v1 or v2 + x-traitTag: true + description: | + Migrate your existing InfluxDB v1 or v2 workloads to InfluxDB 3. + + InfluxDB 3 provides compatibility endpoints that work with InfluxDB 1.x and 2.x client libraries and tools. + Operations marked with v1 or v2 badges are compatible with the respective InfluxDB version. + + ### Migration guides + + - [Migrate from InfluxDB v1](/influxdb/version/guides/migrate/influxdb-1x/) - For users migrating from InfluxDB 1.x + - [Migrate from InfluxDB v2](/influxdb/version/guides/migrate/influxdb-2x/) - For users migrating from InfluxDB 2.x or Cloud + - [Use compatibility APIs to write data](/influxdb/version/write-data/http-api/compatibility-apis/) - v1 and v2 write endpoints + - [Use the v1 HTTP query API](/influxdb/version/query-data/execute-queries/influxdb-v1-api/) - InfluxQL queries via HTTP - name: Processing engine description: | Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. - InfluxDB 3 Enterprise provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. + InfluxDB 3 provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. Use Processing engine plugins and triggers to run code and perform tasks for different database events. - To get started with the processing engine, see the [Processing engine and Python plugins](/influxdb3/enterprise/processing-engine/) guide. + To get started with the processing engine, see the [Processing engine and Python plugins](/influxdb/version/processing-engine/) guide. 
- name: Query data description: Query data using SQL or InfluxQL - name: Quick start @@ -195,14 +181,12 @@ tags: {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"} ``` - For more information about using InfluxDB 3 Enterprise, see the [Get started](/influxdb3/enterprise/get-started/) guide. + For more information about using InfluxDB 3, see the [Get started](/influxdb/version/get-started/) guide. x-traitTag: true - name: Server information description: Retrieve server metrics, status, and version information - name: Table description: Manage table schemas and data - - name: Token - description: Manage tokens for authentication and authorization - name: Write data description: | Write data to InfluxDB 3 using line protocol format. @@ -229,17 +213,13 @@ paths: post: operationId: PostV1Write summary: Write line protocol (v1-compatible) + x-compatibility-version: v1 description: | Writes line protocol to the specified database. - - This endpoint provides backward compatibility for InfluxDB 1.x write workloads using tools such as InfluxDB 1.x client libraries, the Telegraf `outputs.influxdb` output plugin, or third-party tools. - - Use this endpoint to send data in [line protocol](https://docs.influxdata.com/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. - Use query parameters to specify options for writing data. - - #### Related - - - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) + Use this endpoint to send data in [line protocol](/influxdb/version/reference/syntax/line-protocol/) format to InfluxDB. + externalDocs: + description: Use compatibility APIs to write data + url: /influxdb/version/write-data/http-api/compatibility-apis/ parameters: - $ref: '#/components/parameters/dbWriteParam' - $ref: '#/components/parameters/compatibilityPrecisionParam' @@ -306,7 +286,7 @@ paths: The response body: - indicates if a partial write occurred or all data was rejected. 
- - contains details about the [rejected points](/influxdb3/enterprise/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + - contains details about the [rejected points](/influxdb/version/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. content: application/json: examples: @@ -342,27 +322,27 @@ paths: description: Access denied. '413': description: Request entity too large. + security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] tags: - - Compatibility endpoints - Write data x-influxdata-guides: - title: Use compatibility APIs to write data - href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ + href: /influxdb/version/write-data/http-api/compatibility-apis/ /api/v2/write: post: operationId: PostV2Write summary: Write line protocol (v2-compatible) + x-compatibility-version: v2 description: | Writes line protocol to the specified database. - - This endpoint provides backward compatibility for InfluxDB 2.x write workloads using tools such as InfluxDB 2.x client libraries, the Telegraf `outputs.influxdb_v2` output plugin, or third-party tools. - - Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. - Use query parameters to specify options for writing data. - - #### Related - - - [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) + Use this endpoint to send data in [line protocol](/influxdb/version/reference/syntax/line-protocol/) format to InfluxDB. + externalDocs: + description: Use compatibility APIs to write data + url: /influxdb/version/write-data/http-api/compatibility-apis/ parameters: - name: Content-Type in: header @@ -435,12 +415,14 @@ paths: description: Access denied. '413': description: Request entity too large. 
+ security: + - BearerAuthentication: [] + - TokenAuthentication: [] tags: - - Compatibility endpoints - Write data x-influxdata-guides: - title: Use compatibility APIs to write data - href: /influxdb3/enterprise/write-data/http-api/compatibility-apis/ + href: /influxdb/version/write-data/http-api/compatibility-apis/ /api/v3/write_lp: post: operationId: PostWriteLP @@ -448,10 +430,10 @@ paths: description: | Writes line protocol to the specified database. - This is the native InfluxDB 3 Enterprise write endpoint that provides enhanced control + This is the native InfluxDB 3 write endpoint that provides enhanced control over write behavior with advanced parameters for high-performance and fault-tolerant operations. - Use this endpoint to send data in [line protocol](/influxdb3/enterprise/reference/syntax/line-protocol/) format to InfluxDB. + Use this endpoint to send data in [line protocol](/influxdb/version/reference/syntax/line-protocol/) format to InfluxDB. Use query parameters to specify options for writing data. #### Features @@ -472,7 +454,7 @@ paths: #### Related - - [Use the InfluxDB v3 write_lp API to write data](/influxdb3/enterprise/write-data/http-api/v3-write-lp/) + - [Use the InfluxDB v3 write_lp API to write data](/influxdb/version/write-data/http-api/v3-write-lp/) parameters: - $ref: '#/components/parameters/dbWriteParam' - $ref: '#/components/parameters/accept_partial' @@ -735,15 +717,13 @@ paths: get: operationId: GetV1ExecuteQuery summary: Execute InfluxQL query (v1-compatible) + x-compatibility-version: v1 description: | Executes an InfluxQL query to retrieve data from the specified database. - - This endpoint is compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. - Use query parameters to specify the database and the InfluxQL query. 
- - #### Related - - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) + Compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + externalDocs: + description: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + url: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ parameters: - name: Accept in: header @@ -797,8 +777,8 @@ paths: type: string - name: epoch description: | - Formats timestamps as [unix (epoch) timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision - instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with nanosecond precision. + Formats timestamps as [unix (epoch) timestamps](/influxdb/version/reference/glossary/#unix-timestamp) with the specified precision + instead of [RFC3339 timestamps](/influxdb/version/reference/glossary/#rfc3339-timestamp) with nanosecond precision. in: query schema: $ref: '#/components/schemas/EpochCompatibility' @@ -859,21 +839,24 @@ paths: description: Method not allowed. '422': description: Unprocessable entity. + security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] tags: - Query data - - Compatibility endpoints x-influxdata-guides: - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/ + href: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ post: operationId: PostExecuteV1Query summary: Execute InfluxQL query (v1-compatible) - description: | - Executes an InfluxQL query to retrieve data from the specified database. 
- - #### Related - - - [Use the InfluxDB v1 HTTP query API and InfluxQL to query data](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) + x-compatibility-version: v1 + description: Executes an InfluxQL query to retrieve data from the specified database. + externalDocs: + description: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + url: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ requestBody: content: application/json: @@ -907,8 +890,8 @@ paths: - `u` or `µ` for microseconds - `ns` for nanoseconds - Formats timestamps as [unix (epoch) timestamps](/influxdb3/enterprise/reference/glossary/#unix-timestamp) with the specified precision - instead of [RFC3339 timestamps](/influxdb3/enterprise/reference/glossary/#rfc3339-timestamp) with nanosecond precision. + Formats timestamps as [unix (epoch) timestamps](/influxdb/version/reference/glossary/#unix-timestamp) with the specified precision + instead of [RFC3339 timestamps](/influxdb/version/reference/glossary/#rfc3339-timestamp) with nanosecond precision. enum: - ns - u @@ -978,12 +961,16 @@ paths: description: Method not allowed. '422': description: Unprocessable entity. + security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] tags: - Query data - - Compatibility endpoints x-influxdata-guides: - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data - href: /influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/ + href: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ /health: get: operationId: GetHealth @@ -999,7 +986,8 @@ paths: /api/v1/health: get: operationId: GetHealthV1 - summary: Health check (v1) + summary: Health check (v1-compatible) + x-compatibility-version: v1 description: Checks the status of the service. responses: '200': @@ -1008,7 +996,6 @@ paths: description: Service is unavailable. 
tags: - Server information - - Compatibility endpoints /ping: get: operationId: GetPing @@ -1042,15 +1029,6 @@ paths: description: Retrieves a list of databases. parameters: - $ref: '#/components/parameters/formatRequired' - - name: show_deleted - in: query - required: false - schema: - type: boolean - default: false - description: | - Include soft-deleted databases in the response. - By default, only active databases are returned. responses: '200': description: Success. The response body contains the list of databases. @@ -1126,11 +1104,12 @@ paths: summary: Remove database retention period description: | Removes the retention period from a database, setting it to infinite retention. + Data in the database will not expire based on time. parameters: - $ref: '#/components/parameters/db' responses: - '204': - description: Success. The database retention period has been removed. + '200': + description: Success. Retention period removed from database. '401': $ref: '#/components/responses/Unauthorized' '404': @@ -1219,55 +1198,6 @@ paths: description: Table not found. tags: - Table - /api/v3/configure/database/{db}: - patch: - operationId: PatchConfigureDatabase - summary: Update a database - description: | - Updates database configuration, such as retention period. - parameters: - - name: db - in: path - required: true - schema: - type: string - description: The name of the database to update. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/UpdateDatabaseRequest' - responses: - '200': - description: Success. The database has been updated. - '400': - description: Bad request. - '401': - $ref: '#/components/responses/Unauthorized' - '404': - description: Database not found. - tags: - - Database - /api/v3/show/license: - get: - operationId: GetShowLicense - summary: Show license information - description: | - Retrieves information about the current InfluxDB 3 Enterprise license. 
- responses: - '200': - description: Success. The response body contains license information. - content: - application/json: - schema: - $ref: '#/components/schemas/LicenseResponse' - '401': - $ref: '#/components/responses/Unauthorized' - '403': - description: Access denied. - tags: - - Server information /api/v3/configure/distinct_cache: post: operationId: PostConfigureDistinctCache @@ -1275,7 +1205,6 @@ paths: description: Creates a distinct cache for a table. tags: - Cache data - - Table requestBody: required: true content: @@ -1321,7 +1250,6 @@ paths: description: Cache not found. tags: - Cache data - - Table /api/v3/configure/last_cache: post: operationId: PostConfigureLastCache @@ -1346,7 +1274,6 @@ paths: description: Cache already exists. tags: - Cache data - - Table delete: operationId: DeleteConfigureLastCache summary: Delete last cache @@ -1376,7 +1303,6 @@ paths: description: Cache not found. tags: - Cache data - - Table /api/v3/configure/processing_engine_trigger: post: operationId: PostConfigureProcessingEngineTrigger @@ -1386,7 +1312,7 @@ paths: ### Related guides - - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) + - [Processing engine and Python plugins](/influxdb/version/plugins/) requestBody: required: true content: @@ -1400,7 +1326,7 @@ paths: In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to Friday). value: - db: DATABASE_NAME + db: mydb plugin_filename: schedule.py trigger_name: schedule_cron_trigger trigger_specification: cron:0 0 6 * * 1-5 @@ -1532,9 +1458,6 @@ paths: schema: type: boolean default: false - description: | - Force deletion of the trigger even if it has active executions. - By default, deletion fails if the trigger is currently executing. responses: '200': description: Success. The processing engine trigger has been deleted. 
@@ -1605,7 +1528,7 @@ paths: ### Related guides - - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) + - [Processing engine and Python plugins](/influxdb/version/plugins/) parameters: - $ref: '#/components/parameters/ContentType' requestBody: @@ -1655,7 +1578,7 @@ paths: ### Related - - [Processing engine and Python plugins](/influxdb3/enterprise/plugins/) + - [Processing engine and Python plugins](/influxdb/version/plugins/) - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) parameters: - $ref: '#/components/parameters/ContentType' @@ -1690,12 +1613,6 @@ paths: operationId: PostTestWALPlugin summary: Test WAL plugin description: Executes a test of a write-ahead logging (WAL) plugin. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/WALPluginTestRequest' responses: '200': description: Success. The plugin test has been executed. @@ -1712,12 +1629,6 @@ paths: operationId: PostTestSchedulingPlugin summary: Test scheduling plugin description: Executes a test of a scheduling plugin. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/SchedulePluginTestRequest' responses: '200': description: Success. The plugin test has been executed. @@ -1813,29 +1724,6 @@ paths: description: Processing failure. tags: - Processing engine - /api/v3/configure/enterprise/token: - post: - operationId: PostCreateResourceToken - summary: Create a resource token - description: | - Creates a resource (fine-grained permissions) token. - A resource token is a token that has access to specific resources in the system. - - This endpoint is only available in InfluxDB 3 Enterprise. - responses: - '201': - description: | - Success. The resource token has been created. - The response body contains the token string and metadata. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ResourceTokenObject' - '401': - $ref: '#/components/responses/Unauthorized' - tags: - - Authentication - - Token /api/v3/configure/token/admin: post: operationId: PostCreateAdminToken @@ -1854,9 +1742,11 @@ paths: $ref: '#/components/schemas/AdminTokenObject' '401': $ref: '#/components/responses/Unauthorized' + security: + - BearerAuthentication: [] + - {} tags: - - Authentication - - Token + - Auth token /api/v3/configure/token/admin/regenerate: post: operationId: PostRegenerateAdminToken @@ -1875,7 +1765,7 @@ paths: $ref: '#/components/responses/Unauthorized' tags: - Authentication - - Token + - Auth token /api/v3/configure/token: delete: operationId: DeleteToken @@ -1888,7 +1778,8 @@ paths: required: true schema: type: string - description: The ID of the token to delete. + description: | + The ID of the token to delete. responses: '204': description: Success. The token has been deleted. @@ -1898,21 +1789,22 @@ paths: description: Token not found. tags: - Authentication - - Token + - Auth token /api/v3/configure/token/named_admin: post: operationId: PostCreateNamedAdminToken summary: Create named admin token description: | Creates a named admin token. - A named admin token is a special type of admin token with a custom name for identification and management. + A named admin token is an admin token with a specific name identifier. parameters: - name: name in: query required: true schema: type: string - description: The name for the admin token. + description: | + The name for the admin token. responses: '201': description: | @@ -1928,7 +1820,7 @@ paths: description: A token with this name already exists. tags: - Authentication - - Token + - Auth token /api/v3/plugins/files: put: operationId: PutPluginFile @@ -1936,12 +1828,6 @@ paths: description: | Updates a plugin file in the plugin directory. 
x-security-note: Requires an admin token - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/PluginFileRequest' responses: '204': description: Success. The plugin file has been updated. @@ -1958,21 +1844,86 @@ paths: description: | Updates the plugin directory configuration. x-security-note: Requires an admin token + responses: + '204': + description: Success. The plugin directory has been updated. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Forbidden. Admin token required. + tags: + - Processing engine + /api/v3/configure/database/{db}: + patch: + operationId: PatchConfigureDatabase + summary: Update a database + description: | + Updates database configuration, such as retention period. + parameters: + - name: db + in: path + required: true + schema: + type: string + description: The name of the database to update. requestBody: required: true content: application/json: schema: - $ref: '#/components/schemas/PluginDirectoryRequest' + $ref: '#/components/schemas/UpdateDatabaseRequest' responses: - '204': - description: Success. The plugin directory has been updated. + '200': + description: Success. The database has been updated. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. + tags: + - Database + /api/v3/show/license: + get: + operationId: GetShowLicense + summary: Show license information + description: | + Retrieves information about the current InfluxDB 3 Enterprise license. + responses: + '200': + description: Success. The response body contains license information. + content: + application/json: + schema: + $ref: '#/components/schemas/LicenseResponse' '401': $ref: '#/components/responses/Unauthorized' '403': - description: Forbidden. Admin token required. + description: Access denied. 
tags: - - Processing engine + - Server information + /api/v3/configure/enterprise/token: + post: + operationId: PostCreateResourceToken + summary: Create a resource token + description: | + Creates a resource (fine-grained permissions) token. + A resource token is a token that has access to specific resources in the system. + This endpoint is only available in InfluxDB 3 Enterprise. + responses: + '201': + description: | + Success. The resource token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: '#/components/schemas/ResourceTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Authentication + - Auth token components: parameters: AcceptQueryHeader: @@ -2101,7 +2052,7 @@ components: type: string description: | Password for v1 compatibility authentication. - For query string authentication, pass a database token with write permissions as this parameter. + For query string authentication, pass an admin token. InfluxDB 3 checks that the `p` value is an authorized token. requestBodies: lineProtocolRequestBody: @@ -2150,44 +2101,6 @@ components: hash: 00xx0Xx0xx00XX0x0 created_at: '2025-04-18T14:02:45.331Z' expiry: null - ResourceTokenObject: - type: object - properties: - token_name: - type: string - permissions: - type: array - items: - type: object - properties: - resource_type: - type: string - enum: - - system - - db - resource_identifier: - type: array - items: - type: string - actions: - type: array - items: - type: string - enum: - - read - - write - expiry_secs: - type: integer - description: The expiration time in seconds. 
- example: - token_name: All system information - permissions: - - resource_type: system - resource_identifier: - - '*' - actions: - - read - expiry_secs: 300000 ContentEncoding: type: string enum: @@ -2239,8 +2152,8 @@ components: #### Related - - [Use the HTTP API and client libraries to write data](/influxdb3/enterprise/write-data/api-client-libraries/) - - [Data durability](/influxdb3/enterprise/reference/internals/durability/) + - [Use the HTTP API and client libraries to write data](/influxdb/version/write-data/api-client-libraries/) + - [Data durability](/influxdb/version/reference/internals/durability/) PrecisionWriteCompatibility: enum: - ms @@ -2307,13 +2220,9 @@ components: properties: db: type: string - pattern: ^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$|^[a-zA-Z0-9]$ - description: |- - The database name. Database names cannot contain underscores (_). - Names must start and end with alphanumeric characters and can contain hyphens (-) in the middle. retention_period: type: string - description: |- + description: | The retention period for the database. Specifies how long data should be retained. Use duration format (for example, "1d", "1h", "30m", "7d"). example: 7d @@ -2348,12 +2257,6 @@ components: required: - name - type - retention_period: - type: string - description: |- - The retention period for the table. Specifies how long data in this table should be retained. - Use duration format (for example, "1d", "1h", "30m", "7d"). - example: 30d required: - db - table @@ -2365,8 +2268,6 @@ components: type: string table: type: string - node_spec: - $ref: '#/components/schemas/ApiNodeSpec' name: type: string description: Optional cache name. @@ -2399,8 +2300,6 @@ components: type: string table: type: string - node_spec: - $ref: '#/components/schemas/ApiNodeSpec' name: type: string description: Optional cache name. @@ -2445,8 +2344,6 @@ components: The path can be absolute or relative to the `--plugins-dir` directory configured when starting InfluxDB 3. 
The plugin file must implement the trigger interface associated with the trigger's specification. - node_spec: - $ref: '#/components/schemas/ApiNodeSpec' trigger_name: type: string trigger_settings: @@ -2455,6 +2352,7 @@ components: allOf: - $ref: '#/components/schemas/TriggerSettings' trigger_specification: + type: string description: | Specifies when and how the processing engine trigger should be invoked. @@ -2546,133 +2444,6 @@ components: required: - run_async - error_behavior - ApiNodeSpec: - type: object - description: | - Optional specification for targeting specific nodes in a multi-node InfluxDB 3 Enterprise cluster. - Use this to control which node(s) should handle the cache or trigger. - properties: - node_id: - type: string - description: | - The ID of a specific node in the cluster. - If specified, the cache or trigger will only be created on this node. - node_group: - type: string - description: | - The name of a node group in the cluster. - If specified, the cache or trigger will be created on all nodes in this group. - WALPluginTestRequest: - type: object - description: | - Request body for testing a write-ahead logging (WAL) plugin. - properties: - filename: - type: string - description: | - The path and filename of the plugin to test. - database: - type: string - description: | - The database name to use for the test. - input_lp: - type: string - description: | - Line protocol data to use as input for the test. - cache_name: - type: string - description: | - Optional name of the cache to use in the test. - input_arguments: - type: object - additionalProperties: - type: string - description: | - Optional key-value pairs of arguments to pass to the plugin. - required: - - filename - - database - - input_lp - SchedulePluginTestRequest: - type: object - description: | - Request body for testing a scheduling plugin. - properties: - filename: - type: string - description: | - The path and filename of the plugin to test. 
- database: - type: string - description: | - The database name to use for the test. - schedule: - type: string - description: | - Optional schedule specification in cron or interval format. - cache_name: - type: string - description: | - Optional name of the cache to use in the test. - input_arguments: - type: object - additionalProperties: - type: string - description: | - Optional key-value pairs of arguments to pass to the plugin. - required: - - filename - - database - PluginFileRequest: - type: object - description: | - Request body for updating a plugin file. - properties: - plugin_name: - type: string - description: | - The name of the plugin file to update. - content: - type: string - description: | - The content of the plugin file. - required: - - plugin_name - - content - PluginDirectoryRequest: - type: object - description: | - Request body for updating plugin directory with multiple files. - properties: - plugin_name: - type: string - description: | - The name of the plugin directory to update. - files: - type: array - items: - $ref: '#/components/schemas/PluginFileEntry' - description: | - List of plugin files to include in the directory. - required: - - plugin_name - - files - PluginFileEntry: - type: object - description: | - Represents a single file in a plugin directory. - properties: - filename: - type: string - description: | - The name of the file within the plugin directory. - content: - type: string - description: | - The content of the file. - required: - - filename - - content ShowDatabasesResponse: type: object properties: @@ -2814,6 +2585,36 @@ components: description: The current status of the license. example: active description: Response schema for license information. 
+ ResourceTokenObject: + type: object + properties: + token_name: + type: string + permissions: + type: array + items: + type: object + properties: + resource_type: + type: string + enum: + - system + - db + resource_identifier: + type: array + items: + type: string + actions: + type: array + items: + type: string + enum: + - read + - write + expiry_secs: + type: integer + description: The expiration time in seconds. + description: Response schema for resource token creation. responses: Unauthorized: description: Unauthorized access. @@ -2868,12 +2669,6 @@ components: When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token and ignores the `username` part of the decoded credential. - ### Syntax - - ```http - Authorization: Basic - ``` - ### Example ```bash @@ -2885,13 +2680,13 @@ components: Replace the following: - - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database - - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + - **`DATABASE_NAME`**: your InfluxDB 3 database + - **`AUTH_TOKEN`**: an admin token #### Related guides - - [Authenticate v1 API requests](/influxdb3/enterprise/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/enterprise/admin/tokens/) + - [Authenticate v1 API requests](/influxdb/version/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb/version/admin/tokens/) QuerystringAuthentication: type: apiKey in: query @@ -2907,8 +2702,8 @@ components: ### Syntax ```http - https://localhost:8181/query/?[u=any]&p=AUTH_TOKEN - https://localhost:8181/write/?[u=any]&p=AUTH_TOKEN + http://localhost:8181/query/?[u=any]&p=DATABASE_TOKEN + http://localhost:8181/write/?[u=any]&p=DATABASE_TOKEN ``` ### Examples @@ -2921,8 +2716,8 @@ components: Replace the following: - - **`DATABASE_NAME`**: your InfluxDB 3 Enterprise database - - **`AUTH_TOKEN`**: an admin token or database token authorized for the database + - **`DATABASE_NAME`**: your InfluxDB 
3 database + - **`AUTH_TOKEN`**: an admin token ```bash ####################################### @@ -2930,7 +2725,7 @@ components: # to query the InfluxDB v1 HTTP API ####################################### # Use authentication query parameters: - # ?p=AUTH_TOKEN + # ?p=DATABASE_TOKEN ####################################### curl --get "http://localhost:8181/query" \ @@ -2942,12 +2737,12 @@ components: Replace the following: - **`DATABASE_NAME`**: the database to query - - **`AUTH_TOKEN`**: a database token with sufficient permissions to the database + - **`AUTH_TOKEN`**: an [admin token](/influxdb/version/admin/tokens/) #### Related guides - - [Authenticate v1 API requests](/influxdb3/enterprise/guides/api-compatibility/v1/) - - [Manage tokens](/influxdb3/enterprise/admin/tokens/) + - [Authenticate v1 API requests](/influxdb/version/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb/version/admin/tokens/) BearerAuthentication: type: http scheme: bearer @@ -2960,7 +2755,8 @@ components: Bearer authentication works with all endpoints. In your API requests, send an `Authorization` header. - For the header value, provide the word `Bearer` followed by a space and a database token. + For the header value, provide the word `Bearer` followed by a space and an admin token. + ### Syntax @@ -3005,7 +2801,7 @@ components: ### Related guides - - [Manage tokens](/influxdb3/enterprise/admin/tokens/) + - [Manage tokens](/influxdb/version/admin/tokens/) in: header name: Authorization type: apiKey diff --git a/api-docs/influxdb3/shared/management/base.yml b/api-docs/influxdb3/shared/management/base.yml new file mode 100644 index 0000000000..6935675a0f --- /dev/null +++ b/api-docs/influxdb3/shared/management/base.yml @@ -0,0 +1,460 @@ +openapi: 3.1.0 +info: + title: InfluxDB 3 Management API + description: | + The Management API for InfluxDB 3 provides a programmatic interface for managing an InfluxDB 3 cluster. 
+ The Management API lets you integrate functions such as creating and managing databases, permissions, and tokens into your workflow or application. + + This documentation is generated from the + InfluxDB 3 Management API OpenAPI specification. + version: '' + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + name: InfluxData + url: https://www.influxdata.com + email: support@influxdata.com +servers: + - url: https://{baseurl}/api/v0 + description: InfluxDB 3 Management API URL + variables: + baseurl: + enum: + - console.influxdata.com + default: console.influxdata.com + description: InfluxDB 3 Console URL +security: + - bearerAuthManagementToken: [] + bearerAuthJwt: [] +tags: + - name: Authentication + x-traitTag: true + description: | + InfluxDB Management API endpoints require authentication with a management token. + - name: Database tokens + description: Manage database read/write tokens for a cluster + - name: Databases + description: Manage databases for a cluster + - name: Quickstart + x-traitTag: true + description: | + See the product-specific documentation for quickstart examples. 
+ - name: Tables + description: List and delete tables for a database +paths: {} +components: + schemas: + Error: + type: object + properties: + code: + type: integer + message: + type: string + examples: + - code: 400 + message: bad request + - code: 401 + message: unauthorized + - code: 403 + message: forbidden + - code: 404 + message: not found + - code: 409 + message: conflict + - code: 500 + message: internal server error + required: + - code + - message + DateTimeRfc3339: + type: string + format: date-time + examples: + - '2023-12-21T17:32:28Z' + UuidV4: + type: string + format: uuid + examples: + - 11111111-1111-4111-8111-111111111111 + - 22222222-1111-4111-8111-111111111111 + ClusterDatabaseName: + description: The name of the cluster database + type: string + examples: + - DatabaseOne + - DatabaseTwo + maxLength: 64 + minLength: 1 + ClusterDatabaseRetentionPeriod: + description: | + The retention period of the [cluster database](/influxdb/version/admin/databases/) in nanoseconds, if applicable + + If the retention period is not set or is set to 0, the database will have infinite retention + type: integer + format: int64 + default: 0 + examples: + - 300000000000 + - 600000000000 + minimum: 0 + ClusterDatabaseMaxTables: + description: The maximum number of tables for the cluster database + type: integer + format: int32 + default: 500 + examples: + - 100 + - 300 + minimum: 1 + ClusterDatabaseMaxColumnsPerTable: + description: The maximum number of columns per table for the cluster database + type: integer + format: int32 + default: 200 + examples: + - 50 + - 150 + minimum: 1 + ClusterDatabasePartitionTemplate: + description: | + A template for [partitioning](/influxdb/version/admin/custom-partitions/) a cluster database. + + Each template part is evaluated in sequence, concatenating the final + partition key from the output of each part, delimited by the partition + key delimiter `|`. 
+ + For example, using the partition template below: + + ```json + [ + { + "type": "time", + "value": "%Y" + }, + { + "type": "tag", + "value": "bananas" + }, + { + "type": "tag", + "value": "plátanos" + }, + { + "type": "bucket", + "value": { + "tagName": "c", + "numberOfBuckets": 10 + } + } + ] + ``` + + The following partition keys are derived: + + * `time=2023-01-01, a=bananas, b=plátanos, c=ananas` -> `2023|bananas|plátanos|5` + * `time=2023-01-01, b=plátanos` -> `2023|!|plátanos|!` + * `time=2023-01-01, another=cat, b=plátanos` -> `2023|!|plátanos|!` + * `time=2023-01-01` -> `2023|!|!|!` + * `time=2023-01-01, a=cat|dog, b=!, c=!` -> `2023|cat%7Cdog|%21|8` + * `time=2023-01-01, a=%50, c=%50` -> `2023|%2550|!|9` + * `time=2023-01-01, a=, c=` -> `2023|^|!|0` + * `time=2023-01-01, a=` -> `2023|#|!|!` + * `time=2023-01-01, c=` -> `2023|!|!|` + + When using the default [partitioning](/influxdb/version/admin/custom-partitions/) template (YYYY-MM-DD) there is no + encoding necessary, as the derived partition key contains a single part, and + no reserved characters. [`TemplatePart::Bucket`] parts by definition will + always be within the part length limit and contain no restricted characters + so are also not percent-encoded and/or truncated. 
+ type: array + items: + $ref: '#/components/schemas/ClusterDatabasePartitionTemplatePart' + examples: + - - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + maxItems: 8 + minItems: 1 + uniqueItems: true + ClusterDatabasePartitionTemplatePart: + description: A sub-part of a `PartitionTemplate` + anyOf: + - $ref: '#/components/schemas/ClusterDatabasePartitionTemplatePartTagValue' + - $ref: '#/components/schemas/ClusterDatabasePartitionTemplatePartTimeFormat' + - $ref: '#/components/schemas/ClusterDatabasePartitionTemplatePartBucket' + examples: + - type: time + value: '%Y' + - type: tag + value: bananas + - type: tag + value: plátanos + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + ClusterDatabasePartitionTemplatePartTagValue: + description: | + A tag value matcher that extracts a string value from the specified tag name + + If a row does not contain a value for the specified tag name, the NULL/missing partition key part `!` is rendered. + type: object + properties: + type: + type: string + enum: + - tag + value: + type: string + minLength: 1 + examples: + - type: tag + value: bananas + - type: tag + value: plátanos + ClusterDatabasePartitionTemplatePartTimeFormat: + description: A time format matcher that accepts a "strftime"-like format string and evaluates it against the "time" column + type: object + properties: + type: + type: string + enum: + - time + value: + type: string + minLength: 1 + examples: + - type: time + value: '%Y' + ClusterDatabasePartitionTemplatePartBucket: + description: | + A bucketing matcher that sorts data through a uniform hash function on the values of the given tag name. + + If a row does not contain a value for the specified tag name, the NULL/missing partition key part `!` is rendered. 
+ type: object + properties: + type: + type: string + enum: + - bucket + value: + type: object + properties: + tagName: + description: The name of the tag used to derive the bucket the data belongs in + type: string + minLength: 1 + numberOfBuckets: + description: The number of buckets tag values are distributed across + type: integer + format: int32 + maximum: 100000 + minimum: 1 + examples: + - type: bucket + value: + tagName: c + numberOfBuckets: 10 + ClusterDatabaseTableName: + description: The name of the [cluster database](/influxdb/version/admin/databases/) table + type: string + examples: + - TableOne + - TableTwo + minLength: 1 + DatabaseTokenDescription: + description: The description of the database token + type: string + examples: + - Limited Access Token + - Full Access Token + DatabaseTokenResourceAllDatabases: + description: A resource value for a [database token](/influxdb/version/admin/tokens/database/) permission that refers to all databases + type: string + enum: + - '*' + DatabaseTokenPermissionAction: + description: The action the [database token](/influxdb/version/admin/tokens/database/) permission allows + type: string + DatabaseTokenPermissionResource: + description: The resource the [database token](/influxdb/version/admin/tokens/database/) permission applies to + anyOf: + - $ref: '#/components/schemas/ClusterDatabaseName' + - $ref: '#/components/schemas/DatabaseTokenResourceAllDatabases' + examples: + - DatabaseOne + - DatabaseTwo + - '*' + DatabaseTokenPermission: + description: The description of the database token + type: object + properties: + action: + $ref: '#/components/schemas/DatabaseTokenPermissionAction' + resource: + $ref: '#/components/schemas/DatabaseTokenPermissionResource' + examples: + - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + - action: write + resource: '*' + DatabaseTokenPermissions: + description: The list of permissions the [database 
token](/influxdb/version/admin/tokens/database/) allows + type: array + items: + $ref: '#/components/schemas/DatabaseTokenPermission' + examples: + - - action: read + resource: DatabaseOne + - action: write + resource: DatabaseTwo + - - action: write + resource: '*' + DatabaseTokenCreatedAt: + description: | + The date and time that the [database token](/influxdb/version/admin/tokens/database/) was created + + Uses RFC3339 format + $ref: '#/components/schemas/DateTimeRfc3339' + examples: + - '2023-12-21T17:32:28.000Z' + - '2024-03-02T04:20:19.000Z' + DatabaseTokenExpiresAt: + description: | + The date and time that the database token expires, if applicable + + Uses RFC3339 format + $ref: '#/components/schemas/DateTimeRfc3339' + DatabaseTokenRevokedAt: + description: | + The date and time that the database token was revoked, if applicable + + Uses RFC3339 format + $ref: '#/components/schemas/DateTimeRfc3339' + DatabaseTokenAccessToken: + description: | + The access token that can be used to authenticate query and write requests to the cluster + + The access token is never stored by InfluxDB and is only returned once when the token is created. If the access token is lost, a new token must be created. 
+ type: string + examples: + - apiv1_5555555555555555555555555555555555555555555555555555555555555555 + - apiv1_6666666666666666666666666666666666666666666666666666666666666666 + minLength: 64 + responses: + BadRequest: + description: Bad Request + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 400 + $ref: '#/components/schemas/Error' + example: + code: 400 + message: bad request + Unauthorized: + description: Unauthorized + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 401 + $ref: '#/components/schemas/Error' + example: + code: 401 + message: unauthorized + Forbidden: + description: Forbidden + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 403 + $ref: '#/components/schemas/Error' + example: + code: 403 + message: forbidden + NotFound: + description: Not Found + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 404 + $ref: '#/components/schemas/Error' + example: + code: 404 + message: not found + Conflict: + description: Conflict + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 409 + $ref: '#/components/schemas/Error' + example: + code: 409 + message: conflict + InternalServerError: + description: Internal Server Error + content: + application/json: + schema: + properties: + code: + type: integer + enum: + - 500 + $ref: '#/components/schemas/Error' + example: + code: 500 + message: internal server error + NoContent: + description: No Content + securitySchemes: + bearerAuthManagementToken: + type: http + scheme: bearer + bearerFormat: Management Token + bearerAuthJwt: + type: http + scheme: bearer + bearerFormat: JWT +x-tagGroups: + - name: Using the Management API + tags: + - Authentication + - Quickstart + - name: All endpoints + tags: + - Database tokens + - Databases + - Tables diff --git a/api-docs/influxdb3/shared/v3/base.yml 
b/api-docs/influxdb3/shared/v3/base.yml new file mode 100644 index 0000000000..110da8eb2c --- /dev/null +++ b/api-docs/influxdb3/shared/v3/base.yml @@ -0,0 +1,2715 @@ +openapi: 3.0.3 +info: + title: InfluxDB 3 API Service + description: | + The InfluxDB HTTP API for InfluxDB 3 provides a programmatic interface for + interacting with InfluxDB 3 databases and resources. + Use this API to: + + - Write data to InfluxDB 3 databases + - Query data using SQL or InfluxQL + - Process data using Processing engine plugins + - Manage databases, tables, and Processing engine triggers + - Perform administrative tasks and access system information + + The API includes endpoints under the following paths: + - `/api/v3`: InfluxDB 3 native endpoints + - `/`: Compatibility endpoints for InfluxDB v1 workloads and clients + - `/api/v2/write`: Compatibility endpoint for InfluxDB v2 workloads and clients + + + version: '3.7.0' + license: + name: MIT + url: https://opensource.org/licenses/MIT + contact: + name: InfluxData + url: https://www.influxdata.com + email: support@influxdata.com +servers: + - url: https://{baseurl} + description: InfluxDB 3 API URL + variables: + baseurl: + enum: + - localhost:8181 + default: localhost:8181 + description: InfluxDB 3 URL +security: + - BearerAuthentication: [] +tags: + - name: Authentication + description: | + Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API: + + | Authentication scheme | Works with | + |:----------------------|:-----------| + | Bearer authentication | All endpoints | + | Token authentication | v1 and v2 compatibility endpoints (`/write`, `/query`, `/api/v2/write`) | + | Basic authentication | v1 compatibility endpoints (`/write`, `/query`) | + | Querystring authentication | v1 compatibility endpoints (`/write`, `/query`) | + + See the **Security Schemes** section below for details on each authentication method. 
x-traitTag: true
+  - name: Cache data
+    description: |
+      Manage the in-memory cache.
+
+      #### Distinct Value Cache
+
+      The Distinct Value Cache (DVC) lets you cache distinct
+      values of one or more columns in a table, improving the performance of
+      queries that return distinct tag and field values.
+
+      The DVC is an in-memory cache that stores distinct values for specific columns
+      in a table. When you create a DVC, you can specify what columns' distinct
+      values to cache, the maximum number of distinct value combinations to cache, and
+      the maximum age of cached values. A DVC is associated with a table, which can
+      have multiple DVCs.
+
+      #### Last value cache
+
+      The Last Value Cache (LVC) lets you cache the most recent
+      values for specific fields in a table, improving the performance of queries that
+      return the most recent value of a field for specific series or the last N values
+      of a field.
+
+      The LVC is an in-memory cache that stores the last N number of values for
+      specific fields of series in a table. When you create an LVC, you can specify
+      what fields to cache, what tags to use to identify each series, and the
+      number of values to cache for each unique series.
+      An LVC is associated with a table, which can have multiple LVCs.
+
+      #### Related guides
+
+      - [Manage the Distinct Value Cache](/influxdb/version/admin/distinct-value-cache/)
+      - [Manage the Last Value Cache](/influxdb/version/admin/last-value-cache/)
+  - name: Migrate from InfluxDB v1 or v2
+    x-traitTag: true
+    description: |
+      Migrate your existing InfluxDB v1 or v2 workloads to InfluxDB 3.
+
+      InfluxDB 3 provides compatibility endpoints that work with InfluxDB 1.x and 2.x client libraries and tools.
+      Operations marked with v1 or v2 badges are compatible with the respective InfluxDB version.
+ + ### Migration guides + + - [Migrate from InfluxDB v1](/influxdb/version/guides/migrate/influxdb-1x/) - For users migrating from InfluxDB 1.x + - [Migrate from InfluxDB v2](/influxdb/version/guides/migrate/influxdb-2x/) - For users migrating from InfluxDB 2.x or Cloud + - [Use compatibility APIs to write data](/influxdb/version/write-data/http-api/compatibility-apis/) - v1 and v2 write endpoints + - [Use the v1 HTTP query API](/influxdb/version/query-data/execute-queries/influxdb-v1-api/) - InfluxQL queries via HTTP + - name: Database + description: Manage databases + - description: | + Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use. + + ### Common parameters + + The following table shows common parameters used by many InfluxDB API endpoints. + Many endpoints may require other parameters in the query string or in the + request body that perform functions specific to those endpoints. + + | Query parameter | Value type | Description | + |:------------------------ |:--------------------- |:-------------------------------------------| + | `db` | string | The database name | + + InfluxDB HTTP API endpoints use standard HTTP request and response headers. + The following table shows common headers used by many InfluxDB API endpoints. + Some endpoints may use other headers that perform functions more specific to those endpoints--for example, + the write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the request body. + + | Header | Value type | Description | + |:------------------------ |:--------------------- |:-------------------------------------------| + | `Accept` | string | The content type that the client can understand. | + | `Authorization` | string | The [authorization scheme and credential](/influxdb/version/api/authentication/). | + | `Content-Length` | integer | The size of the entity-body, in bytes. 
| + | `Content-Type` | string | The format of the data in the request body. | + name: Headers and parameters + x-traitTag: true + - name: Processing engine + description: | + Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins. + + InfluxDB 3 provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database. + Use Processing engine plugins and triggers to run code and perform tasks for different database events. + + To get started with the processing engine, see the [Processing engine and Python plugins](/influxdb/version/processing-engine/) guide. + - name: Query data + description: Query data using SQL or InfluxQL + - name: Quick start + description: | + 1. [Create an admin token](#section/Authentication) to authorize API requests. + + ```bash + curl -X POST "http://localhost:8181/api/v3/configure/token/admin" + ``` + 2. [Check the status](#section/Server-information) of the InfluxDB server. + + ```bash + curl "http://localhost:8181/health" \ + --header "Authorization: Bearer ADMIN_TOKEN" + ``` + + 3. [Write data](#operation/PostWriteLP) to InfluxDB. + + ```bash + curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" \ + --header "Authorization: Bearer ADMIN_TOKEN" \ + --data-raw "home,room=Kitchen temp=72.0 + home,room=Living\ room temp=71.5" + ``` + + If all data is written, the response is `204 No Content`. + + 4. [Query data](#operation/GetExecuteQuerySQL) from InfluxDB.
+ + ```bash + curl -G "http://localhost:8181/api/v3/query_sql" \ + --header "Authorization: Bearer ADMIN_TOKEN" \ + --data-urlencode "db=sensors" \ + --data-urlencode "q=SELECT * FROM home WHERE room='Living room'" \ + --data-urlencode "format=jsonl" + ``` + + Output: + + ```jsonl + {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"} + ``` + + For more information about using InfluxDB 3, see the [Get started](/influxdb/version/get-started/) guide. + x-traitTag: true + - name: Server information + description: Retrieve server metrics, status, and version information + - name: Table + description: Manage table schemas and data + - name: Auth token + description: Manage tokens for authentication and authorization + - name: Write data + description: | + Write data to InfluxDB 3 using line protocol format. + + #### Timestamp precision across write APIs + + InfluxDB 3 provides multiple write endpoints for compatibility with different InfluxDB versions. + The following table compares timestamp precision support across v1, v2, and v3 write APIs: + + | Precision | v1 (`/write`) | v2 (`/api/v2/write`) | v3 (`/api/v3/write_lp`) | + |-----------|---------------|----------------------|-------------------------| + | **Auto detection** | ❌ No | ❌ No | ✅ `auto` (default) | + | **Seconds** | ✅ `s` | ✅ `s` | ✅ `second` | + | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | + | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | + | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | + | **Minutes** | ✅ `m` | ❌ No | ❌ No | + | **Hours** | ✅ `h` | ❌ No | ❌ No | + | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | + + All timestamps are stored internally as nanoseconds. +paths: + /write: + post: + operationId: PostV1Write + summary: Write line protocol (v1-compatible) + x-compatibility-version: v1 + description: | + Writes line protocol to the specified database. 
+ Use this endpoint to send data in [line protocol](/influxdb/version/reference/syntax/line-protocol/) format to InfluxDB. + externalDocs: + description: Use compatibility APIs to write data + url: /influxdb/version/write-data/http-api/compatibility-apis/ + parameters: + - $ref: '#/components/parameters/dbWriteParam' + - $ref: '#/components/parameters/compatibilityPrecisionParam' + - $ref: '#/components/parameters/v1UsernameParam' + - $ref: '#/components/parameters/v1PasswordParam' + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: consistency + in: query + required: false + schema: + type: string + description: | + Write consistency level. Ignored by InfluxDB 3. Provided for compatibility with InfluxDB 1.x clients. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. + Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: '#/components/schemas/LineProtocol' + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: '#/components/parameters/ContentEncoding' + - $ref: '#/components/parameters/ContentLength' + requestBody: + $ref: '#/components/requestBodies/lineProtocolRequestBody' + responses: + '204': + description: Success ("No Content"). All data in the batch is written and queryable. 
+ headers: + cluster-uuid: + $ref: '#/components/headers/ClusterUUID' + '400': + description: | + Bad request. Some (a _partial write_) or all of the data from the batch was rejected and not written. + If a partial write occurred, then some points from the batch are written and queryable. + + The response body: + - indicates if a partial write occurred or all data was rejected. + - contains details about the [rejected points](/influxdb/version/write-data/troubleshoot/#troubleshoot-rejected-points), up to 100 points. + content: + application/json: + examples: + rejectedAllPoints: + summary: Rejected all points in the batch + value: | + { + "error": "write of line protocol failed", + "data": [ + { + "original_line": "home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + partialWriteErrorWithRejectedPoints: + summary: Partial write rejected some points in the batch + value: | + { + "error": "partial write of line protocol occurred", + "data": [ + { + "original_line": "home,room=Kitchen temp=hi", + "line_number": 2, + "error_message": "No fields were provided" + } + ] + } + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '413': + description: Request entity too large. + security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] + tags: + - Write data + x-influxdata-guides: + - title: Use compatibility APIs to write data + href: /influxdb/version/write-data/http-api/compatibility-apis/ + /api/v2/write: + post: + operationId: PostV2Write + summary: Write line protocol (v2-compatible) + x-compatibility-version: v2 + description: | + Writes line protocol to the specified database. + Use this endpoint to send data in [line protocol](/influxdb/version/reference/syntax/line-protocol/) format to InfluxDB.
+ externalDocs: + description: Use compatibility APIs to write data + url: /influxdb/version/write-data/http-api/compatibility-apis/ + parameters: + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: '#/components/schemas/LineProtocol' + required: false + - description: | + The compression applied to the line protocol in the request payload. + To send a gzip payload, pass `Content-Encoding: gzip` header. + in: header + name: Content-Encoding + schema: + default: identity + description: | + Content coding. + Use `gzip` for compressed data or `identity` for unmodified, uncompressed data. + enum: + - gzip + - identity + type: string + - description: | + The size of the entity-body, in bytes, sent to InfluxDB. + in: header + name: Content-Length + schema: + description: The length in decimal number of octets. + type: integer + - description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + in: header + name: Accept + schema: + default: application/json + description: Error content type. + enum: + - application/json + type: string + - name: db + in: query + required: true + schema: + type: string + description: | + A database name. + InfluxDB creates the database if it doesn't already exist, and then + writes all points in the batch to the database. + - name: accept_partial + in: query + required: false + schema: + $ref: '#/components/schemas/AcceptPartial' + - $ref: '#/components/parameters/compatibilityPrecisionParam' + requestBody: + $ref: '#/components/requestBodies/lineProtocolRequestBody' + responses: + '204': + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: '#/components/headers/ClusterUUID' + '400': + description: Bad request. 
+ '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '413': + description: Request entity too large. + security: + - BearerAuthentication: [] + - TokenAuthentication: [] + tags: + - Write data + x-influxdata-guides: + - title: Use compatibility APIs to write data + href: /influxdb/version/write-data/http-api/compatibility-apis/ + /api/v3/write_lp: + post: + operationId: PostWriteLP + summary: Write line protocol + description: | + Writes line protocol to the specified database. + + This is the native InfluxDB 3 write endpoint that provides enhanced control + over write behavior with advanced parameters for high-performance and fault-tolerant operations. + + Use this endpoint to send data in [line protocol](/influxdb/version/reference/syntax/line-protocol/) format to InfluxDB. + Use query parameters to specify options for writing data. + + #### Features + + - **Partial writes**: Use `accept_partial=true` to allow partial success when some lines in a batch fail + - **Asynchronous writes**: Use `no_sync=true` to skip waiting for WAL synchronization, allowing faster response times but sacrificing durability guarantees + - **Flexible precision**: Automatic timestamp precision detection with `precision=auto` (default) + + #### Auto precision detection + + When you use `precision=auto` or omit the precision parameter, InfluxDB 3 automatically detects + the timestamp precision based on the magnitude of the timestamp value: + + - Timestamps < 5e9 → Second precision (multiplied by 1,000,000,000 to convert to nanoseconds) + - Timestamps < 5e12 → Millisecond precision (multiplied by 1,000,000) + - Timestamps < 5e15 → Microsecond precision (multiplied by 1,000) + - Larger timestamps → Nanosecond precision (no conversion needed) + + #### Related + + - [Use the InfluxDB v3 write_lp API to write data](/influxdb/version/write-data/http-api/v3-write-lp/) + parameters: + - $ref: '#/components/parameters/dbWriteParam' + - $ref: 
'#/components/parameters/accept_partial' + - $ref: '#/components/parameters/precisionParam' + - name: no_sync + in: query + schema: + $ref: '#/components/schemas/NoSync' + - name: Content-Type + in: header + description: | + The content type of the request payload. + schema: + $ref: '#/components/schemas/LineProtocol' + required: false + - name: Accept + in: header + description: | + The content type that the client can understand. + Writes only return a response body if they fail (partially or completely)--for example, + due to a syntax problem or type mismatch. + schema: + type: string + default: application/json + enum: + - application/json + required: false + - $ref: '#/components/parameters/ContentEncoding' + - $ref: '#/components/parameters/ContentLength' + requestBody: + $ref: '#/components/requestBodies/lineProtocolRequestBody' + responses: + '204': + description: Success ("No Content"). All data in the batch is written and queryable. + headers: + cluster-uuid: + $ref: '#/components/headers/ClusterUUID' + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '413': + description: Request entity too large. + '422': + description: Unprocessable entity. 
+ x-codeSamples: + - label: cURL - Basic write + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000000000" + - label: cURL - Write with millisecond precision + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ms" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 1638360000000" + - label: cURL - Asynchronous write with partial acceptance + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&accept_partial=true&no_sync=true&precision=auto" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01 usage=85.2 + memory,host=server01 used=4096" + - label: cURL - Multiple measurements with tags + lang: Shell + source: | + curl --request POST "http://localhost:8181/api/v3/write_lp?db=sensors&precision=ns" \ + --header "Authorization: Bearer DATABASE_TOKEN" \ + --header "Content-Type: text/plain" \ + --data-raw "cpu,host=server01,region=us-west usage=85.2,load=0.75 1638360000000000000 + memory,host=server01,region=us-west used=4096,free=12288 1638360000000000000 + disk,host=server01,region=us-west,device=/dev/sda1 used=50.5,free=49.5 1638360000000000000" + tags: + - Write data + /api/v3/query_sql: + get: + operationId: GetExecuteQuerySQL + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. 
+ parameters: + - $ref: '#/components/parameters/db' + - $ref: '#/components/parameters/querySqlParam' + - $ref: '#/components/parameters/format' + - $ref: '#/components/parameters/AcceptQueryHeader' + - $ref: '#/components/parameters/ContentType' + responses: + '200': + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + example: + results: + - series: + - name: mytable + columns: + - time + - value + values: + - - '2024-02-02T12:00:00Z' + - 42 + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': + description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. + tags: + - Query data + post: + operationId: PostExecuteQuerySQL + summary: Execute SQL query + description: Executes an SQL query to retrieve data from the specified database. + parameters: + - $ref: '#/components/parameters/AcceptQueryHeader' + - $ref: '#/components/parameters/ContentType' + requestBody: + $ref: '#/components/requestBodies/queryRequestBody' + responses: + '200': + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': + description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. 
+ tags: + - Query data + /api/v3/query_influxql: + get: + operationId: GetExecuteInfluxQLQuery + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. + parameters: + - $ref: '#/components/parameters/dbQueryParam' + - name: q + in: query + required: true + schema: + type: string + - name: format + in: query + required: false + schema: + type: string + - $ref: '#/components/parameters/AcceptQueryHeader' + responses: + '200': + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': + description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. + tags: + - Query data + post: + operationId: PostExecuteQueryInfluxQL + summary: Execute InfluxQL query + description: Executes an InfluxQL query to retrieve data from the specified database. + parameters: + - $ref: '#/components/parameters/AcceptQueryHeader' + - $ref: '#/components/parameters/ContentType' + requestBody: + $ref: '#/components/requestBodies/queryRequestBody' + responses: + '200': + description: Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + text/csv: + schema: + type: string + application/vnd.apache.parquet: + schema: + type: string + application/jsonl: + schema: + type: string + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': + description: Database not found. + '405': + description: Method not allowed. 
+ '422': + description: Unprocessable entity. + tags: + - Query data + /query: + get: + operationId: GetV1ExecuteQuery + summary: Execute InfluxQL query (v1-compatible) + x-compatibility-version: v1 + description: | + Executes an InfluxQL query to retrieve data from the specified database. + Compatible with InfluxDB 1.x client libraries and third-party integrations such as Grafana. + externalDocs: + description: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + url: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ + parameters: + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: | + The content type that the client can understand. + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is formatted as CSV. + + Returns an error if the format is invalid or non-UTF8. + - in: query + name: chunked + description: | + If true, the response is divided into chunks of size `chunk_size`. + schema: + type: boolean + default: false + - in: query + name: chunk_size + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + schema: + type: integer + default: 10000 + - in: query + name: db + description: The database to query. If not provided, the InfluxQL query string must specify the database. + schema: + type: string + format: InfluxQL + - in: query + name: pretty + description: | + If true, the JSON response is formatted in a human-readable format. + schema: + type: boolean + default: false + - in: query + name: q + description: The InfluxQL query string. 
+ required: true + schema: + type: string + - name: epoch + description: | + Formats timestamps as [unix (epoch) timestamps](/influxdb/version/reference/glossary/#unix-timestamp) with the specified precision + instead of [RFC3339 timestamps](/influxdb/version/reference/glossary/#rfc3339-timestamp) with nanosecond precision. + in: query + schema: + $ref: '#/components/schemas/EpochCompatibility' + - $ref: '#/components/parameters/v1UsernameParam' + - $ref: '#/components/parameters/v1PasswordParam' + - name: rp + in: query + required: false + schema: + type: string + description: | + Retention policy name. Honored but discouraged. InfluxDB 3 doesn't use retention policies. + - name: Authorization + in: header + required: false + schema: + type: string + description: | + Authorization header for token-based authentication. + Supported schemes: + - `Bearer AUTH_TOKEN` - OAuth bearer token scheme + - `Token AUTH_TOKEN` - InfluxDB v2 token scheme + - `Basic ` - Basic authentication (username is ignored) + responses: + '200': + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + application/csv: + schema: + type: string + headers: + Content-Type: + description: | + The content type of the response. + Default is `application/json`. + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is `application/csv` + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': + description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. 
+ security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] + tags: + - Query data + x-influxdata-guides: + - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + href: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ + post: + operationId: PostExecuteV1Query + summary: Execute InfluxQL query (v1-compatible) + x-compatibility-version: v1 + description: Executes an InfluxQL query to retrieve data from the specified database. + externalDocs: + description: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + url: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ + requestBody: + content: + application/json: + schema: + type: object + properties: + db: + type: string + description: The database to query. If not provided, the InfluxQL query string must specify the database. + q: + description: The InfluxQL query string. + type: string + chunked: + description: | + If true, the response is divided into chunks of size `chunk_size`. + type: boolean + chunk_size: + description: | + The number of records that will go into a chunk. + This parameter is only used if `chunked=true`. + type: integer + default: 10000 + epoch: + description: | + A unix timestamp precision. + + - `h` for hours + - `m` for minutes + - `s` for seconds + - `ms` for milliseconds + - `u` or `µ` for microseconds + - `ns` for nanoseconds + + Formats timestamps as [unix (epoch) timestamps](/influxdb/version/reference/glossary/#unix-timestamp) with the specified precision + instead of [RFC3339 timestamps](/influxdb/version/reference/glossary/#rfc3339-timestamp) with nanosecond precision. + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + pretty: + description: | + If true, the JSON response is formatted in a human-readable format. 
+ type: boolean + required: + - q + parameters: + - name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + - text/csv + required: false + description: | + The content type that the client can understand. + + If `text/csv` is specified, the `Content-type` response header is `application/csv` and the response is formatted as CSV. + + Returns an error if the format is invalid or non-UTF8. + responses: + '200': + description: | + Success. The response body contains query results. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryResponse' + application/csv: + schema: + type: string + headers: + Content-Type: + description: | + The content type of the response. + Default is `application/json`. + + If the `Accept` request header is `application/csv` or `text/csv`, the `Content-type` response header is `application/csv` + and the response is formatted as CSV. + schema: + type: string + default: application/json + enum: + - application/json + - application/csv + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Access denied. + '404': + description: Database not found. + '405': + description: Method not allowed. + '422': + description: Unprocessable entity. + security: + - BearerAuthentication: [] + - TokenAuthentication: [] + - BasicAuthentication: [] + - QuerystringAuthentication: [] + tags: + - Query data + x-influxdata-guides: + - title: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + href: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ + /health: + get: + operationId: GetHealth + summary: Health check + description: Checks the status of the service. + responses: + '200': + description: Service is running. + '500': + description: Service is unavailable. 
+ tags: + - Server information + /api/v1/health: + get: + operationId: GetHealthV1 + summary: Health check (v1-compatible) + x-compatibility-version: v1 + description: Checks the status of the service. + responses: + '200': + description: Service is running. + '500': + description: Service is unavailable. + tags: + - Server information + /ping: + get: + operationId: GetPing + tags: + - Server information + summary: Ping the server + description: Returns version information for the server. + responses: + '200': + description: Success. The response body contains server information. + content: + application/json: + schema: + example: + version: 0.1.0 + revision: f3d3d3d + /metrics: + get: + operationId: GetMetrics + summary: Metrics + description: Retrieves Prometheus-compatible server metrics. + responses: + '200': + description: Success. The response body contains Prometheus-compatible server metrics. + tags: + - Server information + /api/v3/configure/database: + get: + operationId: GetConfigureDatabase + summary: List databases + description: Retrieves a list of databases. + parameters: + - $ref: '#/components/parameters/formatRequired' + responses: + '200': + description: Success. The response body contains the list of databases. + content: + application/json: + schema: + $ref: '#/components/schemas/ShowDatabasesResponse' + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. + tags: + - Database + post: + operationId: PostConfigureDatabase + summary: Create a database + description: Creates a new database in the system. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateDatabaseRequest' + responses: + '201': + description: Success. Database created. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '409': + description: Database already exists. 
+ tags: + - Database + delete: + operationId: DeleteConfigureDatabase + summary: Delete a database + description: | + Soft deletes a database. + The database is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + parameters: + - $ref: '#/components/parameters/db' + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: | + Schedule the database for hard deletion at the specified time. + If not provided, the database will be soft deleted. + Use ISO 8601 date-time format (for example, "2025-12-31T23:59:59Z"). + + #### Deleting a database cannot be undone + + Deleting a database is a destructive action. + Once a database is deleted, data stored in that database cannot be recovered. + responses: + '200': + description: Success. Database deleted. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. + tags: + - Database + /api/v3/configure/database/retention_period: + delete: + operationId: DeleteDatabaseRetentionPeriod + summary: Remove database retention period + description: | + Removes the retention period from a database, setting it to infinite retention. + Data in the database will not expire based on time. + parameters: + - $ref: '#/components/parameters/db' + responses: + '200': + description: Success. Retention period removed from database. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. + tags: + - Database + /api/v3/configure/table: + post: + operationId: PostConfigureTable + summary: Create a table + description: Creates a new table within a database. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CreateTableRequest' + responses: + '201': + description: Success. The table has been created. + '400': + description: Bad request. 
+ '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Database not found. + tags: + - Table + delete: + operationId: DeleteConfigureTable + summary: Delete a table + description: | + Soft deletes a table. + The table is scheduled for deletion and unavailable for querying. + Use the `hard_delete_at` parameter to schedule a hard deletion. + + #### Deleting a table cannot be undone + + Deleting a table is a destructive action. + Once a table is deleted, data stored in that table cannot be recovered. + parameters: + - $ref: '#/components/parameters/db' + - name: table + in: query + required: true + schema: + type: string + - name: hard_delete_at + in: query + required: false + schema: + type: string + format: date-time + description: | + Schedule the table for hard deletion at the specified time. + If not provided, the table will be soft deleted. + Use ISO 8601 format (for example, "2025-12-31T23:59:59Z"). + responses: + '200': + description: Success (no content). The table has been deleted. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Table not found. + tags: + - Table + /api/v3/configure/distinct_cache: + post: + operationId: PostConfigureDistinctCache + summary: Create distinct cache + description: Creates a distinct cache for a table. + tags: + - Cache data + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/DistinctCacheCreateRequest' + responses: + '201': + description: Success. The distinct cache has been created. + '204': + description: Not created. A distinct cache with this configuration already exists. + '400': + description: | + Bad request. + + The server responds with status `400` if the request would overwrite an existing cache with a different configuration. + delete: + operationId: DeleteConfigureDistinctCache + summary: Delete distinct cache + description: Deletes a distinct cache. 
+ parameters: + - $ref: '#/components/parameters/db' + - name: table + in: query + required: true + schema: + type: string + description: The name of the table containing the distinct cache. + - name: name + in: query + required: true + schema: + type: string + description: The name of the distinct cache to delete. + responses: + '200': + description: Success. The distinct cache has been deleted. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Cache not found. + tags: + - Cache data + /api/v3/configure/last_cache: + post: + operationId: PostConfigureLastCache + summary: Create last cache + description: Creates a last cache for a table. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/LastCacheCreateRequest' + responses: + '201': + description: Success. Last cache created. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Cache not found. + '409': + description: Cache already exists. + tags: + - Cache data + delete: + operationId: DeleteConfigureLastCache + summary: Delete last cache + description: Deletes a last cache. + parameters: + - $ref: '#/components/parameters/db' + - name: table + in: query + required: true + schema: + type: string + description: The name of the table containing the last cache. + - name: name + in: query + required: true + schema: + type: string + description: The name of the last cache to delete. + responses: + '200': + description: Success. The last cache has been deleted. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Cache not found. 
+ tags: + - Cache data + /api/v3/configure/processing_engine_trigger: + post: + operationId: PostConfigureProcessingEngineTrigger + summary: Create processing engine trigger + description: | + Creates a processing engine trigger with the specified plugin file and trigger specification. + + ### Related guides + + - [Processing engine and Python plugins](/influxdb/version/plugins/) + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProcessingEngineTriggerRequest' + examples: + schedule_cron: + summary: Schedule trigger using cron + description: | + In `"cron:CRON_EXPRESSION"`, `CRON_EXPRESSION` uses extended 6-field cron format. + The cron expression `0 0 6 * * 1-5` means the trigger will run at 6:00 AM every weekday (Monday to Friday). + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_cron_trigger + trigger_specification: cron:0 0 6 * * 1-5 + trigger_settings: + run_async: false + error_behavior: Log + schedule_every: + summary: Schedule trigger using interval + description: | + In `"every:DURATION"`, `DURATION` specifies the interval between trigger executions. + The duration `1h` means the trigger will run every hour. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_trigger + trigger_specification: every:1h + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_seconds: + summary: Schedule trigger using seconds interval + description: | + Example of scheduling a trigger to run every 30 seconds. + value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_30s_trigger + trigger_specification: every:30s + trigger_settings: + run_async: false + error_behavior: Log + schedule_every_minutes: + summary: Schedule trigger using minutes interval + description: | + Example of scheduling a trigger to run every 5 minutes. 
+ value: + db: mydb + plugin_filename: schedule.py + trigger_name: schedule_every_5m_trigger + trigger_specification: every:5m + trigger_settings: + run_async: false + error_behavior: Log + all_tables: + summary: All tables trigger example + description: | + Trigger that fires on write events to any table in the database. + value: + db: mydb + plugin_filename: all_tables.py + trigger_name: all_tables_trigger + trigger_specification: all_tables + trigger_settings: + run_async: false + error_behavior: Log + table_specific: + summary: Table-specific trigger example + description: | + Trigger that fires on write events to a specific table. + value: + db: mydb + plugin_filename: table.py + trigger_name: table_trigger + trigger_specification: table:sensors + trigger_settings: + run_async: false + error_behavior: Log + api_request: + summary: On-demand request trigger example + description: | + Creates an HTTP endpoint `/api/v3/engine/hello-world` for manual invocation. + value: + db: mydb + plugin_filename: request.py + trigger_name: hello_world_trigger + trigger_specification: request:hello-world + trigger_settings: + run_async: false + error_behavior: Log + cron_friday_afternoon: + summary: Cron trigger for Friday afternoons + description: | + Example of a cron trigger that runs every Friday at 2:30 PM. + value: + db: reports + plugin_filename: weekly_report.py + trigger_name: friday_report_trigger + trigger_specification: cron:0 30 14 * * 5 + trigger_settings: + run_async: false + error_behavior: Log + cron_monthly: + summary: Cron trigger for monthly execution + description: | + Example of a cron trigger that runs on the first day of every month at midnight. + value: + db: monthly_data + plugin_filename: monthly_cleanup.py + trigger_name: monthly_cleanup_trigger + trigger_specification: cron:0 0 0 1 * * + trigger_settings: + run_async: false + error_behavior: Log + responses: + '200': + description: Success. Processing engine trigger created. 
+ '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Trigger not found. + tags: + - Processing engine + delete: + operationId: DeleteConfigureProcessingEngineTrigger + summary: Delete processing engine trigger + description: Deletes a processing engine trigger. + parameters: + - $ref: '#/components/parameters/db' + - name: trigger_name + in: query + required: true + schema: + type: string + - name: force + in: query + required: false + schema: + type: boolean + default: false + responses: + '200': + description: Success. The processing engine trigger has been deleted. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Trigger not found. + tags: + - Processing engine + /api/v3/configure/processing_engine_trigger/disable: + post: + operationId: PostDisableProcessingEngineTrigger + summary: Disable processing engine trigger + description: Disables a processing engine trigger. + parameters: + - $ref: '#/components/parameters/ContentType' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProcessingEngineTriggerRequest' + responses: + '200': + description: Success. The processing engine trigger has been disabled. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Trigger not found. + tags: + - Processing engine + /api/v3/configure/processing_engine_trigger/enable: + post: + operationId: PostEnableProcessingEngineTrigger + summary: Enable processing engine trigger + description: Enables a processing engine trigger. + parameters: + - $ref: '#/components/parameters/ContentType' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ProcessingEngineTriggerRequest' + responses: + '200': + description: Success. The processing engine trigger is enabled. + '400': + description: Bad request. 
+ '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Trigger not found. + tags: + - Processing engine + /api/v3/configure/plugin_environment/install_packages: + post: + operationId: PostInstallPluginPackages + summary: Install plugin packages + description: | + Installs the specified Python packages into the processing engine plugin environment. + + This endpoint is synchronous and blocks until the packages are installed. + + ### Related guides + + - [Processing engine and Python plugins](/influxdb/version/plugins/) + parameters: + - $ref: '#/components/parameters/ContentType' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + packages: + type: array + items: + type: string + description: | + A list of Python package names to install. + Can include version specifiers (e.g., "scipy==1.9.0"). + example: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + required: + - packages + example: + packages: + - influxdb3-python + - scipy + - pandas==1.5.0 + - requests + responses: + '200': + description: Success. The packages are installed. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Processing engine + /api/v3/configure/plugin_environment/install_requirements: + post: + operationId: PostInstallPluginRequirements + summary: Install plugin requirements + description: | + Installs requirements from a requirements file (also known as a "pip requirements file") into the processing engine plugin environment. + + This endpoint is synchronous and blocks until the requirements are installed. 
+ + ### Related + + - [Processing engine and Python plugins](/influxdb/version/plugins/) + - [Python requirements file format](https://pip.pypa.io/en/stable/reference/requirements-file-format/) + parameters: + - $ref: '#/components/parameters/ContentType' + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + requirements_location: + type: string + description: | + The path to the requirements file containing Python packages to install. + Can be a relative path (relative to the plugin directory) or an absolute path. + example: requirements.txt + required: + - requirements_location + example: + requirements_location: requirements.txt + responses: + '200': + description: Success. The requirements have been installed. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Processing engine + /api/v3/plugin_test/wal: + post: + operationId: PostTestWALPlugin + summary: Test WAL plugin + description: Executes a test of a write-ahead logging (WAL) plugin. + responses: + '200': + description: Success. The plugin test has been executed. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Plugin not enabled. + tags: + - Processing engine + /api/v3/plugin_test/schedule: + post: + operationId: PostTestSchedulingPlugin + summary: Test scheduling plugin + description: Executes a test of a scheduling plugin. + responses: + '200': + description: Success. The plugin test has been executed. + '400': + description: Bad request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Plugin not enabled. + tags: + - Processing engine + /api/v3/engine/{request_path}: + parameters: + - name: request_path + description: | + The path configured in the request trigger specification for the plugin. 
+ + For example, if you define a trigger with the following: + + ```json + trigger_specification: "request:hello-world" + ``` + + then, the HTTP API exposes the following plugin endpoint: + + ``` + /api/v3/engine/hello-world + ``` + in: path + required: true + schema: + type: string + get: + operationId: GetProcessingEnginePluginRequest + summary: On Request processing engine plugin request + description: | + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. + + An On Request plugin implements the following signature: + + ```python + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + ``` + + The response depends on the plugin implementation. + responses: + '200': + description: Success. The plugin request has been executed. + '400': + description: Malformed request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Plugin not found. + '500': + description: Processing failure. + tags: + - Processing engine + post: + operationId: PostProcessingEnginePluginRequest + summary: On Request processing engine plugin request + description: | + Executes the On Request processing engine plugin specified in the trigger's `plugin_filename`. + The request can include request headers, query string parameters, and a request body, which InfluxDB passes to the plugin. + + An On Request plugin implements the following signature: + + ```python + def process_request(influxdb3_local, query_parameters, request_headers, request_body, args=None) + ``` + + The response depends on the plugin implementation. + parameters: + - $ref: '#/components/parameters/ContentType' + requestBody: + required: false + content: + application/json: + schema: + type: object + additionalProperties: true + responses: + '200': + description: Success. 
The plugin request has been executed. + '400': + description: Malformed request. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Plugin not found. + '500': + description: Processing failure. + tags: + - Processing engine + /api/v3/configure/token/admin: + post: + operationId: PostCreateAdminToken + summary: Create admin token + description: | + Creates an admin token. + An admin token is a special type of token that has full access to all resources in the system. + responses: + '201': + description: | + Success. The admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: '#/components/schemas/AdminTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + security: + - BearerAuthentication: [] + - {} # No auth required for initial token creation + tags: + - Auth token + /api/v3/configure/token/admin/regenerate: + post: + operationId: PostRegenerateAdminToken + summary: Regenerate admin token + description: | + Regenerates an admin token and revokes the previous token with the same name. + parameters: [] + responses: + '201': + description: Success. The admin token has been regenerated. + content: + application/json: + schema: + $ref: '#/components/schemas/AdminTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + tags: + - Authentication + - Auth token + /api/v3/configure/token: + delete: + operationId: DeleteToken + summary: Delete token + description: | + Deletes a token. + parameters: + - name: id + in: query + required: true + schema: + type: string + description: | + The ID of the token to delete. + responses: + '204': + description: Success. The token has been deleted. + '401': + $ref: '#/components/responses/Unauthorized' + '404': + description: Token not found. 
+ tags: + - Authentication + - Auth token + /api/v3/configure/token/named_admin: + post: + operationId: PostCreateNamedAdminToken + summary: Create named admin token + description: | + Creates a named admin token. + A named admin token is an admin token with a specific name identifier. + parameters: + - name: name + in: query + required: true + schema: + type: string + description: | + The name for the admin token. + responses: + '201': + description: | + Success. The named admin token has been created. + The response body contains the token string and metadata. + content: + application/json: + schema: + $ref: '#/components/schemas/AdminTokenObject' + '401': + $ref: '#/components/responses/Unauthorized' + '409': + description: A token with this name already exists. + tags: + - Authentication + - Auth token + /api/v3/plugins/files: + put: + operationId: PutPluginFile + summary: Update plugin file + description: | + Updates a plugin file in the plugin directory. + x-security-note: Requires an admin token + responses: + '204': + description: Success. The plugin file has been updated. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Forbidden. Admin token required. + tags: + - Processing engine + /api/v3/plugins/directory: + put: + operationId: PutPluginDirectory + summary: Update plugin directory + description: | + Updates the plugin directory configuration. + x-security-note: Requires an admin token + responses: + '204': + description: Success. The plugin directory has been updated. + '401': + $ref: '#/components/responses/Unauthorized' + '403': + description: Forbidden. Admin token required. + tags: + - Processing engine +components: + parameters: + AcceptQueryHeader: + name: Accept + in: header + schema: + type: string + default: application/json + enum: + - application/json + - application/jsonl + - application/vnd.apache.parquet + - text/csv + required: false + description: | + The content type that the client can understand. 
+ ContentEncoding: + name: Content-Encoding + in: header + description: | + The compression applied to the line protocol in the request payload. + To send a gzip payload, pass `Content-Encoding: gzip` header. + schema: + $ref: '#/components/schemas/ContentEncoding' + required: false + ContentLength: + name: Content-Length + in: header + description: | + The size of the entity-body, in bytes, sent to InfluxDB. + schema: + $ref: '#/components/schemas/ContentLength' + ContentType: + name: Content-Type + description: | + The format of the data in the request body. + in: header + schema: + type: string + enum: + - application/json + required: false + db: + name: db + in: query + required: true + schema: + type: string + description: | + The name of the database. + dbWriteParam: + name: db + in: query + required: true + schema: + type: string + description: | + The name of the database. + InfluxDB creates the database if it doesn't already exist, and then + writes all points in the batch to the database. + dbQueryParam: + name: db + in: query + required: false + schema: + type: string + description: | + The name of the database. + + If you provide a query that specifies the database, you can omit the 'db' parameter from your request. + accept_partial: + name: accept_partial + in: query + required: false + schema: + $ref: '#/components/schemas/AcceptPartial' + compatibilityPrecisionParam: + name: precision + in: query + required: true + schema: + $ref: '#/components/schemas/PrecisionWriteCompatibility' + description: The precision for unix timestamps in the line protocol batch. + precisionParam: + name: precision + in: query + required: true + schema: + $ref: '#/components/schemas/PrecisionWrite' + description: The precision for unix timestamps in the line protocol batch. + querySqlParam: + name: q + in: query + required: true + schema: + type: string + format: SQL + description: | + The query to execute. 
+      format:
+        name: format
+        in: query
+        required: false
+        schema:
+          $ref: '#/components/schemas/Format'
+      formatRequired:
+        name: format
+        in: query
+        required: true
+        schema:
+          $ref: '#/components/schemas/Format'
+      v1UsernameParam:
+        name: u
+        in: query
+        required: false
+        schema:
+          type: string
+        description: |
+          Username for v1 compatibility authentication.
+          When using Basic authentication or query string authentication, InfluxDB 3 ignores this parameter but allows any arbitrary string for compatibility with InfluxDB 1.x clients.
+      v1PasswordParam:
+        name: p
+        in: query
+        required: false
+        schema:
+          type: string
+        description: |
+          Password for v1 compatibility authentication.
+          For query string authentication, pass an admin token.
+          InfluxDB 3 checks that the `p` value is an authorized token.
+    requestBodies:
+      lineProtocolRequestBody:
+        required: true
+        content:
+          text/plain:
+            schema:
+              type: string
+            examples:
+              line:
+                summary: Example line protocol
+                value: measurement,tag=value field=1 1234567890
+              multiline:
+                summary: Example of multiple lines of line protocol
+                value: |
+                  measurement,tag=value field=1 1234567890
+                  measurement,tag=value field=2 1234567900
+                  measurement,tag=value field=3 1234568000
+      queryRequestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/QueryRequestObject'
+    schemas:
+      AdminTokenObject:
+        type: object
+        properties:
+          id:
+            type: integer
+          name:
+            type: string
+          token:
+            type: string
+          hash:
+            type: string
+          created_at:
+            type: string
+            format: date-time
+          expiry:
+            type: string
+            format: date-time
+            nullable: true
+        example:
+          id: 0
+          name: _admin
+          token: apiv3_00xx0Xx0xx00XX0x0
+          hash: 00xx0Xx0xx00XX0x0
+          created_at: '2025-04-18T14:02:45.331Z'
+          expiry: null
+      ContentEncoding:
+        type: string
+        enum:
+          - gzip
+          - identity
+        description: |
+          Content coding.
+          Use `gzip` for compressed data or `identity` for unmodified, uncompressed data.
+ + #### Multi-member gzip support + + InfluxDB 3 supports multi-member gzip payloads (concatenated gzip files per [RFC 1952](https://www.rfc-editor.org/rfc/rfc1952)). + This allows you to: + - Concatenate multiple gzip files and send them in a single request + - Maintain compatibility with InfluxDB v1 and v2 write endpoints + - Simplify batch operations using standard compression tools + default: identity + LineProtocol: + type: string + enum: + - text/plain + - text/plain; charset=utf-8 + description: | + `text/plain` is the content type for line protocol. `UTF-8` is the default character set. + default: text/plain; charset=utf-8 + ContentLength: + type: integer + description: The length in decimal number of octets. + Database: + type: string + AcceptPartial: + type: boolean + default: true + description: Accept partial writes. + Format: + type: string + enum: + - json + - csv + - parquet + - jsonl + description: | + The format of data in the response body. + NoSync: + type: boolean + default: false + description: | + Acknowledges a successful write without waiting for WAL persistence. + + #### Related + + - [Use the HTTP API and client libraries to write data](/influxdb/version/write-data/api-client-libraries/) + - [Data durability](/influxdb/version/reference/internals/durability/) + PrecisionWriteCompatibility: + enum: + - ms + - s + - us + - ns + type: string + description: | + The precision for unix timestamps in the line protocol batch. + Use `ms` for milliseconds, `s` for seconds, `us` for microseconds, or `ns` for nanoseconds. + PrecisionWrite: + enum: + - auto + - nanosecond + - microsecond + - millisecond + - second + type: string + description: | + The precision for unix timestamps in the line protocol batch. 
+ + Supported values: + - `auto` (default): Automatically detects precision based on timestamp magnitude + - `nanosecond`: Nanoseconds + - `microsecond`: Microseconds + - `millisecond`: Milliseconds + - `second`: Seconds + QueryRequestObject: + type: object + properties: + db: + description: | + The name of the database to query. + Required if the query (`q`) doesn't specify the database. + type: string + q: + description: The query to execute. + type: string + format: + description: The format of the query results. + type: string + enum: + - json + - csv + - parquet + - jsonl + - pretty + params: + description: | + Additional parameters for the query. + Use this field to pass query parameters. + type: object + additionalProperties: true + required: + - db + - q + example: + db: mydb + q: SELECT * FROM mytable + format: json + params: {} + CreateDatabaseRequest: + type: object + properties: + db: + type: string + retention_period: + type: string + description: | + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 7d + required: + - db + CreateTableRequest: + type: object + properties: + db: + type: string + table: + type: string + tags: + type: array + items: + type: string + fields: + type: array + items: + type: object + properties: + name: + type: string + type: + type: string + enum: + - utf8 + - int64 + - uint64 + - float64 + - bool + required: + - name + - type + required: + - db + - table + - tags + DistinctCacheCreateRequest: + type: object + properties: + db: + type: string + table: + type: string + name: + type: string + description: Optional cache name. + columns: + type: array + items: + type: string + max_cardinality: + type: integer + description: Optional maximum cardinality. + max_age: + type: integer + description: Optional maximum age in seconds. 
+ required: + - db + - table + - columns + example: + db: mydb + table: mytable + columns: + - tag1 + - tag2 + max_cardinality: 1000 + max_age: 3600 + LastCacheCreateRequest: + type: object + properties: + db: + type: string + table: + type: string + name: + type: string + description: Optional cache name. + key_columns: + type: array + items: + type: string + description: Optional list of key columns. + value_columns: + type: array + items: + type: string + description: Optional list of value columns. + count: + type: integer + description: Optional count. + ttl: + type: integer + description: Optional time-to-live in seconds. + required: + - db + - table + example: + db: mydb + table: mytable + key_columns: + - tag1 + value_columns: + - field1 + count: 100 + ttl: 3600 + ProcessingEngineTriggerRequest: + type: object + properties: + db: + type: string + plugin_filename: + type: string + description: | + The path and filename of the plugin to execute--for example, + `schedule.py` or `endpoints/report.py`. + The path can be absolute or relative to the `--plugins-dir` directory configured when starting InfluxDB 3. + + The plugin file must implement the trigger interface associated with the trigger's specification. + trigger_name: + type: string + trigger_settings: + description: | + Configuration for trigger error handling and execution behavior. + allOf: + - $ref: '#/components/schemas/TriggerSettings' + trigger_specification: + type: string + description: | + Specifies when and how the processing engine trigger should be invoked. 
+
+            ## Supported trigger specifications:
+
+            ### Cron-based scheduling
+            Format: `cron:CRON_EXPRESSION`
+
+            Uses extended (6-field) cron format (second minute hour day_of_month month day_of_week):
+            ```
+            ┌───────────── second (0-59)
+            │ ┌───────────── minute (0-59)
+            │ │ ┌───────────── hour (0-23)
+            │ │ │ ┌───────────── day of month (1-31)
+            │ │ │ │ ┌───────────── month (1-12)
+            │ │ │ │ │ ┌───────────── day of week (0-6, Sunday=0)
+            │ │ │ │ │ │
+            * * * * * *
+            ```
+            Examples:
+            - `cron:0 0 6 * * 1-5` - Every weekday at 6:00 AM
+            - `cron:0 30 14 * * 5` - Every Friday at 2:30 PM
+            - `cron:0 0 0 1 * *` - First day of every month at midnight
+
+            ### Interval-based scheduling
+            Format: `every:DURATION`
+
+            Supported durations: `s` (seconds), `m` (minutes), `h` (hours), `d` (days), `w` (weeks), `M` (months), `y` (years):
+            - `every:30s` - Every 30 seconds
+            - `every:5m` - Every 5 minutes
+            - `every:1h` - Every hour
+            - `every:1d` - Every day
+            - `every:1w` - Every week
+            - `every:1M` - Every month
+            - `every:1y` - Every year
+
+            **Maximum interval**: 1 year
+
+            ### Table-based triggers
+            - `all_tables` - Triggers on write events to any table in the database
+            - `table:TABLE_NAME` - Triggers on write events to a specific table
+
+            ### On-demand triggers
+            Format: `request:REQUEST_PATH`
+
+            Creates an HTTP endpoint `/api/v3/engine/REQUEST_PATH` for manual invocation:
+            - `request:hello-world` - Creates endpoint `/api/v3/engine/hello-world`
+            - `request:data-export` - Creates endpoint `/api/v3/engine/data-export`
+          pattern: ^(cron:[0-9 *,/-]+|every:[0-9]+[smhdwMy]|all_tables|table:[a-zA-Z_][a-zA-Z0-9_]*|request:[a-zA-Z0-9_-]+)$
+          example: cron:0 0 6 * * 1-5
+        trigger_arguments:
+          type: object
+          additionalProperties: true
+          description: Optional arguments passed to the plugin.
+        disabled:
+          type: boolean
+          default: false
+          description: Whether the trigger is disabled.
+ required: + - db + - plugin_filename + - trigger_name + - trigger_settings + - trigger_specification + TriggerSettings: + type: object + description: | + Configuration settings for processing engine trigger error handling and execution behavior. + properties: + run_async: + type: boolean + default: false + description: | + Whether to run the trigger asynchronously. + When `true`, the trigger executes in the background without blocking. + When `false`, the trigger executes synchronously. + error_behavior: + type: string + enum: + - Log + - Retry + - Disable + description: | + Specifies how to handle errors that occur during trigger execution: + - `Log`: Log the error and continue (default) + - `Retry`: Retry the trigger execution + - `Disable`: Disable the trigger after an error + default: Log + required: + - run_async + - error_behavior + ShowDatabasesResponse: + type: object + properties: + databases: + type: array + items: + type: string + QueryResponse: + type: object + properties: + results: + type: array + items: + type: object + example: + results: + - series: + - name: mytable + columns: + - time + - value + values: + - - '2024-02-02T12:00:00Z' + - 42 + ErrorMessage: + type: object + properties: + error: + type: string + data: + type: object + nullable: true + LineProtocolError: + properties: + code: + description: Code is the machine-readable error code. + enum: + - internal error + - not found + - conflict + - invalid + - empty value + - unavailable + readOnly: true + type: string + err: + description: Stack of errors that occurred during processing of the request. Useful for debugging. + readOnly: true + type: string + line: + description: First line in the request body that contains malformed data. + format: int32 + readOnly: true + type: integer + message: + description: Human-readable message. + readOnly: true + type: string + op: + description: Describes the logical code operation when the error occurred. Useful for debugging. 
+ readOnly: true + type: string + required: + - code + EpochCompatibility: + description: | + A unix timestamp precision. + - `h` for hours + - `m` for minutes + - `s` for seconds + - `ms` for milliseconds + - `u` or `µ` for microseconds + - `ns` for nanoseconds + enum: + - ns + - u + - µ + - ms + - s + - m + - h + type: string + UpdateDatabaseRequest: + type: object + properties: + retention_period: + type: string + description: | + The retention period for the database. Specifies how long data should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 7d + description: Request schema for updating database configuration. + UpdateTableRequest: + type: object + properties: + db: + type: string + description: The name of the database containing the table. + table: + type: string + description: The name of the table to update. + retention_period: + type: string + description: | + The retention period for the table. Specifies how long data in this table should be retained. + Use duration format (for example, "1d", "1h", "30m", "7d"). + example: 30d + required: + - db + - table + description: Request schema for updating table configuration. + LicenseResponse: + type: object + properties: + license_type: + type: string + description: The type of license (for example, "enterprise", "trial"). + example: enterprise + expires_at: + type: string + format: date-time + description: The expiration date of the license in ISO 8601 format. + example: '2025-12-31T23:59:59Z' + features: + type: array + items: + type: string + description: List of features enabled by the license. + example: + - clustering + - processing_engine + - advanced_auth + status: + type: string + enum: + - active + - expired + - invalid + description: The current status of the license. + example: active + description: Response schema for license information. 
+ ResourceTokenObject: + type: object + properties: + token_name: + type: string + permissions: + type: array + items: + type: object + properties: + resource_type: + type: string + enum: + - system + - db + resource_identifier: + type: array + items: + type: string + actions: + type: array + items: + type: string + enum: + - read + - write + expiry_secs: + type: integer + description: The expiration time in seconds. + description: Response schema for resource token creation. + responses: + Unauthorized: + description: Unauthorized access. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorMessage' + BadRequest: + description: | + Request failed. Possible reasons: + + - Invalid database name + - Malformed request body + - Invalid timestamp precision + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorMessage' + Forbidden: + description: Access denied. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorMessage' + NotFound: + description: Resource not found. + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorMessage' + headers: + ClusterUUID: + description: | + The catalog UUID of the InfluxDB instance. + This header is included in all HTTP API responses and enables you to: + - Identify which cluster instance handled the request + - Monitor deployments across multiple InfluxDB instances + - Debug and troubleshoot distributed systems + schema: + type: string + format: uuid + example: 01234567-89ab-cdef-0123-456789abcdef + securitySchemes: + BasicAuthentication: + type: http + scheme: basic + description: | + Use the `Authorization` header with the `Basic` scheme to authenticate v1 API requests. + + Works with v1 compatibility [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints in InfluxDB 3. 
+ + When authenticating requests, InfluxDB 3 checks that the `password` part of the decoded credential is an authorized token + and ignores the `username` part of the decoded credential. + + ### Example + + ```bash + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s" \ + --user "":"AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` + + Replace the following: + + - **`DATABASE_NAME`**: your InfluxDB 3 database + - **`AUTH_TOKEN`**: an admin token + + #### Related guides + + - [Authenticate v1 API requests](/influxdb/version/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb/version/admin/tokens/) + QuerystringAuthentication: + type: apiKey + in: query + name: u=&p= + description: | + Use InfluxDB 1.x API parameters to provide credentials through the query string for v1 API requests. + + Querystring authentication works with v1-compatible [`/write`](#operation/PostV1Write) and [`/query`](#operation/GetV1Query) endpoints. + + When authenticating requests, InfluxDB 3 checks that the `p` (_password_) query parameter is an authorized token + and ignores the `u` (_username_) query parameter. 
+ + ### Syntax + + ```http + http://localhost:8181/query/?[u=any]&p=DATABASE_TOKEN + http://localhost:8181/write/?[u=any]&p=DATABASE_TOKEN + ``` + + ### Examples + + ```bash + curl "http://localhost:8181/write?db=DATABASE_NAME&precision=s&p=AUTH_TOKEN" \ + --header "Content-type: text/plain; charset=utf-8" \ + --data-binary 'home,room=kitchen temp=72 1641024000' + ``` + + Replace the following: + + - **`DATABASE_NAME`**: your InfluxDB 3 database + - **`AUTH_TOKEN`**: an admin token + + ```bash + ####################################### + # Use an InfluxDB 1.x compatible username and password + # to query the InfluxDB v1 HTTP API + ####################################### + # Use authentication query parameters: + # ?p=DATABASE_TOKEN + ####################################### + + curl --get "http://localhost:8181/query" \ + --data-urlencode "p=AUTH_TOKEN" \ + --data-urlencode "db=DATABASE_NAME" \ + --data-urlencode "q=SELECT * FROM MEASUREMENT" + ``` + + Replace the following: + + - **`DATABASE_NAME`**: the database to query + - **`AUTH_TOKEN`**: an [admin token](/influxdb/version/admin/tokens/) + + #### Related guides + + - [Authenticate v1 API requests](/influxdb/version/guides/api-compatibility/v1/) + - [Manage tokens](/influxdb/version/admin/tokens/) + BearerAuthentication: + type: http + scheme: bearer + bearerFormat: JWT + description: | + + Use the OAuth Bearer authentication + scheme to provide an authorization token to InfluxDB 3. + + Bearer authentication works with all endpoints. + + In your API requests, send an `Authorization` header. + For the header value, provide the word `Bearer` followed by a space and an admin token. + + + ### Syntax + + ```http + Authorization: Bearer AUTH_TOKEN + ``` + + ### Example + + ```bash + curl http://localhost:8181/api/v3/query_influxql \ + --header "Authorization: Bearer AUTH_TOKEN" + ``` + TokenAuthentication: + description: | + Use InfluxDB v2 Token authentication to provide an authorization token to InfluxDB 3. 
+ + The v2 Token scheme works with v1 and v2 compatibility endpoints in InfluxDB 3. + + In your API requests, send an `Authorization` header. + For the header value, provide the word `Token` followed by a space and a database token. + The word `Token` is case-sensitive. + + ### Syntax + + ```http + Authorization: Token AUTH_TOKEN + ``` + + ### Example + + ```sh + ######################################################## + # Use the Token authentication scheme with /api/v2/write + # to write data. + ######################################################## + + curl --request post "http://localhost:8181/api/v2/write?bucket=DATABASE_NAME&precision=s" \ + --header "Authorization: Token AUTH_TOKEN" \ + --data-binary 'home,room=kitchen temp=72 1463683075' + ``` + + ### Related guides + + - [Manage tokens](/influxdb/version/admin/tokens/) + in: header + name: Authorization + type: apiKey diff --git a/api-docs/scripts/README.md b/api-docs/scripts/README.md new file mode 100644 index 0000000000..2ea5f440d1 --- /dev/null +++ b/api-docs/scripts/README.md @@ -0,0 +1,390 @@ +# API Documentation Generation Scripts + +TypeScript-based scripts for generating Hugo data files and content pages from OpenAPI specifications. + +## Overview + +These scripts convert OpenAPI v3 specifications into Hugo-compatible data files and content pages for all InfluxDB products. + +### What Gets Generated + +For each product, the scripts generate: + +1. **OpenAPI Spec Copies** (static directory): + - `influxdb-{product}.yml` - YAML version of the spec + - `influxdb-{product}.json` - JSON version of the spec + +2. **Path Group Fragments** (static/openapi/{product}/paths/): + - Separate YAML and JSON files for each API path group + - Example: `ref-api-v2-buckets.yaml` and `ref-api-v2-buckets.json` + +3. **Article Metadata** (data/article-data/influxdb/{product}/): + - `articles.yml` - Hugo data file with article metadata + - `articles.json` - JSON version for programmatic access + +4. 
**Hugo Content Pages** (content/{product}/api/): + - Markdown files generated from article data + - One page per API path group + +## Quick Start + +### Build Scripts + +Compile TypeScript to JavaScript (required before running): + +```bash +yarn build:api-scripts +``` + +### Generate API Pages + +**Generate all products:** + +```bash +yarn build:api-pages +``` + +**Generate specific product(s):** + +```bash +yarn build:api-pages:product cloud-v2 +yarn build:api-pages:product cloud-v2 oss-v2 +``` + +## Supported Products + +| Product ID | Description | Spec File | Content Path | +| ---------------------- | ------------------------- | ------------------------------------------------ | -------------------------------------------- | +| `cloud-v2` | InfluxDB Cloud (v2 API) | `api-docs/cloud/v2/ref.yml` | `content/influxdb/cloud/api/v2` | +| `oss-v2` | InfluxDB OSS v2 | `api-docs/v2/ref.yml` | `content/influxdb/v2/api/v2` | +| `influxdb3-core` | InfluxDB 3 Core | `api-docs/influxdb3/core/v3/ref.yml` | `content/influxdb3/core/reference/api` | +| `influxdb3-enterprise` | InfluxDB 3 Enterprise | `api-docs/influxdb3/enterprise/v3/ref.yml` | `content/influxdb3/enterprise/reference/api` | +| `cloud-dedicated` | InfluxDB Cloud Dedicated | `api-docs/influxdb3/cloud-dedicated/v2/ref.yml` | `content/influxdb/cloud-dedicated/api` | +| `cloud-serverless` | InfluxDB Cloud Serverless | `api-docs/influxdb3/cloud-serverless/v2/ref.yml` | `content/influxdb/cloud-serverless/api` | +| `clustered` | InfluxDB Clustered | `api-docs/influxdb3/clustered/v2/ref.yml` | `content/influxdb/clustered/api` | + +## Architecture + +### TypeScript Files + +``` +api-docs/scripts/ +├── tsconfig.json # TypeScript configuration +├── generate-openapi-articles.ts # Main orchestration script +└── openapi-paths-to-hugo-data/ + ├── index.ts # Core conversion logic + └── package.json # Module dependencies +``` + +### Compiled JavaScript + +After running `yarn build:api-scripts`, compiled files are in: + +``` 
+api-docs/scripts/dist/ +├── generate-openapi-articles.js +├── generate-openapi-articles.d.ts +└── openapi-paths-to-hugo-data/ + ├── index.js + └── index.d.ts +``` + +## Script Details + +### generate-openapi-articles.ts + +Main orchestration script that processes products. + +**For each product, it:** + +1. Runs `getswagger.sh` to fetch/bundle the OpenAPI spec +2. Copies spec to `static/openapi/influxdb-{product}.yml` +3. Generates JSON version at `static/openapi/influxdb-{product}.json` +4. Generates path group fragments (YAML and JSON) +5. Creates article metadata (YAML and JSON) +6. Generates Hugo content pages + +**Usage:** + +```bash +node api-docs/scripts/dist/generate-openapi-articles.js [product-ids...] + +# Examples: +node api-docs/scripts/dist/generate-openapi-articles.js # All products +node api-docs/scripts/dist/generate-openapi-articles.js cloud-v2 # Single product +node api-docs/scripts/dist/generate-openapi-articles.js cloud-v2 oss-v2 # Multiple products +``` + +**Output:** + +``` +📋 Processing all products... + +================================================================================ +Processing InfluxDB Cloud (v2 API) +================================================================================ + +Fetching OpenAPI spec for cloud-v2... +✓ Copied spec to static/openapi/influxdb-cloud-v2.yml +✓ Generated JSON spec at static/openapi/influxdb-cloud-v2.json + +Generating OpenAPI path files in static/openapi/influxdb-cloud-v2/paths.... +Generated: ref-api-v2-buckets.yaml and ref-api-v2-buckets.json +... + +Generating OpenAPI article data in data/article-data/influxdb/cloud-v2... +Generated 32 articles in data/article-data/influxdb/cloud-v2 + +✅ Successfully processed InfluxDB Cloud (v2 API) +``` + +### openapi-paths-to-hugo-data/index.ts + +Core conversion library that processes OpenAPI specs. 
+ +**Key Functions:** + +- `generateHugoData(options)` - Main entry point +- `writePathOpenapis()` - Groups paths and writes fragments +- `createArticleDataForPathGroup()` - Generates article metadata + +**Path Grouping Logic:** + +Paths are grouped by their base path (first 3-4 segments, excluding placeholders): + +``` +/api/v2/buckets → api-v2-buckets +/api/v2/buckets/{id} → api-v2-buckets (same group) +/api/v2/authorizations → api-v2-authorizations +``` + +**Output Formats:** + +- **YAML**: Hugo-compatible data files +- **JSON**: Programmatic access and tooling + +## Development + +### Prerequisites + +- Node.js >= 16.0.0 +- Yarn package manager +- TypeScript installed (via root package.json) + +### Setup + +```bash +# Install dependencies (from repo root) +yarn install + +# Or install in the openapi-paths-to-hugo-data module +cd api-docs/scripts/openapi-paths-to-hugo-data +yarn install +``` + +### TypeScript Configuration + +The scripts use a dedicated `tsconfig.json` with CommonJS output: + +```json +{ + "compilerOptions": { + "target": "ES2021", + "module": "CommonJS", + "outDir": "./dist", + "strict": true, + ... + } +} +``` + +### Making Changes + +1. Edit TypeScript files in `api-docs/scripts/` +2. Compile: `yarn build:api-scripts` +3. Test: `yarn build:api-pages:product cloud-v2` + +### Watch Mode + +For active development: + +```bash +cd api-docs/scripts/openapi-paths-to-hugo-data +yarn build:watch +``` + +## Testing + +### Unit Test Example + +```javascript +const converter = require('./api-docs/scripts/dist/openapi-paths-to-hugo-data/index.js'); + +converter.generateHugoData({ + specFile: 'api-docs/influxdb/cloud/v2/ref.yml', + dataOutPath: './test-output/paths', + articleOutPath: './test-output/articles' +}); +``` + +### Verify Output + +After generation, check: + +1. **Path fragments exist:** + ```bash + ls -l static/openapi/influxdb-cloud-v2/paths/ + ``` + +2. 
**Both formats generated:** + ```bash + ls -l static/openapi/influxdb-cloud-v2/paths/*.{yaml,json} + ``` + +3. **Article data created:** + ```bash + cat data/article-data/influxdb/cloud-v2/articles.yml + cat data/article-data/influxdb/cloud-v2/articles.json + ``` + +4. **Hugo pages generated:** + ```bash + ls -l content/influxdb/cloud/api/v2/ + ``` + +## Troubleshooting + +### TypeScript Compilation Errors + +```bash +# Clean and rebuild +rm -rf api-docs/scripts/dist +yarn build:api-scripts +``` + +### Missing Type Definitions + +```bash +cd api-docs/scripts/openapi-paths-to-hugo-data +yarn add --dev @types/js-yaml @types/node +``` + +### Spec File Not Found + +Make sure to run `getswagger.sh` first: + +```bash +cd api-docs +./getswagger.sh cloud-v2 -B +``` + +### Path Grouping Issues + +The script groups paths by their first 3-4 segments. If you need different grouping: + +1. Edit `writePathOpenapis()` in `openapi-paths-to-hugo-data/index.ts` +2. Modify the `key.slice(0, 4)` logic +3. Rebuild: `yarn build:api-scripts` + +## Migration from JavaScript + +The original JavaScript files are preserved for reference: + +- `api-docs/scripts/generate-openapi-articles.js` (original) +- `api-docs/scripts/openapi-paths-to-hugo-data/index.js` (original) + +### Key Improvements + +1. **TypeScript**: Full type safety and IDE support +2. **Dual Formats**: Generates both YAML and JSON +3. **All Products**: Includes all 7 InfluxDB products +4. **Better Errors**: Clear error messages with product validation +5. **CLI Arguments**: Support for processing specific products +6. 
**Comprehensive Logging**: Progress indicators and status messages + +## Related Documentation + +- **API Docs README**: `api-docs/README.md` - Complete API documentation workflow +- **OpenAPI Plugins**: `api-docs/openapi/plugins/` - Custom processing plugins +- **Hugo Data to Pages**: `hugo-data-to-pages/` - Page generation from data files + +## Examples + +### Generate Only Cloud Products + +```bash +yarn build:api-pages:product cloud-v2 cloud-dedicated cloud-serverless +``` + +### Generate Only InfluxDB 3 Products + +```bash +yarn build:api-pages:product influxdb3-core influxdb3-enterprise +``` + +### Process Single Product Manually + +```bash +# Compile first +yarn build:api-scripts + +# Run for specific product +node api-docs/scripts/dist/generate-openapi-articles.js oss-v2 +``` + +## API Reference + +### generateHugoData(options) + +Generate Hugo data files from an OpenAPI specification. + +**Parameters:** + +- `options.specFile` (string) - Path to the OpenAPI spec file +- `options.dataOutPath` (string) - Output path for OpenAPI path fragments +- `options.articleOutPath` (string) - Output path for article metadata + +**Example:** + +```javascript +const { generateHugoData } = require('./api-docs/scripts/dist/openapi-paths-to-hugo-data/index.js'); + +generateHugoData({ + specFile: 'api-docs/influxdb/cloud/v2/ref.yml', + dataOutPath: 'static/openapi/influxdb-cloud-v2/paths', + articleOutPath: 'data/article-data/influxdb/cloud-v2' +}); +``` + +### productConfigs + +Map of product configurations exported from `generate-openapi-articles.ts`. 
+
+**Type:**
+
+```typescript
+type ProductConfig = {
+  specFile: string; // Path to OpenAPI spec
+  pagesDir: string; // Hugo content directory
+  description?: string; // Product description
+};
+
+const productConfigs: Record<string, ProductConfig>;
+```
+
+**Usage:**
+
+```javascript
+const { productConfigs } = require('./api-docs/scripts/dist/generate-openapi-articles.js');
+
+console.log(productConfigs['cloud-v2']);
+// {
+//   specFile: 'api-docs/cloud/v2/ref.yml',
+//   pagesDir: 'content/influxdb/cloud/api/v2',
+//   description: 'InfluxDB Cloud (v2 API)'
+// }
+```
+
+## License
+
+Same as parent docs-v2 repository (MIT).
diff --git a/api-docs/scripts/apply-overlay.js b/api-docs/scripts/apply-overlay.js
new file mode 100644
index 0000000000..a0a7b28b04
--- /dev/null
+++ b/api-docs/scripts/apply-overlay.js
@@ -0,0 +1,258 @@
+#!/usr/bin/env node
+/**
+ * Apply OpenAPI Overlay to Base Spec
+ *
+ * Implements a subset of the OpenAPI Overlay Specification v1.0.0
+ * to merge product-specific overlays onto a shared base spec.
+ *
+ * Supported overlay actions:
+ * - target: $.info.title (update info title)
+ * - target: $.info.description (update info description)
+ * - target: $.servers[0].description (update server description)
+ * - target: $.servers[0].variables.*.description (update variable description)
+ * - target: $.paths['/path'].method (add/update operation)
+ * - target: $.paths['/path'] (add entire path)
+ *
+ * Usage:
+ *   node apply-overlay.js <base-spec> <overlay-spec> -o <output-file>
+ *
+ * @module apply-overlay
+ */
+
+const fs = require('fs');
+const yaml = require('js-yaml');
+const path = require('path');
+
+/**
+ * Parse a JSONPath-like target string
+ * @param {string} target - JSONPath expression (e.g., "$.info.title")
+ * @returns {string[]} - Path segments
+ */
+function parseTarget(target) {
+  // Remove leading $. and split by . 
or bracket notation + const cleaned = target.replace(/^\$\.?/, ''); + const segments = []; + let current = ''; + let inBracket = false; + + for (let i = 0; i < cleaned.length; i++) { + const char = cleaned[i]; + if (char === '[' && !inBracket) { + if (current) segments.push(current); + current = ''; + inBracket = true; + } else if (char === ']' && inBracket) { + // Remove quotes from bracket content + segments.push(current.replace(/^['"]|['"]$/g, '')); + current = ''; + inBracket = false; + } else if (char === '.' && !inBracket) { + if (current) segments.push(current); + current = ''; + } else { + current += char; + } + } + if (current) segments.push(current); + + return segments; +} + +/** + * Get a value from an object using path segments + * @param {object} obj - Source object + * @param {string[]} segments - Path segments + * @returns {*} - Value at path + */ +function getPath(obj, segments) { + let current = obj; + for (const segment of segments) { + if (current === undefined || current === null) return undefined; + // Handle array index + if (/^\d+$/.test(segment)) { + current = current[parseInt(segment, 10)]; + } else { + current = current[segment]; + } + } + return current; +} + +/** + * Set a value in an object using path segments + * @param {object} obj - Target object + * @param {string[]} segments - Path segments + * @param {*} value - Value to set + */ +function setPath(obj, segments, value) { + let current = obj; + for (let i = 0; i < segments.length - 1; i++) { + const segment = segments[i]; + const nextSegment = segments[i + 1]; + + // Handle array index + if (/^\d+$/.test(segment)) { + const idx = parseInt(segment, 10); + if (current[idx] === undefined) { + current[idx] = /^\d+$/.test(nextSegment) ? [] : {}; + } + current = current[idx]; + } else { + if (current[segment] === undefined) { + current[segment] = /^\d+$/.test(nextSegment) ? 
[] : {}; + } + current = current[segment]; + } + } + + const lastSegment = segments[segments.length - 1]; + if (/^\d+$/.test(lastSegment)) { + current[parseInt(lastSegment, 10)] = value; + } else { + current[lastSegment] = value; + } +} + +/** + * Deep merge two objects + * Arrays are replaced, not merged. + * @param {object} target - Target object + * @param {object} source - Source object to merge + * @returns {object} - Merged object + */ +function deepMerge(target, source) { + // Arrays are replaced entirely, not merged + if (Array.isArray(source)) { + return source; + } + + if (typeof source !== 'object' || source === null) { + return source; + } + if (typeof target !== 'object' || target === null || Array.isArray(target)) { + return source; + } + + const result = { ...target }; + for (const key of Object.keys(source)) { + if ( + typeof source[key] === 'object' && + source[key] !== null && + !Array.isArray(source[key]) + ) { + result[key] = deepMerge(result[key], source[key]); + } else { + result[key] = source[key]; + } + } + return result; +} + +/** + * Apply overlay actions to base spec + * @param {object} base - Base OpenAPI spec + * @param {object} overlay - Overlay spec with actions + * @returns {object} - Merged spec + */ +function applyOverlay(base, overlay) { + const result = JSON.parse(JSON.stringify(base)); // Deep clone + + if (!overlay.actions || !Array.isArray(overlay.actions)) { + console.warn('Warning: No actions found in overlay'); + return result; + } + + for (const action of overlay.actions) { + if (!action.target) { + console.warn('Warning: Action missing target, skipping'); + continue; + } + + const segments = parseTarget(action.target); + + if (action.update !== undefined) { + // Get existing value at path + const existing = getPath(result, segments); + + if ( + existing !== undefined && + typeof existing === 'object' && + typeof action.update === 'object' + ) { + // Merge objects + setPath(result, segments, deepMerge(existing, 
action.update));
+      } else {
+        // Replace value
+        setPath(result, segments, action.update);
+      }
+
+      console.log(`Applied: ${action.target}`);
+    } else if (action.remove === true) {
+      // Remove is not implemented yet
+      console.warn(
+        `Warning: remove action not implemented for ${action.target}`
+      );
+    }
+  }
+
+  return result;
+}
+
+/**
+ * Main function
+ */
+function main() {
+  const args = process.argv.slice(2);
+
+  // Parse arguments
+  let baseFile = null;
+  let overlayFile = null;
+  let outputFile = null;
+
+  for (let i = 0; i < args.length; i++) {
+    if (args[i] === '-o' || args[i] === '--output') {
+      outputFile = args[++i];
+    } else if (!baseFile) {
+      baseFile = args[i];
+    } else if (!overlayFile) {
+      overlayFile = args[i];
+    }
+  }
+
+  if (!baseFile || !overlayFile) {
+    console.error(
+      'Usage: node apply-overlay.js <base-spec> <overlay-spec> -o <output-file>'
+    );
+    process.exit(1);
+  }
+
+  // Read files
+  console.log(`Base: ${baseFile}`);
+  console.log(`Overlay: ${overlayFile}`);
+
+  const baseContent = fs.readFileSync(baseFile, 'utf8');
+  const overlayContent = fs.readFileSync(overlayFile, 'utf8');
+
+  const base = yaml.load(baseContent);
+  const overlay = yaml.load(overlayContent);
+
+  // Apply overlay
+  const result = applyOverlay(base, overlay);
+
+  // Output
+  const outputYaml = yaml.dump(result, {
+    lineWidth: -1, // Don't wrap lines
+    noRefs: true, // Don't use YAML references
+    quotingType: "'",
+    forceQuotes: false,
+  });
+
+  if (outputFile) {
+    fs.writeFileSync(outputFile, outputYaml);
+    console.log(`Output: ${outputFile}`);
+  } else {
+    console.log(outputYaml);
+  }
+}
+
+main();
diff --git a/api-docs/scripts/dist/generate-openapi-articles.js b/api-docs/scripts/dist/generate-openapi-articles.js
new file mode 100644
index 0000000000..7c9cb786e1
--- /dev/null
+++ b/api-docs/scripts/dist/generate-openapi-articles.js
@@ -0,0 +1,875 @@
+#!/usr/bin/env node
+'use strict';
+/**
+ * Generate OpenAPI Articles Script
+ *
+ * Generates Hugo data files and content pages from OpenAPI specifications
+ 
* for all InfluxDB products. + * + * This script: + * 1. Runs getswagger.sh to fetch/bundle OpenAPI specs + * 2. Copies specs to static directory for download + * 3. Generates path group fragments (YAML and JSON) + * 4. Creates article metadata (YAML and JSON) + * 5. Generates Hugo content pages from article data + * + * Usage: + * node generate-openapi-articles.js # Generate all products + * node generate-openapi-articles.js cloud-v2 # Generate single product + * node generate-openapi-articles.js cloud-v2 oss-v2 # Generate multiple products + * + * @module generate-openapi-articles + */ +var __createBinding = + (this && this.__createBinding) || + (Object.create + ? function (o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if ( + !desc || + ('get' in desc ? !m.__esModule : desc.writable || desc.configurable) + ) { + desc = { + enumerable: true, + get: function () { + return m[k]; + }, + }; + } + Object.defineProperty(o, k2, desc); + } + : function (o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; + }); +var __setModuleDefault = + (this && this.__setModuleDefault) || + (Object.create + ? 
function (o, v) { + Object.defineProperty(o, 'default', { enumerable: true, value: v }); + } + : function (o, v) { + o['default'] = v; + }); +var __importStar = + (this && this.__importStar) || + (function () { + var ownKeys = function (o) { + ownKeys = + Object.getOwnPropertyNames || + function (o) { + var ar = []; + for (var k in o) + if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k; + return ar; + }; + return ownKeys(o); + }; + return function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) + for (var k = ownKeys(mod), i = 0; i < k.length; i++) + if (k[i] !== 'default') __createBinding(result, mod, k[i]); + __setModuleDefault(result, mod); + return result; + }; + })(); +Object.defineProperty(exports, '__esModule', { value: true }); +exports.LINK_PATTERN = + exports.MARKDOWN_FIELDS = + exports.productConfigs = + void 0; +exports.processProduct = processProduct; +exports.generateDataFromOpenAPI = generateDataFromOpenAPI; +exports.generatePagesFromArticleData = generatePagesFromArticleData; +exports.deriveProductPath = deriveProductPath; +exports.transformDocLinks = transformDocLinks; +exports.validateDocLinks = validateDocLinks; +exports.resolveContentPath = resolveContentPath; +const child_process_1 = require('child_process'); +const path = __importStar(require('path')); +const fs = __importStar(require('fs')); +// Import the OpenAPI to Hugo converter +const openapiPathsToHugo = require('./openapi-paths-to-hugo-data/index.js'); +// Calculate the relative paths +const DOCS_ROOT = '.'; +const API_DOCS_ROOT = 'api-docs'; +// CLI flags +const validateLinks = process.argv.includes('--validate-links'); +/** + * Execute a shell command and handle errors + * + * @param command - Command to execute + * @param description - Human-readable description of the command + * @throws Exits process with code 1 on error + */ +function execCommand(command, description) { + try { + if (description) { + 
console.log(`\n${description}...`); + } + console.log(`Executing: ${command}\n`); + (0, child_process_1.execSync)(command, { stdio: 'inherit' }); + } catch (error) { + console.error(`\n❌ Error executing command: ${command}`); + if (error instanceof Error) { + console.error(error.message); + } + process.exit(1); + } +} +/** + * Generate a clean static directory name from a product key. + * Handles the influxdb3_* products to avoid redundant 'influxdb-influxdb3' prefixes. + * + * @param productKey - Product identifier (e.g., 'cloud-v2', 'influxdb3_core') + * @returns Clean directory name (e.g., 'influxdb-cloud-v2', 'influxdb3-core') + */ +function getStaticDirName(productKey) { + // For influxdb3_* products, convert underscore to hyphen and don't add prefix + if (productKey.startsWith('influxdb3_')) { + return productKey.replace('_', '-'); + } + // For other products, add 'influxdb-' prefix + return `influxdb-${productKey}`; +} +/** + * Generate Hugo data files from OpenAPI specification + * + * @param specFile - Path to the OpenAPI spec file + * @param dataOutPath - Output path for OpenAPI path fragments + * @param articleOutPath - Output path for article metadata + */ +function generateDataFromOpenAPI(specFile, dataOutPath, articleOutPath) { + if (!fs.existsSync(dataOutPath)) { + fs.mkdirSync(dataOutPath, { recursive: true }); + } + openapiPathsToHugo.generateHugoData({ + dataOutPath, + articleOutPath, + specFile, + }); +} +/** + * Generate Hugo content pages from article data + * + * Creates markdown files with frontmatter from article metadata. + * Each article becomes a page with type: api that renders via Scalar. 
+ * + * @param options - Generation options + */ +function generatePagesFromArticleData(options) { + const { + articlesPath, + contentPath, + menuKey, + menuParent, + productDescription, + skipParentMenu, + } = options; + const yaml = require('js-yaml'); + const articlesFile = path.join(articlesPath, 'articles.yml'); + if (!fs.existsSync(articlesFile)) { + console.warn(`⚠️ Articles file not found: ${articlesFile}`); + return; + } + // Read articles data + const articlesContent = fs.readFileSync(articlesFile, 'utf8'); + const data = yaml.load(articlesContent); + if (!data.articles || !Array.isArray(data.articles)) { + console.warn(`⚠️ No articles found in ${articlesFile}`); + return; + } + // Ensure content directory exists + if (!fs.existsSync(contentPath)) { + fs.mkdirSync(contentPath, { recursive: true }); + } + // Determine the API parent directory from the first article's path + // e.g., if article path is "api/v1/health", the API root is "api" + const firstArticlePath = data.articles[0]?.path || ''; + const apiRootDir = firstArticlePath.split('/')[0]; + // Generate parent _index.md for the API section + if (apiRootDir) { + const apiParentDir = path.join(contentPath, apiRootDir); + const parentIndexFile = path.join(apiParentDir, '_index.md'); + if (!fs.existsSync(apiParentDir)) { + fs.mkdirSync(apiParentDir, { recursive: true }); + } + if (!fs.existsSync(parentIndexFile)) { + const parentFrontmatter = { + title: menuParent || 'HTTP API', + description: + productDescription || + 'API reference documentation for all available endpoints.', + weight: 104, + }; + // Add menu entry for parent page (unless skipParentMenu is true) + if (menuKey && !skipParentMenu) { + parentFrontmatter.menu = { + [menuKey]: { + name: menuParent || 'HTTP API', + }, + }; + } + const parentContent = `--- +${yaml.dump(parentFrontmatter)}--- +`; + fs.writeFileSync(parentIndexFile, parentContent); + console.log(`✓ Generated parent index at ${parentIndexFile}`); + } + } + // Generate a page 
for each article + for (const article of data.articles) { + const pagePath = path.join(contentPath, article.path); + const pageFile = path.join(pagePath, '_index.md'); + // Create directory if needed + if (!fs.existsSync(pagePath)) { + fs.mkdirSync(pagePath, { recursive: true }); + } + // Build frontmatter object + // Use menuName for display (actual endpoint path like /health) + // Fall back to name or path if menuName is not set + const displayName = + article.fields.menuName || article.fields.name || article.path; + const frontmatter = { + title: displayName, + description: `API reference for ${displayName}`, + type: 'api', + // Use explicit layout to override Hugo's default section template lookup + // (Hugo's section lookup ignores `type`, so we need `layout` for the 3-column API layout) + layout: 'list', + staticFilePath: article.fields.staticFilePath, + weight: 100, + }; + // Add menu entry if menuKey is provided + // Use menuName for menu display (shows actual endpoint path like /health) + if (menuKey) { + frontmatter.menu = { + [menuKey]: { + name: displayName, + ...(menuParent && { parent: menuParent }), + }, + }; + } + // Add related links if present in article fields + if ( + article.fields.related && + Array.isArray(article.fields.related) && + article.fields.related.length > 0 + ) { + frontmatter.related = article.fields.related; + } + // Add OpenAPI tags if present in article fields (for frontmatter metadata) + if ( + article.fields.apiTags && + Array.isArray(article.fields.apiTags) && + article.fields.apiTags.length > 0 + ) { + frontmatter.api_tags = article.fields.apiTags; + } + const pageContent = `--- +${yaml.dump(frontmatter)}--- +`; + fs.writeFileSync(pageFile, pageContent); + } + console.log( + `✓ Generated ${data.articles.length} content pages in ${contentPath}` + ); +} +/** + * Generate Hugo content pages from tag-based article data + * + * Creates markdown files with frontmatter from article metadata. 
+ * Each article becomes a page with type: api that renders via RapiDoc. + * Includes operation metadata for TOC generation. + * + * @param options - Generation options + */ +function generateTagPagesFromArticleData(options) { + const { + articlesPath, + contentPath, + menuKey, + menuParent, + productDescription, + skipParentMenu, + pathSpecFiles, + } = options; + const yaml = require('js-yaml'); + const articlesFile = path.join(articlesPath, 'articles.yml'); + if (!fs.existsSync(articlesFile)) { + console.warn(`⚠️ Articles file not found: ${articlesFile}`); + return; + } + // Read articles data + const articlesContent = fs.readFileSync(articlesFile, 'utf8'); + const data = yaml.load(articlesContent); + if (!data.articles || !Array.isArray(data.articles)) { + console.warn(`⚠️ No articles found in ${articlesFile}`); + return; + } + // Ensure content directory exists + if (!fs.existsSync(contentPath)) { + fs.mkdirSync(contentPath, { recursive: true }); + } + // Generate parent _index.md for the API section + const apiParentDir = path.join(contentPath, 'api'); + const parentIndexFile = path.join(apiParentDir, '_index.md'); + if (!fs.existsSync(apiParentDir)) { + fs.mkdirSync(apiParentDir, { recursive: true }); + } + if (!fs.existsSync(parentIndexFile)) { + const parentFrontmatter = { + title: menuParent || 'HTTP API', + description: + productDescription || + 'API reference documentation for all available endpoints.', + weight: 104, + }; + // Add menu entry for parent page (unless skipParentMenu is true) + if (menuKey && !skipParentMenu) { + parentFrontmatter.menu = { + [menuKey]: { + name: menuParent || 'HTTP API', + }, + }; + } + const parentContent = `--- +${yaml.dump(parentFrontmatter)}--- +`; + fs.writeFileSync(parentIndexFile, parentContent); + console.log(`✓ Generated parent index at ${parentIndexFile}`); + } + // Generate a page for each article (tag) + for (const article of data.articles) { + const pagePath = path.join(contentPath, article.path); + const 
pageFile = path.join(pagePath, '_index.md'); + // Create directory if needed + if (!fs.existsSync(pagePath)) { + fs.mkdirSync(pagePath, { recursive: true }); + } + // Build frontmatter object + const title = article.fields.title || article.fields.name || article.path; + const isConceptual = article.fields.isConceptual === true; + const frontmatter = { + title, + description: article.fields.description || `API reference for ${title}`, + type: 'api', + layout: isConceptual ? 'single' : 'list', + staticFilePath: article.fields.staticFilePath, + weight: 100, + // Tag-based fields + tag: article.fields.tag, + isConceptual, + menuGroup: article.fields.menuGroup, + }; + // Add operations for TOC generation (only for non-conceptual pages) + if ( + !isConceptual && + article.fields.operations && + article.fields.operations.length > 0 + ) { + frontmatter.operations = article.fields.operations; + } + // Add tag description for conceptual pages + if (isConceptual && article.fields.tagDescription) { + frontmatter.tagDescription = article.fields.tagDescription; + } + // Add showSecuritySchemes flag for authentication pages + if (article.fields.showSecuritySchemes) { + frontmatter.showSecuritySchemes = true; + } + // Note: We deliberately don't add menu entries for tag-based API pages. + // The API sidebar navigation (api/sidebar-nav.html) handles navigation + // for API reference pages, avoiding conflicts with existing menu items + // like "Query data" and "Write data" that exist in the main sidebar. 
+ // Add related links if present in article fields + if ( + article.fields.related && + Array.isArray(article.fields.related) && + article.fields.related.length > 0 + ) { + frontmatter.related = article.fields.related; + } + const pageContent = `--- +${yaml.dump(frontmatter)}--- +`; + fs.writeFileSync(pageFile, pageContent); + } + console.log( + `✓ Generated ${data.articles.length} tag-based content pages in ${contentPath}` + ); + // Generate individual operation pages for standalone URLs + generateOperationPages({ + articlesPath, + contentPath, + pathSpecFiles, + }); +} +/** + * Convert API path to URL-safe slug + * + * Transforms an API path like "/api/v3/write_lp" to a URL-friendly format. + * Removes leading slash and uses the path as-is (underscores are URL-safe). + * + * @param apiPath - The API path (e.g., "/write", "/api/v3/write_lp") + * @returns URL-safe path slug (e.g., "write", "api/v3/write_lp") + */ +function apiPathToSlug(apiPath) { + // Remove leading slash, keep underscores (they're URL-safe) + return apiPath.replace(/^\//, ''); +} +/** + * Generate standalone Hugo content pages for each API operation + * + * Creates individual pages at path-based URLs like /api/write/post/ + * for each operation, using RapiDoc Mini. + * + * When pathSpecFiles is provided, uses path-specific specs for single-operation + * rendering (filters by method only, avoiding path prefix conflicts). + * Falls back to tag-based specs when pathSpecFiles is not available. 
+ * + * @param options - Generation options + */ +function generateOperationPages(options) { + const { articlesPath, contentPath, pathSpecFiles } = options; + const yaml = require('js-yaml'); + const articlesFile = path.join(articlesPath, 'articles.yml'); + if (!fs.existsSync(articlesFile)) { + console.warn(`⚠️ Articles file not found: ${articlesFile}`); + return; + } + // Read articles data + const articlesContent = fs.readFileSync(articlesFile, 'utf8'); + const data = yaml.load(articlesContent); + if (!data.articles || !Array.isArray(data.articles)) { + console.warn(`⚠️ No articles found in ${articlesFile}`); + return; + } + let operationCount = 0; + // Process each article (tag) and generate pages for its operations + for (const article of data.articles) { + // Skip conceptual articles (they don't have operations) + if (article.fields.isConceptual) { + continue; + } + const operations = article.fields.operations || []; + const tagSpecFile = article.fields.staticFilePath; + const tagName = article.fields.tag || article.fields.name || ''; + for (const op of operations) { + // Build operation page path: api/{path}/{method}/ + // e.g., /write -> api/write/post/ + // e.g., /api/v3/write_lp -> api/api/v3/write_lp/post/ + const pathSlug = apiPathToSlug(op.path); + const method = op.method.toLowerCase(); + const operationDir = path.join(contentPath, 'api', pathSlug, method); + const operationFile = path.join(operationDir, '_index.md'); + // Create directory if needed + if (!fs.existsSync(operationDir)) { + fs.mkdirSync(operationDir, { recursive: true }); + } + // Build frontmatter + const title = op.summary || `${op.method} ${op.path}`; + // Determine spec file and match-paths based on availability of path-specific specs + // Path-specific specs isolate the path at file level, so we only filter by method + // This avoids substring matching issues (e.g., /admin matching /admin/regenerate) + const pathSpecFile = pathSpecFiles?.get(op.path); + const specFile = pathSpecFile 
|| tagSpecFile; + const matchPaths = pathSpecFile ? method : `${method} ${op.path}`; + const frontmatter = { + title, + description: `API reference for ${op.method} ${op.path}`, + type: 'api-operation', + layout: 'operation', + // RapiDoc Mini configuration + specFile, + // When using path-specific spec: just method (e.g., "post") + // When using tag spec: method + path (e.g., "post /write") + matchPaths, + // Operation metadata + operationId: op.operationId, + method: op.method, + apiPath: op.path, + tag: tagName, + }; + // Add compatibility version if present + if (op.compatVersion) { + frontmatter.compatVersion = op.compatVersion; + } + // Add related links from operation's externalDocs + if (op.externalDocs?.url) { + frontmatter.related = [op.externalDocs.url]; + } + const pageContent = `--- +${yaml.dump(frontmatter)}--- +`; + fs.writeFileSync(operationFile, pageContent); + operationCount++; + } + } + console.log( + `✓ Generated ${operationCount} operation pages in ${contentPath}/api/` + ); +} +/** + * Product configurations for all InfluxDB editions + * + * Maps product identifiers to their OpenAPI specs and content directories + */ +const productConfigs = { + // TODO: v2 products (cloud-v2, oss-v2) are disabled for now because they + // have existing Redoc-based API reference at /reference/api/ + // Uncomment when ready to migrate v2 products to Scalar + // 'cloud-v2': { + // specFile: path.join(API_DOCS_ROOT, 'influxdb/cloud/v2/ref.yml'), + // pagesDir: path.join(DOCS_ROOT, 'content/influxdb/cloud/api'), + // description: 'InfluxDB Cloud (v2 API)', + // menuKey: 'influxdb_cloud', + // }, + // 'oss-v2': { + // specFile: path.join(API_DOCS_ROOT, 'influxdb/v2/v2/ref.yml'), + // pagesDir: path.join(DOCS_ROOT, 'content/influxdb/v2/api'), + // description: 'InfluxDB OSS v2', + // menuKey: 'influxdb_v2', + // }, + // InfluxDB 3 products use tag-based generation for better UX + // Keys use underscores to match Hugo data directory structure + influxdb3_core: { + 
specFile: path.join(API_DOCS_ROOT, 'influxdb3/core/v3/ref.yml'), + pagesDir: path.join(DOCS_ROOT, 'content/influxdb3/core'), + description: 'InfluxDB 3 Core', + menuKey: 'influxdb3_core', + useTagBasedGeneration: true, + }, + influxdb3_enterprise: { + specFile: path.join(API_DOCS_ROOT, 'influxdb3/enterprise/v3/ref.yml'), + pagesDir: path.join(DOCS_ROOT, 'content/influxdb3/enterprise'), + description: 'InfluxDB 3 Enterprise', + menuKey: 'influxdb3_enterprise', + useTagBasedGeneration: true, + }, + // Note: Cloud Dedicated, Serverless, and Clustered use management APIs + // with paths like /accounts/{accountId}/... so we put them under /api/ + // These products have existing /reference/api/ pages with menu entries, + // so we skip adding menu entries to the generated parent pages. + 'cloud-dedicated': { + specFile: path.join( + API_DOCS_ROOT, + 'influxdb3/cloud-dedicated/management/openapi.yml' + ), + pagesDir: path.join(DOCS_ROOT, 'content/influxdb3/cloud-dedicated/api'), + description: 'InfluxDB Cloud Dedicated', + menuKey: 'influxdb3_cloud_dedicated', + skipParentMenu: true, + }, + 'cloud-serverless': { + specFile: path.join( + API_DOCS_ROOT, + 'influxdb3/cloud-serverless/management/openapi.yml' + ), + pagesDir: path.join(DOCS_ROOT, 'content/influxdb3/cloud-serverless/api'), + description: 'InfluxDB Cloud Serverless', + menuKey: 'influxdb3_cloud_serverless', + skipParentMenu: true, + }, + clustered: { + specFile: path.join( + API_DOCS_ROOT, + 'influxdb3/clustered/management/openapi.yml' + ), + pagesDir: path.join(DOCS_ROOT, 'content/influxdb3/clustered/api'), + description: 'InfluxDB Clustered', + menuKey: 'influxdb3_clustered', + skipParentMenu: true, + }, +}; +exports.productConfigs = productConfigs; +/** Fields that can contain markdown with links */ +const MARKDOWN_FIELDS = new Set(['description', 'summary']); +exports.MARKDOWN_FIELDS = MARKDOWN_FIELDS; +/** Link placeholder pattern */ +const LINK_PATTERN = /\/influxdb\/version\//g; +exports.LINK_PATTERN = 
LINK_PATTERN; +/** + * Derive documentation root from spec file path. + * + * @example + * 'api-docs/influxdb3/core/v3/ref.yml' → '/influxdb3/core' + * 'api-docs/influxdb3/enterprise/v3/ref.yml' → '/influxdb3/enterprise' + * 'api-docs/influxdb/v2/ref.yml' → '/influxdb/v2' + */ +function deriveProductPath(specPath) { + // Match: api-docs/(influxdb3|influxdb)/(product-or-version)/... + const match = specPath.match(/api-docs\/(influxdb3?)\/([\w-]+)\//); + if (!match) { + throw new Error(`Cannot derive product path from: ${specPath}`); + } + return `/${match[1]}/${match[2]}`; +} +/** + * Transform documentation links in OpenAPI spec markdown fields. + * Replaces `/influxdb/version/` with the actual product path. + * + * @param spec - Parsed OpenAPI spec object + * @param productPath - Target path (e.g., '/influxdb3/core') + * @returns Spec with transformed links (new object, original unchanged) + */ +function transformDocLinks(spec, productPath) { + function transformValue(value) { + if (typeof value === 'string') { + return value.replace(LINK_PATTERN, `${productPath}/`); + } + if (Array.isArray(value)) { + return value.map(transformValue); + } + if (value !== null && typeof value === 'object') { + return transformObject(value); + } + return value; + } + function transformObject(obj) { + const result = {}; + for (const [key, value] of Object.entries(obj)) { + if (MARKDOWN_FIELDS.has(key) && typeof value === 'string') { + result[key] = value.replace(LINK_PATTERN, `${productPath}/`); + } else if (value !== null && typeof value === 'object') { + result[key] = transformValue(value); + } else { + result[key] = value; + } + } + return result; + } + return transformObject(spec); +} +/** + * Resolve a URL path to a content file path. 
+ * + * @example + * '/influxdb3/core/api/auth/' → 'content/influxdb3/core/api/auth/_index.md' + */ +function resolveContentPath(urlPath, contentDir) { + const normalized = urlPath.replace(/\/$/, ''); + const indexPath = path.join(contentDir, normalized, '_index.md'); + const directPath = path.join(contentDir, normalized + '.md'); + if (fs.existsSync(indexPath)) { + return indexPath; + } + if (fs.existsSync(directPath)) { + return directPath; + } + return indexPath; // Return expected path for error message +} +/** + * Validate that transformed links point to existing content. + * + * @param spec - Transformed OpenAPI spec + * @param contentDir - Path to Hugo content directory + * @returns Array of error messages for broken links + */ +function validateDocLinks(spec, contentDir) { + const errors = []; + const linkPattern = /\[([^\]]+)\]\(([^)]+)\)/g; + function extractLinks(value, jsonPath) { + if (typeof value === 'string') { + let match; + while ((match = linkPattern.exec(value)) !== null) { + const [, linkText, linkUrl] = match; + // Only validate internal links (start with /) + if (linkUrl.startsWith('/') && !linkUrl.startsWith('//')) { + const contentPath = resolveContentPath(linkUrl, contentDir); + if (!fs.existsSync(contentPath)) { + errors.push( + `Broken link at ${jsonPath}: [${linkText}](${linkUrl})` + ); + } + } + } + // Reset regex lastIndex for next string + linkPattern.lastIndex = 0; + } else if (Array.isArray(value)) { + value.forEach((item, index) => + extractLinks(item, `${jsonPath}[${index}]`) + ); + } else if (value !== null && typeof value === 'object') { + for (const [key, val] of Object.entries(value)) { + extractLinks(val, `${jsonPath}.${key}`); + } + } + } + extractLinks(spec, 'spec'); + return errors; +} +/** + * Process a single product: fetch spec, generate data, and create pages + * + * @param productKey - Product identifier (e.g., 'cloud-v2') + * @param config - Product configuration + */ +function processProduct(productKey, config) { + 
console.log('\n' + '='.repeat(80)); + console.log(`Processing ${config.description || productKey}`); + console.log('='.repeat(80)); + const staticPath = path.join(DOCS_ROOT, 'static/openapi'); + const staticDirName = getStaticDirName(productKey); + const staticSpecPath = path.join(staticPath, `${staticDirName}.yml`); + const staticJsonSpecPath = path.join(staticPath, `${staticDirName}.json`); + const staticPathsPath = path.join(staticPath, `${staticDirName}/paths`); + const articlesPath = path.join( + DOCS_ROOT, + `data/article_data/influxdb/${productKey}` + ); + // Check if spec file exists + if (!fs.existsSync(config.specFile)) { + console.warn(`⚠️ Spec file not found: ${config.specFile}`); + console.log('Skipping this product. Run getswagger.sh first if needed.\n'); + return; + } + try { + // Step 1: Execute the getswagger.sh script to fetch/bundle the spec + // Note: getswagger.sh must run from api-docs/ because it uses relative paths + const getswaggerScript = path.join(API_DOCS_ROOT, 'getswagger.sh'); + if (fs.existsSync(getswaggerScript)) { + execCommand( + `cd ${API_DOCS_ROOT} && ./getswagger.sh ${productKey} -B`, + `Fetching OpenAPI spec for ${productKey}` + ); + } else { + console.log(`⚠️ getswagger.sh not found, skipping fetch step`); + } + // Step 2: Ensure static directory exists + if (!fs.existsSync(staticPath)) { + fs.mkdirSync(staticPath, { recursive: true }); + } + // Step 3: Load spec, transform documentation links, and write to static folder + if (fs.existsSync(config.specFile)) { + try { + const yaml = require('js-yaml'); + const specContent = fs.readFileSync(config.specFile, 'utf8'); + const specObject = yaml.load(specContent); + // Transform documentation links (/influxdb/version/ -> actual product path) + const productPath = deriveProductPath(config.specFile); + const transformedSpec = transformDocLinks(specObject, productPath); + console.log(`✓ Transformed documentation links to ${productPath}`); + // Validate links if enabled + if 
(validateLinks) { + const contentDir = path.resolve(__dirname, '../../content'); + const linkErrors = validateDocLinks(transformedSpec, contentDir); + if (linkErrors.length > 0) { + console.warn( + `\n⚠️ Link validation warnings for ${config.specFile}:` + ); + linkErrors.forEach((err) => console.warn(` ${err}`)); + } + } + // Write transformed spec to static folder (YAML) + fs.writeFileSync(staticSpecPath, yaml.dump(transformedSpec)); + console.log(`✓ Wrote transformed spec to ${staticSpecPath}`); + // Step 4: Generate JSON version of the spec + fs.writeFileSync( + staticJsonSpecPath, + JSON.stringify(transformedSpec, null, 2) + ); + console.log(`✓ Generated JSON spec at ${staticJsonSpecPath}`); + } catch (specError) { + console.warn(`⚠️ Could not process spec: ${specError}`); + } + } + // Step 5: Generate Hugo data from OpenAPI spec (using transformed spec) + if (config.useTagBasedGeneration) { + // Tag-based generation: group operations by OpenAPI tag + const staticTagsPath = path.join(staticPath, `${staticDirName}/tags`); + console.log(`\n📋 Using tag-based generation for ${productKey}...`); + openapiPathsToHugo.generateHugoDataByTag({ + specFile: staticSpecPath, + dataOutPath: staticTagsPath, + articleOutPath: articlesPath, + includePaths: true, // Also generate path-based files for backwards compatibility + }); + // Step 5b: Generate path-specific specs for operation pages + // Each path gets its own spec file, enabling method-only filtering + // This avoids substring matching issues (e.g., /admin matching /admin/regenerate) + console.log( + `\n📋 Generating path-specific specs in ${staticPathsPath}...` + ); + const pathSpecFiles = openapiPathsToHugo.generatePathSpecificSpecs( + staticSpecPath, + staticPathsPath + ); + // Step 6: Generate Hugo content pages from tag-based article data + generateTagPagesFromArticleData({ + articlesPath, + contentPath: config.pagesDir, + menuKey: config.menuKey, + menuParent: 'InfluxDB HTTP API', + skipParentMenu: 
config.skipParentMenu, + pathSpecFiles, + }); + } else { + // Path-based generation: group paths by URL prefix (legacy) + generateDataFromOpenAPI(staticSpecPath, staticPathsPath, articlesPath); + // Step 6: Generate Hugo content pages from path-based article data + generatePagesFromArticleData({ + articlesPath, + contentPath: config.pagesDir, + menuKey: config.menuKey, + menuParent: 'InfluxDB HTTP API', + skipParentMenu: config.skipParentMenu, + }); + } + console.log( + `\n✅ Successfully processed ${config.description || productKey}\n` + ); + } catch (error) { + console.error(`\n❌ Error processing ${productKey}:`, error); + process.exit(1); + } +} +/** + * Main execution function + */ +function main() { + const args = process.argv.slice(2); + // Determine which products to process + let productsToProcess; + if (args.length === 0) { + // No arguments: process all products + productsToProcess = Object.keys(productConfigs); + console.log('\n📋 Processing all products...\n'); + } else { + // Arguments provided: process only specified products + productsToProcess = args; + console.log( + `\n📋 Processing specified products: ${productsToProcess.join(', ')}\n` + ); + } + // Validate product keys + const invalidProducts = productsToProcess.filter( + (key) => !productConfigs[key] + ); + if (invalidProducts.length > 0) { + console.error( + `\n❌ Invalid product identifier(s): ${invalidProducts.join(', ')}` + ); + console.error('\nValid products:'); + Object.keys(productConfigs).forEach((key) => { + console.error(` - ${key}: ${productConfigs[key].description}`); + }); + process.exit(1); + } + // Process each product + productsToProcess.forEach((productKey) => { + const config = productConfigs[productKey]; + processProduct(productKey, config); + }); + console.log('\n' + '='.repeat(80)); + console.log('✅ All products processed successfully!'); + console.log('='.repeat(80) + '\n'); +} +// Execute if run directly +if (require.main === module) { + main(); +} +//# 
sourceMappingURL=generate-openapi-articles.js.map diff --git a/api-docs/scripts/dist/openapi-paths-to-hugo-data/index.js b/api-docs/scripts/dist/openapi-paths-to-hugo-data/index.js new file mode 100644 index 0000000000..79e1da5b31 --- /dev/null +++ b/api-docs/scripts/dist/openapi-paths-to-hugo-data/index.js @@ -0,0 +1,883 @@ +'use strict'; +/** + * OpenAPI to Hugo Data Converter + * + * Converts OpenAPI v3 specifications into Hugo-compatible data files. + * Generates both YAML and JSON versions of spec fragments grouped by path. + * + * @module openapi-paths-to-hugo-data + */ +var __createBinding = + (this && this.__createBinding) || + (Object.create + ? function (o, m, k, k2) { + if (k2 === undefined) k2 = k; + var desc = Object.getOwnPropertyDescriptor(m, k); + if ( + !desc || + ('get' in desc ? !m.__esModule : desc.writable || desc.configurable) + ) { + desc = { + enumerable: true, + get: function () { + return m[k]; + }, + }; + } + Object.defineProperty(o, k2, desc); + } + : function (o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; + }); +var __setModuleDefault = + (this && this.__setModuleDefault) || + (Object.create + ? 
function (o, v) { + Object.defineProperty(o, 'default', { enumerable: true, value: v }); + } + : function (o, v) { + o['default'] = v; + }); +var __importStar = + (this && this.__importStar) || + (function () { + var ownKeys = function (o) { + ownKeys = + Object.getOwnPropertyNames || + function (o) { + var ar = []; + for (var k in o) + if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k; + return ar; + }; + return ownKeys(o); + }; + return function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) + for (var k = ownKeys(mod), i = 0; i < k.length; i++) + if (k[i] !== 'default') __createBinding(result, mod, k[i]); + __setModuleDefault(result, mod); + return result; + }; + })(); +Object.defineProperty(exports, '__esModule', { value: true }); +exports.writePathSpecificSpecs = writePathSpecificSpecs; +exports.generateHugoDataByTag = generateHugoDataByTag; +exports.generateHugoData = generateHugoData; +exports.generatePathSpecificSpecs = generatePathSpecificSpecs; +const yaml = __importStar(require('js-yaml')); +const fs = __importStar(require('fs')); +const path = __importStar(require('path')); +/** + * Read a YAML file and parse it + * + * @param filepath - Path to the YAML file + * @param encoding - File encoding (default: 'utf8') + * @returns Parsed YAML content + */ +function readFile(filepath, encoding = 'utf8') { + const content = fs.readFileSync(filepath, encoding); + return yaml.load(content); +} +/** + * Write data to a YAML file + * + * @param data - Data to write + * @param outputTo - Output file path + */ +function writeDataFile(data, outputTo) { + fs.writeFileSync(outputTo, yaml.dump(data)); +} +/** + * Write data to a JSON file + * + * @param data - Data to write + * @param outputTo - Output file path + */ +function writeJsonFile(data, outputTo) { + fs.writeFileSync(outputTo, JSON.stringify(data, null, 2)); +} +/** + * OpenAPI utility functions + */ +const openapiUtils = { + /** + * Check if a path 
fragment is a placeholder (e.g., {id}) + * + * @param str - Path fragment to check + * @returns True if the fragment is a placeholder + */ + isPlaceholderFragment(str) { + const placeholderRegex = /^\{.*\}$/; + return placeholderRegex.test(str); + }, +}; +/** + * Convert tag name to URL-friendly slug + * + * @param tagName - Tag name (e.g., "Write data", "Processing engine") + * @returns URL-friendly slug (e.g., "write-data", "processing-engine") + */ +function slugifyTag(tagName) { + return tagName + .toLowerCase() + .replace(/[^a-z0-9]+/g, '-') + .replace(/^-|-$/g, ''); +} +/** + * Menu group mappings for tag-based navigation + * Maps OpenAPI tags to sidebar groups + */ +const TAG_MENU_GROUPS = { + // Concepts group + 'Quick start': 'Concepts', + Authentication: 'Concepts', + 'Headers and parameters': 'Concepts', + 'Response codes': 'Concepts', + // Data Operations group + 'Write data': 'Data Operations', + 'Query data': 'Data Operations', + 'Cache data': 'Data Operations', + // Administration group + Database: 'Administration', + Table: 'Administration', + Token: 'Administration', + // Processing Engine group + 'Processing engine': 'Processing Engine', + // Server group + 'Server information': 'Server', + // Compatibility group + 'Compatibility endpoints': 'Compatibility', +}; +/** + * Get menu group for a tag + * + * @param tagName - Tag name + * @returns Menu group name or 'Other' if not mapped + */ +function getMenuGroupForTag(tagName) { + return TAG_MENU_GROUPS[tagName] || 'Other'; +} +/** + * HTTP methods to check for operations + */ +const HTTP_METHODS = [ + 'get', + 'post', + 'put', + 'patch', + 'delete', + 'options', + 'head', + 'trace', +]; +/** + * Extract all operations from an OpenAPI document grouped by tag + * + * @param openapi - OpenAPI document + * @returns Map of tag name to operations with that tag + */ +function extractOperationsByTag(openapi) { + const tagOperations = new Map(); + Object.entries(openapi.paths).forEach(([pathKey, pathItem]) 
=> { + HTTP_METHODS.forEach((method) => { + const operation = pathItem[method]; + if (operation) { + const opMeta = { + operationId: operation.operationId || `${method}-${pathKey}`, + method: method.toUpperCase(), + path: pathKey, + summary: operation.summary || '', + tags: operation.tags || [], + }; + // Extract compatibility version if present + if (operation['x-compatibility-version']) { + opMeta.compatVersion = operation['x-compatibility-version']; + } + // Extract externalDocs if present + if (operation.externalDocs) { + opMeta.externalDocs = { + description: operation.externalDocs.description || '', + url: operation.externalDocs.url, + }; + } + // Add operation to each of its tags + (operation.tags || []).forEach((tag) => { + if (!tagOperations.has(tag)) { + tagOperations.set(tag, []); + } + tagOperations.get(tag).push(opMeta); + }); + } + }); + }); + return tagOperations; +} +/** + * Write OpenAPI specs grouped by tag to separate files + * Generates both YAML and JSON versions per tag + * + * @param openapi - OpenAPI document + * @param prefix - Filename prefix for output files + * @param outPath - Output directory path + */ +function writeTagOpenapis(openapi, prefix, outPath) { + const tagOperations = extractOperationsByTag(openapi); + // Process each tag + tagOperations.forEach((operations, tagName) => { + // Deep copy openapi + const doc = JSON.parse(JSON.stringify(openapi)); + // Filter paths to only include those with operations for this tag + const filteredPaths = {}; + Object.entries(openapi.paths).forEach(([pathKey, pathItem]) => { + const filteredPathItem = {}; + let hasOperations = false; + HTTP_METHODS.forEach((method) => { + const operation = pathItem[method]; + if (operation?.tags?.includes(tagName)) { + // Clone the operation and restrict tags to only this tag + // This prevents RapiDoc from rendering the operation multiple times + // (once per tag) when an operation belongs to multiple tags + const filteredOperation = { ...operation, tags: 
[tagName] }; + filteredPathItem[method] = filteredOperation; + hasOperations = true; + } + }); + // Include path-level parameters if we have operations + if (hasOperations) { + if (pathItem.parameters) { + filteredPathItem.parameters = pathItem.parameters; + } + filteredPaths[pathKey] = filteredPathItem; + } + }); + doc.paths = filteredPaths; + // Filter tags to only include this tag (and trait tags for context) + if (doc.tags) { + doc.tags = doc.tags.filter( + (tag) => tag.name === tagName || tag['x-traitTag'] + ); + } + // Update info + const tagSlug = slugifyTag(tagName); + doc.info.title = tagName; + doc.info.description = `API reference for ${tagName}`; + doc['x-tagGroup'] = tagName; + try { + if (!fs.existsSync(outPath)) { + fs.mkdirSync(outPath, { recursive: true }); + } + const baseFilename = `${prefix}${tagSlug}`; + const yamlPath = path.resolve(outPath, `${baseFilename}.yaml`); + const jsonPath = path.resolve(outPath, `${baseFilename}.json`); + writeDataFile(doc, yamlPath); + writeJsonFile(doc, jsonPath); + console.log( + `Generated tag spec: ${baseFilename}.yaml (${Object.keys(filteredPaths).length} paths, ${operations.length} operations)` + ); + } catch (err) { + console.error(`Error writing tag group ${tagName}:`, err); + } + }); + // Also create specs for conceptual tags (x-traitTag) without operations + (openapi.tags || []).forEach((tag) => { + if (tag['x-traitTag'] && !tagOperations.has(tag.name)) { + const doc = JSON.parse(JSON.stringify(openapi)); + doc.paths = {}; + doc.tags = [tag]; + doc.info.title = tag.name; + doc.info.description = tag.description || `API reference for ${tag.name}`; + doc['x-tagGroup'] = tag.name; + const tagSlug = slugifyTag(tag.name); + try { + const baseFilename = `${prefix}${tagSlug}`; + const yamlPath = path.resolve(outPath, `${baseFilename}.yaml`); + const jsonPath = path.resolve(outPath, `${baseFilename}.json`); + writeDataFile(doc, yamlPath); + writeJsonFile(doc, jsonPath); + console.log(`Generated conceptual tag 
spec: ${baseFilename}.yaml`); + } catch (err) { + console.error(`Error writing conceptual tag ${tag.name}:`, err); + } + } + }); +} +/** + * Convert API path to filename-safe slug + * + * @param apiPath - API path (e.g., "/api/v3/configure/token/admin") + * @returns Filename-safe slug (e.g., "api-v3-configure-token-admin") + */ +function pathToFileSlug(apiPath) { + return apiPath + .replace(/^\//, '') // Remove leading slash + .replace(/\//g, '-') // Replace slashes with dashes + .replace(/[{}]/g, '') // Remove curly braces from path params + .replace(/-+/g, '-') // Collapse multiple dashes + .replace(/-$/, ''); // Remove trailing dash +} +/** + * Write path-specific OpenAPI specs (one file per exact API path) + * + * Each file contains all HTTP methods for a single path, enabling + * operation pages to filter by method only (no path prefix conflicts). + * + * @param openapi - OpenAPI document + * @param outPath - Output directory path (e.g., "static/openapi/{product}/paths") + * @returns Map of API path to spec file path (for use in frontmatter) + */ +function writePathSpecificSpecs(openapi, outPath) { + const pathSpecFiles = new Map(); + if (!fs.existsSync(outPath)) { + fs.mkdirSync(outPath, { recursive: true }); + } + Object.entries(openapi.paths).forEach(([apiPath, pathItem]) => { + // Deep clone pathItem to avoid mutating original + const clonedPathItem = JSON.parse(JSON.stringify(pathItem)); + // Limit each operation to a single tag to prevent duplicate rendering in RapiDoc + // RapiDoc renders operations once per tag, so multiple tags cause duplicates + const usedTags = new Set(); + HTTP_METHODS.forEach((method) => { + const operation = clonedPathItem[method]; + if (operation?.tags && operation.tags.length > 0) { + // Select the most specific tag to avoid duplicate rendering + // Prefer "Auth token" over "Authentication" for token-related operations + let primaryTag = operation.tags[0]; + if (operation.tags.includes('Auth token')) { + primaryTag = 'Auth 
token'; + } + operation.tags = [primaryTag]; + usedTags.add(primaryTag); + } + }); + // Create spec with just this path (all its methods) + // Include global security requirements so RapiDoc displays auth correctly + const pathSpec = { + openapi: openapi.openapi, + info: { + ...openapi.info, + title: apiPath, + description: `API reference for ${apiPath}`, + }, + paths: { [apiPath]: clonedPathItem }, + components: openapi.components, // Include for $ref resolution + servers: openapi.servers, + security: openapi.security, // Global security requirements + }; + // Filter spec-level tags to only include those used by operations + if (openapi.tags) { + pathSpec.tags = openapi.tags.filter( + (tag) => usedTags.has(tag.name) && !tag['x-traitTag'] + ); + } + // Write files + const slug = pathToFileSlug(apiPath); + const yamlPath = path.resolve(outPath, `${slug}.yaml`); + const jsonPath = path.resolve(outPath, `${slug}.json`); + writeDataFile(pathSpec, yamlPath); + writeJsonFile(pathSpec, jsonPath); + // Store the web-accessible path (without "static/" prefix) + // Hugo serves files from static/ at the root, so we extract the path after 'static/' + const staticMatch = yamlPath.match(/static\/(.+)$/); + const webPath = staticMatch ? 
`/${staticMatch[1]}` : yamlPath; + pathSpecFiles.set(apiPath, webPath); + }); + console.log( + `Generated ${pathSpecFiles.size} path-specific specs in ${outPath}` + ); + return pathSpecFiles; +} +/** + * Write OpenAPI specs grouped by path to separate files + * Generates both YAML and JSON versions + * + * @param openapi - OpenAPI document + * @param prefix - Filename prefix for output files + * @param outPath - Output directory path + */ +function writePathOpenapis(openapi, prefix, outPath) { + const pathGroups = {}; + // Group paths by their base path (first 3-4 segments, excluding placeholders) + Object.keys(openapi.paths) + .sort() + .forEach((p) => { + const delimiter = '/'; + let key = p.split(delimiter); + // Check if this is an item path (ends with a placeholder) + let isItemPath = openapiUtils.isPlaceholderFragment(key[key.length - 1]); + if (isItemPath) { + key = key.slice(0, -1); + } + // Take first 4 segments + key = key.slice(0, 4); + // Check if the last segment is still a placeholder + isItemPath = openapiUtils.isPlaceholderFragment(key[key.length - 1]); + if (isItemPath) { + key = key.slice(0, -1); + } + const groupKey = key.join('/'); + pathGroups[groupKey] = pathGroups[groupKey] || {}; + pathGroups[groupKey][p] = openapi.paths[p]; + }); + // Write each path group to separate YAML and JSON files + Object.keys(pathGroups).forEach((pg) => { + // Deep copy openapi + const doc = JSON.parse(JSON.stringify(openapi)); + doc.paths = pathGroups[pg]; + // Collect tags used by operations in this path group + const usedTags = new Set(); + Object.values(doc.paths).forEach((pathItem) => { + const httpMethods = [ + 'get', + 'post', + 'put', + 'patch', + 'delete', + 'options', + 'head', + 'trace', + ]; + httpMethods.forEach((method) => { + const operation = pathItem[method]; + if (operation?.tags) { + operation.tags.forEach((tag) => usedTags.add(tag)); + } + }); + }); + // Filter tags to only include those used by operations in this path group + // Exclude 
x-traitTag tags (supplementary documentation tags) + if (doc.tags) { + doc.tags = doc.tags.filter( + (tag) => usedTags.has(tag.name) && !tag['x-traitTag'] + ); + } + // Simplify info for path-specific docs + doc.info.title = pg; + doc.info.description = `API reference for ${pg}`; + doc['x-pathGroup'] = pg; + try { + if (!fs.existsSync(outPath)) { + fs.mkdirSync(outPath, { recursive: true }); + } + const baseFilename = `${prefix}${pg.replaceAll('/', '-').replace(/^-/, '')}`; + const yamlPath = path.resolve(outPath, `${baseFilename}.yaml`); + const jsonPath = path.resolve(outPath, `${baseFilename}.json`); + // Write both YAML and JSON versions + writeDataFile(doc, yamlPath); + writeJsonFile(doc, jsonPath); + console.log(`Generated: ${baseFilename}.yaml and ${baseFilename}.json`); + } catch (err) { + console.error(`Error writing path group ${pg}:`, err); + } + }); +} +/** + * Create article metadata for a path group + * + * @param openapi - OpenAPI document with x-pathGroup + * @returns Article metadata object + */ +function createArticleDataForPathGroup(openapi) { + const article = { + path: '', + fields: { + name: openapi['x-pathGroup'] || '', + describes: Object.keys(openapi.paths), + }, + }; + /** + * Convert OpenAPI path to Hugo-friendly article path + * Legacy endpoints (without /api/ prefix) go under api/ directly + * Versioned endpoints (with /api/vN/) keep their structure + * + * @param p - Path to convert (e.g., '/health', '/api/v3/query_sql') + * @returns Path suitable for Hugo content directory (e.g., 'api/health', 'api/v3/query_sql') + */ + const toHugoPath = (p) => { + if (!p) { + return ''; + } + // If path doesn't start with /api/, it's a legacy endpoint + // Place it directly under api/ to avoid collision with /api/v1/* paths + if (!p.startsWith('/api/')) { + // /health -> api/health + // /write -> api/write + return `api${p}`; + } + // /api/v1/health -> api/v1/health + // /api/v2/write -> api/v2/write + // /api/v3/query_sql -> api/v3/query_sql + 
return p.replace(/^\//, ''); + }; + /** + * Convert path to tag-friendly format (dashes instead of slashes) + * + * @param p - Path to convert + * @returns Tag-friendly path + */ + const toTagPath = (p) => { + if (!p) { + return ''; + } + return p.replace(/^\//, '').replaceAll('/', '-'); + }; + const pathGroup = openapi['x-pathGroup'] || ''; + article.path = toHugoPath(pathGroup); + // Store original path for menu display (shows actual endpoint path) + article.fields.menuName = pathGroup; + article.fields.title = openapi.info?.title; + article.fields.description = openapi.description; + const pathGroupFrags = path.parse(openapi['x-pathGroup'] || ''); + article.fields.tags = [pathGroupFrags?.dir, pathGroupFrags?.name] + .filter(Boolean) + .map((t) => toTagPath(t)); + // Extract x-relatedLinks and OpenAPI tags from path items or operations + const relatedLinks = []; + const apiTags = []; + const httpMethods = [ + 'get', + 'post', + 'put', + 'patch', + 'delete', + 'options', + 'head', + 'trace', + ]; + Object.values(openapi.paths).forEach((pathItem) => { + // Check path-level x-relatedLinks + if ( + pathItem['x-relatedLinks'] && + Array.isArray(pathItem['x-relatedLinks']) + ) { + relatedLinks.push( + ...pathItem['x-relatedLinks'].filter( + (link) => !relatedLinks.includes(link) + ) + ); + } + // Check operation-level x-relatedLinks and tags + httpMethods.forEach((method) => { + const operation = pathItem[method]; + if (operation) { + // Extract x-relatedLinks + if ( + operation['x-relatedLinks'] && + Array.isArray(operation['x-relatedLinks']) + ) { + relatedLinks.push( + ...operation['x-relatedLinks'].filter( + (link) => !relatedLinks.includes(link) + ) + ); + } + // Extract OpenAPI tags from operation + if (operation.tags && Array.isArray(operation.tags)) { + operation.tags.forEach((tag) => { + if (!apiTags.includes(tag)) { + apiTags.push(tag); + } + }); + } + } + }); + }); + // Only add related if there are links + if (relatedLinks.length > 0) { + 
article.fields.related = relatedLinks; + } + // Add OpenAPI tags from operations (for Hugo frontmatter) + if (apiTags.length > 0) { + article.fields.apiTags = apiTags; + } + return article; +} +/** + * Write OpenAPI article metadata to Hugo data files + * Generates articles.yml and articles.json + * + * @param sourcePath - Path to directory containing OpenAPI fragment files + * @param targetPath - Output path for article data + * @param opts - Options including file pattern filter + */ +function writeOpenapiArticleData(sourcePath, targetPath, opts) { + /** + * Check if path is a file + */ + const isFile = (filePath) => { + return fs.lstatSync(filePath).isFile(); + }; + /** + * Check if filename matches pattern + */ + const matchesPattern = (filePath) => { + return opts.filePattern + ? path.parse(filePath).name.startsWith(opts.filePattern) + : true; + }; + try { + const articles = fs + .readdirSync(sourcePath) + .map((fileName) => path.join(sourcePath, fileName)) + .filter(matchesPattern) + .filter(isFile) + .filter( + (filePath) => filePath.endsWith('.yaml') || filePath.endsWith('.yml') + ) // Only process YAML files + .map((filePath) => { + const openapi = readFile(filePath); + const article = createArticleDataForPathGroup(openapi); + article.fields.source = filePath; + // Hugo omits "/static" from the URI when serving files stored in "./static" + article.fields.staticFilePath = filePath.replace(/^static\//, '/'); + return article; + }); + if (!fs.existsSync(targetPath)) { + fs.mkdirSync(targetPath, { recursive: true }); + } + const articleCollection = { articles }; + // Write both YAML and JSON versions + const yamlPath = path.resolve(targetPath, 'articles.yml'); + const jsonPath = path.resolve(targetPath, 'articles.json'); + writeDataFile(articleCollection, yamlPath); + writeJsonFile(articleCollection, jsonPath); + console.log(`Generated ${articles.length} articles in ${targetPath}`); + } catch (e) { + console.error('Error writing article data:', e); + } +} +/** 
+ * Create article data for a tag-based grouping + * + * @param openapi - OpenAPI document with x-tagGroup + * @param operations - Operations for this tag + * @param tagMeta - Tag metadata from OpenAPI spec + * @returns Article metadata object + */ +function createArticleDataForTag(openapi, operations, tagMeta) { + const tagName = openapi['x-tagGroup'] || ''; + const tagSlug = slugifyTag(tagName); + const isConceptual = tagMeta?.['x-traitTag'] === true; + const article = { + path: `api/${tagSlug}`, + fields: { + name: tagName, + describes: Object.keys(openapi.paths), + title: tagName, + description: + tagMeta?.description || + openapi.info?.description || + `API reference for ${tagName}`, + tag: tagName, + isConceptual, + menuGroup: getMenuGroupForTag(tagName), + operations: operations.map((op) => ({ + operationId: op.operationId, + method: op.method, + path: op.path, + summary: op.summary, + tags: op.tags, + ...(op.compatVersion && { compatVersion: op.compatVersion }), + ...(op.externalDocs && { externalDocs: op.externalDocs }), + })), + }, + }; + // Add tag description for conceptual pages + if (tagMeta?.description) { + article.fields.tagDescription = tagMeta.description; + } + // Show security schemes section on Authentication pages + if (tagName === 'Authentication') { + article.fields.showSecuritySchemes = true; + } + // Aggregate unique externalDocs URLs from operations into article-level related + // This populates Hugo frontmatter `related` field for "Related content" links + const relatedUrls = new Set(); + // First check tag-level externalDocs + if (tagMeta?.externalDocs?.url) { + relatedUrls.add(tagMeta.externalDocs.url); + } + // Then aggregate from operations + operations.forEach((op) => { + if (op.externalDocs?.url) { + relatedUrls.add(op.externalDocs.url); + } + }); + if (relatedUrls.size > 0) { + article.fields.related = Array.from(relatedUrls); + } + return article; +} +/** + * Write tag-based OpenAPI article metadata to Hugo data files + * 
Generates articles.yml and articles.json + * + * @param sourcePath - Path to directory containing tag-based OpenAPI fragment files + * @param targetPath - Output path for article data + * @param openapi - Original OpenAPI document (for tag metadata) + * @param opts - Options including file pattern filter + */ +function writeOpenapiTagArticleData(sourcePath, targetPath, openapi, opts) { + const isFile = (filePath) => { + return fs.lstatSync(filePath).isFile(); + }; + const matchesPattern = (filePath) => { + return opts.filePattern + ? path.parse(filePath).name.startsWith(opts.filePattern) + : true; + }; + // Create tag metadata lookup + const tagMetaMap = new Map(); + (openapi.tags || []).forEach((tag) => { + tagMetaMap.set(tag.name, tag); + }); + try { + const articles = fs + .readdirSync(sourcePath) + .map((fileName) => path.join(sourcePath, fileName)) + .filter(matchesPattern) + .filter(isFile) + .filter( + (filePath) => filePath.endsWith('.yaml') || filePath.endsWith('.yml') + ) + .map((filePath) => { + const tagOpenapi = readFile(filePath); + const tagName = + tagOpenapi['x-tagGroup'] || tagOpenapi.info?.title || ''; + const tagMeta = tagMetaMap.get(tagName); + // Extract operations from the tag-filtered spec + const operations = []; + Object.entries(tagOpenapi.paths).forEach(([pathKey, pathItem]) => { + HTTP_METHODS.forEach((method) => { + const operation = pathItem[method]; + if (operation) { + const opMeta = { + operationId: operation.operationId || `${method}-${pathKey}`, + method: method.toUpperCase(), + path: pathKey, + summary: operation.summary || '', + tags: operation.tags || [], + }; + // Extract compatibility version if present + if (operation['x-compatibility-version']) { + opMeta.compatVersion = operation['x-compatibility-version']; + } + // Extract externalDocs if present + if (operation.externalDocs) { + opMeta.externalDocs = { + description: operation.externalDocs.description || '', + url: operation.externalDocs.url, + }; + } + 
operations.push(opMeta); + } + }); + }); + const article = createArticleDataForTag( + tagOpenapi, + operations, + tagMeta + ); + article.fields.source = filePath; + article.fields.staticFilePath = filePath.replace(/^static\//, '/'); + return article; + }); + if (!fs.existsSync(targetPath)) { + fs.mkdirSync(targetPath, { recursive: true }); + } + const articleCollection = { articles }; + // Write both YAML and JSON versions + const yamlPath = path.resolve(targetPath, 'articles.yml'); + const jsonPath = path.resolve(targetPath, 'articles.json'); + writeDataFile(articleCollection, yamlPath); + writeJsonFile(articleCollection, jsonPath); + console.log( + `Generated ${articles.length} tag-based articles in ${targetPath}` + ); + } catch (e) { + console.error('Error writing tag article data:', e); + } +} +/** + * Generate Hugo data files from an OpenAPI specification grouped by tag + * + * This function: + * 1. Reads the OpenAPI spec file + * 2. Groups operations by their OpenAPI tags + * 3. Writes each tag group to separate YAML and JSON files + * 4. Generates tag-based article metadata for Hugo + * + * @param options - Generation options + */ +function generateHugoDataByTag(options) { + const filenamePrefix = `${path.parse(options.specFile).name}-`; + const sourceFile = readFile(options.specFile, 'utf8'); + // Optionally generate path-based files for backwards compatibility + if (options.includePaths) { + console.log( + `\nGenerating OpenAPI path files in ${options.dataOutPath}....` + ); + writePathOpenapis(sourceFile, filenamePrefix, options.dataOutPath); + } + // Generate tag-based files + const tagOutPath = options.includePaths + ? 
path.join(options.dataOutPath, 'tags') + : options.dataOutPath; + console.log(`\nGenerating OpenAPI tag files in ${tagOutPath}....`); + writeTagOpenapis(sourceFile, filenamePrefix, tagOutPath); + console.log( + `\nGenerating OpenAPI tag article data in ${options.articleOutPath}...` + ); + writeOpenapiTagArticleData(tagOutPath, options.articleOutPath, sourceFile, { + filePattern: filenamePrefix, + }); + console.log('\nTag-based generation complete!\n'); +} +/** + * Generate Hugo data files from an OpenAPI specification + * + * This function: + * 1. Reads the OpenAPI spec file + * 2. Groups paths by their base path + * 3. Writes each group to separate YAML and JSON files + * 4. Generates article metadata for Hugo + * + * @param options - Generation options + */ +function generateHugoData(options) { + const filenamePrefix = `${path.parse(options.specFile).name}-`; + const sourceFile = readFile(options.specFile, 'utf8'); + console.log(`\nGenerating OpenAPI path files in ${options.dataOutPath}....`); + writePathOpenapis(sourceFile, filenamePrefix, options.dataOutPath); + console.log( + `\nGenerating OpenAPI article data in ${options.articleOutPath}...` + ); + writeOpenapiArticleData(options.dataOutPath, options.articleOutPath, { + filePattern: filenamePrefix, + }); + console.log('\nGeneration complete!\n'); +} +/** + * Generate path-specific OpenAPI specs from a spec file + * + * Convenience wrapper that reads the spec file and generates path-specific specs. 
+ * + * @param specFile - Path to OpenAPI spec file + * @param outPath - Output directory for path-specific specs + * @returns Map of API path to spec file web path (for use in frontmatter) + */ +function generatePathSpecificSpecs(specFile, outPath) { + const openapi = readFile(specFile, 'utf8'); + return writePathSpecificSpecs(openapi, outPath); +} +// CommonJS export for backward compatibility +module.exports = { + generateHugoData, + generateHugoDataByTag, + generatePathSpecificSpecs, + writePathSpecificSpecs, +}; +//# sourceMappingURL=index.js.map diff --git a/api-docs/scripts/generate-openapi-articles.ts b/api-docs/scripts/generate-openapi-articles.ts new file mode 100644 index 0000000000..8af33c2e61 --- /dev/null +++ b/api-docs/scripts/generate-openapi-articles.ts @@ -0,0 +1,1064 @@ +#!/usr/bin/env node +/** + * Generate OpenAPI Articles Script + * + * Generates Hugo data files and content pages from OpenAPI specifications + * for all InfluxDB products. + * + * This script: + * 1. Runs getswagger.sh to fetch/bundle OpenAPI specs + * 2. Copies specs to static directory for download + * 3. Generates path group fragments (YAML and JSON) + * 4. Creates article metadata (YAML and JSON) + * 5. 
Generates Hugo content pages from article data + * + * Usage: + * node generate-openapi-articles.js # Generate all products + * node generate-openapi-articles.js cloud-v2 # Generate single product + * node generate-openapi-articles.js cloud-v2 oss-v2 # Generate multiple products + * + * @module generate-openapi-articles + */ + +import { execSync } from 'child_process'; +import * as path from 'path'; +import * as fs from 'fs'; + +// Import the OpenAPI to Hugo converter +const openapiPathsToHugo = require('./openapi-paths-to-hugo-data/index.js'); + +/** + * Operation metadata structure from tag-based articles + */ +interface OperationMeta { + operationId: string; + method: string; + path: string; + summary: string; + tags: string[]; + /** Compatibility version (v1 or v2) for migration context */ + compatVersion?: string; + /** External documentation link */ + externalDocs?: { + description: string; + url: string; + }; +} + +/** + * Product configuration for API generation + */ +interface ProductConfig { + /** Path to the OpenAPI spec file */ + specFile: string; + /** Path to the Hugo content directory for generated pages */ + pagesDir: string; + /** Optional description of the product */ + description?: string; + /** Hugo menu identifier for this product (e.g., 'influxdb3_core') */ + menuKey?: string; + /** Skip adding menu entry to generated parent page (use when existing reference page has menu entry) */ + skipParentMenu?: boolean; + /** Use tag-based generation instead of path-based (default: false) */ + useTagBasedGeneration?: boolean; +} + +/** + * Map of product identifiers to their configuration + */ +type ProductConfigMap = Record; + +// Calculate the relative paths +const DOCS_ROOT = '.'; +const API_DOCS_ROOT = 'api-docs'; + +// CLI flags +const validateLinks = process.argv.includes('--validate-links'); + +/** + * Execute a shell command and handle errors + * + * @param command - Command to execute + * @param description - Human-readable description of the 
command + * @throws Exits process with code 1 on error + */ +function execCommand(command: string, description?: string): void { + try { + if (description) { + console.log(`\n${description}...`); + } + console.log(`Executing: ${command}\n`); + execSync(command, { stdio: 'inherit' }); + } catch (error) { + console.error(`\n❌ Error executing command: ${command}`); + if (error instanceof Error) { + console.error(error.message); + } + process.exit(1); + } +} + +/** + * Generate a clean static directory name from a product key. + * Handles the influxdb3_* products to avoid redundant 'influxdb-influxdb3' prefixes. + * + * @param productKey - Product identifier (e.g., 'cloud-v2', 'influxdb3_core') + * @returns Clean directory name (e.g., 'influxdb-cloud-v2', 'influxdb3-core') + */ +function getStaticDirName(productKey: string): string { + // For influxdb3_* products, convert underscore to hyphen and don't add prefix + if (productKey.startsWith('influxdb3_')) { + return productKey.replace('_', '-'); + } + // For other products, add 'influxdb-' prefix + return `influxdb-${productKey}`; +} + +/** + * Generate Hugo data files from OpenAPI specification + * + * @param specFile - Path to the OpenAPI spec file + * @param dataOutPath - Output path for OpenAPI path fragments + * @param articleOutPath - Output path for article metadata + */ +function generateDataFromOpenAPI( + specFile: string, + dataOutPath: string, + articleOutPath: string +): void { + if (!fs.existsSync(dataOutPath)) { + fs.mkdirSync(dataOutPath, { recursive: true }); + } + + openapiPathsToHugo.generateHugoData({ + dataOutPath, + articleOutPath, + specFile, + }); +} + +/** + * Options for generating pages from article data + */ +interface GeneratePagesOptions { + /** Path to the articles data directory */ + articlesPath: string; + /** Output path for generated content pages */ + contentPath: string; + /** Hugo menu identifier for navigation (e.g., 'influxdb3_core') */ + menuKey?: string; + /** Parent menu item 
name (e.g., 'InfluxDB HTTP API') */ + menuParent?: string; + /** Product description for the parent page */ + productDescription?: string; + /** Skip adding menu entry to generated parent page */ + skipParentMenu?: boolean; +} + +/** + * Generate Hugo content pages from article data + * + * Creates markdown files with frontmatter from article metadata. + * Each article becomes a page with type: api that renders via Scalar. + * + * @param options - Generation options + */ +function generatePagesFromArticleData(options: GeneratePagesOptions): void { + const { + articlesPath, + contentPath, + menuKey, + menuParent, + productDescription, + skipParentMenu, + } = options; + const yaml = require('js-yaml'); + const articlesFile = path.join(articlesPath, 'articles.yml'); + + if (!fs.existsSync(articlesFile)) { + console.warn(`⚠️ Articles file not found: ${articlesFile}`); + return; + } + + // Read articles data + const articlesContent = fs.readFileSync(articlesFile, 'utf8'); + const data = yaml.load(articlesContent) as { + articles: Array<{ + path: string; + fields: Record; + }>; + }; + + if (!data.articles || !Array.isArray(data.articles)) { + console.warn(`⚠️ No articles found in ${articlesFile}`); + return; + } + + // Ensure content directory exists + if (!fs.existsSync(contentPath)) { + fs.mkdirSync(contentPath, { recursive: true }); + } + + // Determine the API parent directory from the first article's path + // e.g., if article path is "api/v1/health", the API root is "api" + const firstArticlePath = data.articles[0]?.path || ''; + const apiRootDir = firstArticlePath.split('/')[0]; + + // Generate parent _index.md for the API section + if (apiRootDir) { + const apiParentDir = path.join(contentPath, apiRootDir); + const parentIndexFile = path.join(apiParentDir, '_index.md'); + + if (!fs.existsSync(apiParentDir)) { + fs.mkdirSync(apiParentDir, { recursive: true }); + } + + if (!fs.existsSync(parentIndexFile)) { + const parentFrontmatter: Record = { + title: menuParent 
|| 'HTTP API', + description: + productDescription || + 'API reference documentation for all available endpoints.', + weight: 104, + }; + + // Add menu entry for parent page (unless skipParentMenu is true) + if (menuKey && !skipParentMenu) { + parentFrontmatter.menu = { + [menuKey]: { + name: menuParent || 'HTTP API', + }, + }; + } + + const parentContent = `--- +${yaml.dump(parentFrontmatter)}--- +`; + + fs.writeFileSync(parentIndexFile, parentContent); + console.log(`✓ Generated parent index at ${parentIndexFile}`); + } + } + + // Generate a page for each article + for (const article of data.articles) { + const pagePath = path.join(contentPath, article.path); + const pageFile = path.join(pagePath, '_index.md'); + + // Create directory if needed + if (!fs.existsSync(pagePath)) { + fs.mkdirSync(pagePath, { recursive: true }); + } + + // Build frontmatter object + // Use menuName for display (actual endpoint path like /health) + // Fall back to name or path if menuName is not set + const displayName = + article.fields.menuName || article.fields.name || article.path; + const frontmatter: Record = { + title: displayName, + description: `API reference for ${displayName}`, + type: 'api', + // Use explicit layout to override Hugo's default section template lookup + // (Hugo's section lookup ignores `type`, so we need `layout` for the 3-column API layout) + layout: 'list', + staticFilePath: article.fields.staticFilePath, + weight: 100, + }; + + // Add menu entry if menuKey is provided + // Use menuName for menu display (shows actual endpoint path like /health) + if (menuKey) { + frontmatter.menu = { + [menuKey]: { + name: displayName, + ...(menuParent && { parent: menuParent }), + }, + }; + } + + // Add related links if present in article fields + if ( + article.fields.related && + Array.isArray(article.fields.related) && + article.fields.related.length > 0 + ) { + frontmatter.related = article.fields.related; + } + + // Add OpenAPI tags if present in article fields (for 
frontmatter metadata) + if ( + article.fields.apiTags && + Array.isArray(article.fields.apiTags) && + article.fields.apiTags.length > 0 + ) { + frontmatter.api_tags = article.fields.apiTags; + } + + const pageContent = `--- +${yaml.dump(frontmatter)}--- +`; + + fs.writeFileSync(pageFile, pageContent); + } + + console.log( + `✓ Generated ${data.articles.length} content pages in ${contentPath}` + ); +} + +/** + * Options for generating tag-based pages from article data + */ +interface GenerateTagPagesOptions { + /** Path to the articles data directory */ + articlesPath: string; + /** Output path for generated content pages */ + contentPath: string; + /** Hugo menu identifier for navigation (e.g., 'influxdb3_core') */ + menuKey?: string; + /** Parent menu item name (e.g., 'InfluxDB HTTP API') */ + menuParent?: string; + /** Product description for the parent page */ + productDescription?: string; + /** Skip adding menu entry to generated parent page */ + skipParentMenu?: boolean; + /** Map of API path to path-specific spec file (for single-operation rendering) */ + pathSpecFiles?: Map; +} + +/** + * Generate Hugo content pages from tag-based article data + * + * Creates markdown files with frontmatter from article metadata. + * Each article becomes a page with type: api that renders via RapiDoc. + * Includes operation metadata for TOC generation. 
+ * + * @param options - Generation options + */ +function generateTagPagesFromArticleData( + options: GenerateTagPagesOptions +): void { + const { + articlesPath, + contentPath, + menuKey, + menuParent, + productDescription, + skipParentMenu, + pathSpecFiles, + } = options; + const yaml = require('js-yaml'); + const articlesFile = path.join(articlesPath, 'articles.yml'); + + if (!fs.existsSync(articlesFile)) { + console.warn(`⚠️ Articles file not found: ${articlesFile}`); + return; + } + + // Read articles data + const articlesContent = fs.readFileSync(articlesFile, 'utf8'); + const data = yaml.load(articlesContent) as { + articles: Array<{ + path: string; + fields: { + name?: string; + title?: string; + description?: string; + tag?: string; + isConceptual?: boolean; + showSecuritySchemes?: boolean; + tagDescription?: string; + menuGroup?: string; + staticFilePath?: string; + operations?: OperationMeta[]; + related?: string[]; + }; + }>; + }; + + if (!data.articles || !Array.isArray(data.articles)) { + console.warn(`⚠️ No articles found in ${articlesFile}`); + return; + } + + // Ensure content directory exists + if (!fs.existsSync(contentPath)) { + fs.mkdirSync(contentPath, { recursive: true }); + } + + // Generate parent _index.md for the API section + const apiParentDir = path.join(contentPath, 'api'); + const parentIndexFile = path.join(apiParentDir, '_index.md'); + + if (!fs.existsSync(apiParentDir)) { + fs.mkdirSync(apiParentDir, { recursive: true }); + } + + if (!fs.existsSync(parentIndexFile)) { + const parentFrontmatter: Record = { + title: menuParent || 'HTTP API', + description: + productDescription || + 'API reference documentation for all available endpoints.', + weight: 104, + }; + + // Add menu entry for parent page (unless skipParentMenu is true) + if (menuKey && !skipParentMenu) { + parentFrontmatter.menu = { + [menuKey]: { + name: menuParent || 'HTTP API', + }, + }; + } + + const parentContent = `--- +${yaml.dump(parentFrontmatter)}--- +`; + + 
fs.writeFileSync(parentIndexFile, parentContent); + console.log(`✓ Generated parent index at ${parentIndexFile}`); + } + + // Generate a page for each article (tag) + for (const article of data.articles) { + const pagePath = path.join(contentPath, article.path); + const pageFile = path.join(pagePath, '_index.md'); + + // Create directory if needed + if (!fs.existsSync(pagePath)) { + fs.mkdirSync(pagePath, { recursive: true }); + } + + // Build frontmatter object + const title = article.fields.title || article.fields.name || article.path; + const isConceptual = article.fields.isConceptual === true; + + const frontmatter: Record = { + title, + description: article.fields.description || `API reference for ${title}`, + type: 'api', + layout: isConceptual ? 'single' : 'list', + staticFilePath: article.fields.staticFilePath, + weight: 100, + // Tag-based fields + tag: article.fields.tag, + isConceptual, + menuGroup: article.fields.menuGroup, + }; + + // Add operations for TOC generation (only for non-conceptual pages) + if ( + !isConceptual && + article.fields.operations && + article.fields.operations.length > 0 + ) { + frontmatter.operations = article.fields.operations; + } + + // Add tag description for conceptual pages + if (isConceptual && article.fields.tagDescription) { + frontmatter.tagDescription = article.fields.tagDescription; + } + + // Add showSecuritySchemes flag for authentication pages + if (article.fields.showSecuritySchemes) { + frontmatter.showSecuritySchemes = true; + } + + // Note: We deliberately don't add menu entries for tag-based API pages. + // The API sidebar navigation (api/sidebar-nav.html) handles navigation + // for API reference pages, avoiding conflicts with existing menu items + // like "Query data" and "Write data" that exist in the main sidebar. 
+ + // Add related links if present in article fields + if ( + article.fields.related && + Array.isArray(article.fields.related) && + article.fields.related.length > 0 + ) { + frontmatter.related = article.fields.related; + } + + const pageContent = `--- +${yaml.dump(frontmatter)}--- +`; + + fs.writeFileSync(pageFile, pageContent); + } + + console.log( + `✓ Generated ${data.articles.length} tag-based content pages in ${contentPath}` + ); + + // Generate individual operation pages for standalone URLs + generateOperationPages({ + articlesPath, + contentPath, + pathSpecFiles, + }); +} + +/** + * Options for generating operation pages + */ +interface GenerateOperationPagesOptions { + /** Path to the articles data directory */ + articlesPath: string; + /** Output path for generated content pages */ + contentPath: string; + /** Map of API path to path-specific spec file (for single-operation rendering) */ + pathSpecFiles?: Map; +} + +/** + * Convert API path to URL-safe slug + * + * Transforms an API path like "/api/v3/write_lp" to a URL-friendly format. + * Removes leading slash and uses the path as-is (underscores are URL-safe). + * + * @param apiPath - The API path (e.g., "/write", "/api/v3/write_lp") + * @returns URL-safe path slug (e.g., "write", "api/v3/write_lp") + */ +function apiPathToSlug(apiPath: string): string { + // Remove leading slash, keep underscores (they're URL-safe) + return apiPath.replace(/^\//, ''); +} + +/** + * Generate standalone Hugo content pages for each API operation + * + * Creates individual pages at path-based URLs like /api/write/post/ + * for each operation, using RapiDoc Mini. + * + * When pathSpecFiles is provided, uses path-specific specs for single-operation + * rendering (filters by method only, avoiding path prefix conflicts). + * Falls back to tag-based specs when pathSpecFiles is not available. 
+ * + * @param options - Generation options + */ +function generateOperationPages(options: GenerateOperationPagesOptions): void { + const { articlesPath, contentPath, pathSpecFiles } = options; + const yaml = require('js-yaml'); + const articlesFile = path.join(articlesPath, 'articles.yml'); + + if (!fs.existsSync(articlesFile)) { + console.warn(`⚠️ Articles file not found: ${articlesFile}`); + return; + } + + // Read articles data + const articlesContent = fs.readFileSync(articlesFile, 'utf8'); + const data = yaml.load(articlesContent) as { + articles: Array<{ + path: string; + fields: { + name?: string; + title?: string; + tag?: string; + isConceptual?: boolean; + showSecuritySchemes?: boolean; + staticFilePath?: string; + operations?: OperationMeta[]; + related?: string[]; + }; + }>; + }; + + if (!data.articles || !Array.isArray(data.articles)) { + console.warn(`⚠️ No articles found in ${articlesFile}`); + return; + } + + let operationCount = 0; + + // Process each article (tag) and generate pages for its operations + for (const article of data.articles) { + // Skip conceptual articles (they don't have operations) + if (article.fields.isConceptual) { + continue; + } + + const operations = article.fields.operations || []; + const tagSpecFile = article.fields.staticFilePath; + const tagName = article.fields.tag || article.fields.name || ''; + + for (const op of operations) { + // Build operation page path: api/{path}/{method}/ + // e.g., /write -> api/write/post/ + // e.g., /api/v3/write_lp -> api/api/v3/write_lp/post/ + const pathSlug = apiPathToSlug(op.path); + const method = op.method.toLowerCase(); + const operationDir = path.join(contentPath, 'api', pathSlug, method); + const operationFile = path.join(operationDir, '_index.md'); + + // Create directory if needed + if (!fs.existsSync(operationDir)) { + fs.mkdirSync(operationDir, { recursive: true }); + } + + // Build frontmatter + const title = op.summary || `${op.method} ${op.path}`; + + // Determine spec 
file and match-paths based on availability of path-specific specs + // Path-specific specs isolate the path at file level, so we only filter by method + // This avoids substring matching issues (e.g., /admin matching /admin/regenerate) + const pathSpecFile = pathSpecFiles?.get(op.path); + const specFile = pathSpecFile || tagSpecFile; + const matchPaths = pathSpecFile ? method : `${method} ${op.path}`; + + const frontmatter: Record = { + title, + description: `API reference for ${op.method} ${op.path}`, + type: 'api-operation', + layout: 'operation', + // RapiDoc Mini configuration + specFile, + // When using path-specific spec: just method (e.g., "post") + // When using tag spec: method + path (e.g., "post /write") + matchPaths, + // Operation metadata + operationId: op.operationId, + method: op.method, + apiPath: op.path, + tag: tagName, + }; + + // Add compatibility version if present + if (op.compatVersion) { + frontmatter.compatVersion = op.compatVersion; + } + + // Add related links from operation's externalDocs + if (op.externalDocs?.url) { + frontmatter.related = [op.externalDocs.url]; + } + + const pageContent = `--- +${yaml.dump(frontmatter)}--- +`; + + fs.writeFileSync(operationFile, pageContent); + operationCount++; + } + } + + console.log( + `✓ Generated ${operationCount} operation pages in ${contentPath}/api/` + ); +} + +/** + * Product configurations for all InfluxDB editions + * + * Maps product identifiers to their OpenAPI specs and content directories + */ +const productConfigs: ProductConfigMap = { + // TODO: v2 products (cloud-v2, oss-v2) are disabled for now because they + // have existing Redoc-based API reference at /reference/api/ + // Uncomment when ready to migrate v2 products to Scalar + // 'cloud-v2': { + // specFile: path.join(API_DOCS_ROOT, 'influxdb/cloud/v2/ref.yml'), + // pagesDir: path.join(DOCS_ROOT, 'content/influxdb/cloud/api'), + // description: 'InfluxDB Cloud (v2 API)', + // menuKey: 'influxdb_cloud', + // }, + // 'oss-v2': { 
+ // specFile: path.join(API_DOCS_ROOT, 'influxdb/v2/v2/ref.yml'), + // pagesDir: path.join(DOCS_ROOT, 'content/influxdb/v2/api'), + // description: 'InfluxDB OSS v2', + // menuKey: 'influxdb_v2', + // }, + // InfluxDB 3 products use tag-based generation for better UX + // Keys use underscores to match Hugo data directory structure + influxdb3_core: { + specFile: path.join(API_DOCS_ROOT, 'influxdb3/core/v3/ref.yml'), + pagesDir: path.join(DOCS_ROOT, 'content/influxdb3/core'), + description: 'InfluxDB 3 Core', + menuKey: 'influxdb3_core', + useTagBasedGeneration: true, + }, + influxdb3_enterprise: { + specFile: path.join(API_DOCS_ROOT, 'influxdb3/enterprise/v3/ref.yml'), + pagesDir: path.join(DOCS_ROOT, 'content/influxdb3/enterprise'), + description: 'InfluxDB 3 Enterprise', + menuKey: 'influxdb3_enterprise', + useTagBasedGeneration: true, + }, + // Note: Cloud Dedicated, Serverless, and Clustered use management APIs + // with paths like /accounts/{accountId}/... so we put them under /api/ + // These products have existing /reference/api/ pages with menu entries, + // so we skip adding menu entries to the generated parent pages. 
+ 'cloud-dedicated': { + specFile: path.join( + API_DOCS_ROOT, + 'influxdb3/cloud-dedicated/management/openapi.yml' + ), + pagesDir: path.join(DOCS_ROOT, 'content/influxdb3/cloud-dedicated/api'), + description: 'InfluxDB Cloud Dedicated', + menuKey: 'influxdb3_cloud_dedicated', + skipParentMenu: true, + }, + 'cloud-serverless': { + specFile: path.join( + API_DOCS_ROOT, + 'influxdb3/cloud-serverless/management/openapi.yml' + ), + pagesDir: path.join(DOCS_ROOT, 'content/influxdb3/cloud-serverless/api'), + description: 'InfluxDB Cloud Serverless', + menuKey: 'influxdb3_cloud_serverless', + skipParentMenu: true, + }, + clustered: { + specFile: path.join( + API_DOCS_ROOT, + 'influxdb3/clustered/management/openapi.yml' + ), + pagesDir: path.join(DOCS_ROOT, 'content/influxdb3/clustered/api'), + description: 'InfluxDB Clustered', + menuKey: 'influxdb3_clustered', + skipParentMenu: true, + }, +}; + +/** Fields that can contain markdown with links */ +const MARKDOWN_FIELDS = new Set(['description', 'summary']); + +/** Link placeholder pattern */ +const LINK_PATTERN = /\/influxdb\/version\//g; + +/** + * Derive documentation root from spec file path. + * + * @example + * 'api-docs/influxdb3/core/v3/ref.yml' → '/influxdb3/core' + * 'api-docs/influxdb3/enterprise/v3/ref.yml' → '/influxdb3/enterprise' + * 'api-docs/influxdb/v2/ref.yml' → '/influxdb/v2' + */ +function deriveProductPath(specPath: string): string { + // Match: api-docs/(influxdb3|influxdb)/(product-or-version)/... + const match = specPath.match(/api-docs\/(influxdb3?)\/([\w-]+)\//); + if (!match) { + throw new Error(`Cannot derive product path from: ${specPath}`); + } + return `/${match[1]}/${match[2]}`; +} + +/** + * Transform documentation links in OpenAPI spec markdown fields. + * Replaces `/influxdb/version/` with the actual product path. 
+ * + * @param spec - Parsed OpenAPI spec object + * @param productPath - Target path (e.g., '/influxdb3/core') + * @returns Spec with transformed links (new object, original unchanged) + */ +function transformDocLinks( + spec: Record, + productPath: string +): Record { + function transformValue(value: unknown): unknown { + if (typeof value === 'string') { + return value.replace(LINK_PATTERN, `${productPath}/`); + } + if (Array.isArray(value)) { + return value.map(transformValue); + } + if (value !== null && typeof value === 'object') { + return transformObject(value as Record); + } + return value; + } + + function transformObject( + obj: Record + ): Record { + const result: Record = {}; + for (const [key, value] of Object.entries(obj)) { + if (MARKDOWN_FIELDS.has(key) && typeof value === 'string') { + result[key] = value.replace(LINK_PATTERN, `${productPath}/`); + } else if (value !== null && typeof value === 'object') { + result[key] = transformValue(value); + } else { + result[key] = value; + } + } + return result; + } + + return transformObject(spec); +} + +/** + * Resolve a URL path to a content file path. + * + * @example + * '/influxdb3/core/api/auth/' → 'content/influxdb3/core/api/auth/_index.md' + */ +function resolveContentPath(urlPath: string, contentDir: string): string { + const normalized = urlPath.replace(/\/$/, ''); + const indexPath = path.join(contentDir, normalized, '_index.md'); + const directPath = path.join(contentDir, normalized + '.md'); + + if (fs.existsSync(indexPath)) { + return indexPath; + } + if (fs.existsSync(directPath)) { + return directPath; + } + return indexPath; // Return expected path for error message +} + +/** + * Validate that transformed links point to existing content. 
+ * + * @param spec - Transformed OpenAPI spec + * @param contentDir - Path to Hugo content directory + * @returns Array of error messages for broken links + */ +function validateDocLinks( + spec: Record, + contentDir: string +): string[] { + const errors: string[] = []; + const linkPattern = /\[([^\]]+)\]\(([^)]+)\)/g; + + function extractLinks(value: unknown, jsonPath: string): void { + if (typeof value === 'string') { + let match; + while ((match = linkPattern.exec(value)) !== null) { + const [, linkText, linkUrl] = match; + // Only validate internal links (start with /) + if (linkUrl.startsWith('/') && !linkUrl.startsWith('//')) { + const contentPath = resolveContentPath(linkUrl, contentDir); + if (!fs.existsSync(contentPath)) { + errors.push( + `Broken link at ${jsonPath}: [${linkText}](${linkUrl})` + ); + } + } + } + // Reset regex lastIndex for next string + linkPattern.lastIndex = 0; + } else if (Array.isArray(value)) { + value.forEach((item, index) => + extractLinks(item, `${jsonPath}[${index}]`) + ); + } else if (value !== null && typeof value === 'object') { + for (const [key, val] of Object.entries( + value as Record + )) { + extractLinks(val, `${jsonPath}.${key}`); + } + } + } + + extractLinks(spec, 'spec'); + return errors; +} + +/** + * Process a single product: fetch spec, generate data, and create pages + * + * @param productKey - Product identifier (e.g., 'cloud-v2') + * @param config - Product configuration + */ +function processProduct(productKey: string, config: ProductConfig): void { + console.log('\n' + '='.repeat(80)); + console.log(`Processing ${config.description || productKey}`); + console.log('='.repeat(80)); + + const staticPath = path.join(DOCS_ROOT, 'static/openapi'); + const staticDirName = getStaticDirName(productKey); + const staticSpecPath = path.join(staticPath, `${staticDirName}.yml`); + const staticJsonSpecPath = path.join(staticPath, `${staticDirName}.json`); + const staticPathsPath = path.join(staticPath, 
`${staticDirName}/paths`); + const articlesPath = path.join( + DOCS_ROOT, + `data/article_data/influxdb/${productKey}` + ); + + // Check if spec file exists + if (!fs.existsSync(config.specFile)) { + console.warn(`⚠️ Spec file not found: ${config.specFile}`); + console.log('Skipping this product. Run getswagger.sh first if needed.\n'); + return; + } + + try { + // Step 1: Execute the getswagger.sh script to fetch/bundle the spec + // Note: getswagger.sh must run from api-docs/ because it uses relative paths + const getswaggerScript = path.join(API_DOCS_ROOT, 'getswagger.sh'); + if (fs.existsSync(getswaggerScript)) { + execCommand( + `cd ${API_DOCS_ROOT} && ./getswagger.sh ${productKey} -B`, + `Fetching OpenAPI spec for ${productKey}` + ); + } else { + console.log(`⚠️ getswagger.sh not found, skipping fetch step`); + } + + // Step 2: Ensure static directory exists + if (!fs.existsSync(staticPath)) { + fs.mkdirSync(staticPath, { recursive: true }); + } + + // Step 3: Load spec, transform documentation links, and write to static folder + if (fs.existsSync(config.specFile)) { + try { + const yaml = require('js-yaml'); + const specContent = fs.readFileSync(config.specFile, 'utf8'); + const specObject = yaml.load(specContent) as Record; + + // Transform documentation links (/influxdb/version/ -> actual product path) + const productPath = deriveProductPath(config.specFile); + const transformedSpec = transformDocLinks(specObject, productPath); + console.log(`✓ Transformed documentation links to ${productPath}`); + + // Validate links if enabled + if (validateLinks) { + const contentDir = path.resolve(__dirname, '../../content'); + const linkErrors = validateDocLinks(transformedSpec, contentDir); + if (linkErrors.length > 0) { + console.warn( + `\n⚠️ Link validation warnings for ${config.specFile}:` + ); + linkErrors.forEach((err) => console.warn(` ${err}`)); + } + } + + // Write transformed spec to static folder (YAML) + fs.writeFileSync(staticSpecPath, 
yaml.dump(transformedSpec)); + console.log(`✓ Wrote transformed spec to ${staticSpecPath}`); + + // Step 4: Generate JSON version of the spec + fs.writeFileSync( + staticJsonSpecPath, + JSON.stringify(transformedSpec, null, 2) + ); + console.log(`✓ Generated JSON spec at ${staticJsonSpecPath}`); + } catch (specError) { + console.warn(`⚠️ Could not process spec: ${specError}`); + } + } + + // Step 5: Generate Hugo data from OpenAPI spec (using transformed spec) + if (config.useTagBasedGeneration) { + // Tag-based generation: group operations by OpenAPI tag + const staticTagsPath = path.join(staticPath, `${staticDirName}/tags`); + console.log(`\n📋 Using tag-based generation for ${productKey}...`); + openapiPathsToHugo.generateHugoDataByTag({ + specFile: staticSpecPath, + dataOutPath: staticTagsPath, + articleOutPath: articlesPath, + includePaths: true, // Also generate path-based files for backwards compatibility + }); + + // Step 5b: Generate path-specific specs for operation pages + // Each path gets its own spec file, enabling method-only filtering + // This avoids substring matching issues (e.g., /admin matching /admin/regenerate) + console.log( + `\n📋 Generating path-specific specs in ${staticPathsPath}...` + ); + const pathSpecFiles = openapiPathsToHugo.generatePathSpecificSpecs( + staticSpecPath, + staticPathsPath + ); + + // Step 6: Generate Hugo content pages from tag-based article data + generateTagPagesFromArticleData({ + articlesPath, + contentPath: config.pagesDir, + menuKey: config.menuKey, + menuParent: 'InfluxDB HTTP API', + skipParentMenu: config.skipParentMenu, + pathSpecFiles, + }); + } else { + // Path-based generation: group paths by URL prefix (legacy) + generateDataFromOpenAPI(staticSpecPath, staticPathsPath, articlesPath); + + // Step 6: Generate Hugo content pages from path-based article data + generatePagesFromArticleData({ + articlesPath, + contentPath: config.pagesDir, + menuKey: config.menuKey, + menuParent: 'InfluxDB HTTP API', + 
skipParentMenu: config.skipParentMenu, + }); + } + + console.log( + `\n✅ Successfully processed ${config.description || productKey}\n` + ); + } catch (error) { + console.error(`\n❌ Error processing ${productKey}:`, error); + process.exit(1); + } +} + +/** + * Main execution function + */ +function main(): void { + const args = process.argv.slice(2); + + // Determine which products to process + let productsToProcess: string[]; + + if (args.length === 0) { + // No arguments: process all products + productsToProcess = Object.keys(productConfigs); + console.log('\n📋 Processing all products...\n'); + } else { + // Arguments provided: process only specified products + productsToProcess = args; + console.log( + `\n📋 Processing specified products: ${productsToProcess.join(', ')}\n` + ); + } + + // Validate product keys + const invalidProducts = productsToProcess.filter( + (key) => !productConfigs[key] + ); + if (invalidProducts.length > 0) { + console.error( + `\n❌ Invalid product identifier(s): ${invalidProducts.join(', ')}` + ); + console.error('\nValid products:'); + Object.keys(productConfigs).forEach((key) => { + console.error(` - ${key}: ${productConfigs[key].description}`); + }); + process.exit(1); + } + + // Process each product + productsToProcess.forEach((productKey) => { + const config = productConfigs[productKey]; + processProduct(productKey, config); + }); + + console.log('\n' + '='.repeat(80)); + console.log('✅ All products processed successfully!'); + console.log('='.repeat(80) + '\n'); +} + +// Execute if run directly +if (require.main === module) { + main(); +} + +// Export for use as a module +export { + productConfigs, + processProduct, + generateDataFromOpenAPI, + generatePagesFromArticleData, + deriveProductPath, + transformDocLinks, + validateDocLinks, + resolveContentPath, + MARKDOWN_FIELDS, + LINK_PATTERN, +}; diff --git a/api-docs/scripts/openapi-paths-to-hugo-data/index.ts b/api-docs/scripts/openapi-paths-to-hugo-data/index.ts new file mode 100644 
index 0000000000..b401175660 --- /dev/null +++ b/api-docs/scripts/openapi-paths-to-hugo-data/index.ts @@ -0,0 +1,1271 @@ +/** + * OpenAPI to Hugo Data Converter + * + * Converts OpenAPI v3 specifications into Hugo-compatible data files. + * Generates both YAML and JSON versions of spec fragments grouped by path. + * + * @module openapi-paths-to-hugo-data + */ + +import * as yaml from 'js-yaml'; +import * as fs from 'fs'; +import * as path from 'path'; + +/** + * OpenAPI path item object + */ +interface PathItem { + get?: Operation; + post?: Operation; + put?: Operation; + patch?: Operation; + delete?: Operation; + options?: Operation; + head?: Operation; + trace?: Operation; + parameters?: Parameter[]; + [key: string]: unknown; +} + +/** + * OpenAPI operation object + */ +interface Operation { + operationId?: string; + summary?: string; + description?: string; + tags?: string[]; + parameters?: Parameter[]; + requestBody?: RequestBody; + responses?: Record; + externalDocs?: ExternalDocs; + /** Compatibility version for migration context (v1 or v2) */ + 'x-compatibility-version'?: string; + [key: string]: unknown; +} + +/** + * OpenAPI parameter object + */ +interface Parameter { + name: string; + in: 'query' | 'header' | 'path' | 'cookie'; + description?: string; + required?: boolean; + schema?: Schema; + [key: string]: unknown; +} + +/** + * OpenAPI request body object + */ +interface RequestBody { + description?: string; + content?: Record; + required?: boolean; + [key: string]: unknown; +} + +/** + * OpenAPI response object + */ +interface Response { + description: string; + content?: Record; + headers?: Record; + [key: string]: unknown; +} + +/** + * OpenAPI media type object + */ +interface MediaType { + schema?: Schema; + example?: unknown; + examples?: Record; + [key: string]: unknown; +} + +/** + * OpenAPI schema object + */ +interface Schema { + type?: string; + format?: string; + description?: string; + properties?: Record; + items?: Schema; + required?: 
string[]; + [key: string]: unknown; +} + +/** + * OpenAPI header object + */ +interface Header { + description?: string; + schema?: Schema; + [key: string]: unknown; +} + +/** + * OpenAPI example object + */ +interface Example { + summary?: string; + description?: string; + value?: unknown; + [key: string]: unknown; +} + +/** + * OpenAPI document structure + */ +interface OpenAPIDocument { + openapi: string; + info: Info; + paths: Record; + components?: Components; + servers?: Server[]; + tags?: Tag[]; + description?: string; + 'x-pathGroup'?: string; + [key: string]: unknown; +} + +/** + * OpenAPI info object + */ +interface Info { + title: string; + version: string; + description?: string; + termsOfService?: string; + contact?: Contact; + license?: License; + [key: string]: unknown; +} + +/** + * OpenAPI contact object + */ +interface Contact { + name?: string; + url?: string; + email?: string; + [key: string]: unknown; +} + +/** + * OpenAPI license object + */ +interface License { + name: string; + url?: string; + [key: string]: unknown; +} + +/** + * OpenAPI components object + */ +interface Components { + schemas?: Record; + responses?: Record; + parameters?: Record; + requestBodies?: Record; + headers?: Record; + securitySchemes?: Record; + [key: string]: unknown; +} + +/** + * OpenAPI security scheme object + */ +interface SecurityScheme { + type: string; + description?: string; + [key: string]: unknown; +} + +/** + * OpenAPI server object + */ +interface Server { + url: string; + description?: string; + variables?: Record; + [key: string]: unknown; +} + +/** + * OpenAPI server variable object + */ +interface ServerVariable { + default: string; + enum?: string[]; + description?: string; + [key: string]: unknown; +} + +/** + * OpenAPI tag object + */ +interface Tag { + name: string; + description?: string; + externalDocs?: ExternalDocs; + /** Indicates this is a conceptual/supplementary tag (no operations) */ + 'x-traitTag'?: boolean; + [key: string]: 
unknown; +} + +/** + * Operation metadata for TOC generation + */ +interface OperationMeta { + operationId: string; + method: string; + path: string; + summary: string; + tags: string[]; + /** Compatibility version (v1 or v2) for migration context */ + compatVersion?: string; + /** External documentation link */ + externalDocs?: { + description: string; + url: string; + }; +} + +/** + * OpenAPI external docs object + */ +interface ExternalDocs { + url: string; + description?: string; + [key: string]: unknown; +} + +/** + * Article metadata for Hugo + */ +interface Article { + path: string; + fields: { + name: string; + describes: string[]; + title?: string; + description?: string; + tags?: string[]; + source?: string; + staticFilePath?: string; + /** Related documentation links extracted from x-relatedLinks */ + related?: string[]; + /** OpenAPI tags from operations (for Hugo frontmatter) */ + apiTags?: string[]; + /** Menu display name (actual endpoint path, different from Hugo path) */ + menuName?: string; + /** OpenAPI tag name (for tag-based articles) */ + tag?: string; + /** Whether this is a conceptual tag (x-traitTag) */ + isConceptual?: boolean; + /** Whether to show security schemes section */ + showSecuritySchemes?: boolean; + /** Tag description from OpenAPI spec */ + tagDescription?: string; + /** Sidebar navigation group */ + menuGroup?: string; + /** Operations metadata for TOC generation */ + operations?: OperationMeta[]; + }; +} + +/** + * Article collection for Hugo data files + */ +interface ArticleCollection { + articles: Article[]; +} + +/** + * Options for generating Hugo data + */ +export interface GenerateHugoDataOptions { + /** Path to the OpenAPI spec file */ + specFile: string; + /** Output path for generated OpenAPI path fragments */ + dataOutPath: string; + /** Output path for article metadata */ + articleOutPath: string; +} + +/** + * Options for writing OpenAPI article data + */ +interface WriteOpenapiArticleDataOptions { + /** File 
pattern to match when filtering files */ + filePattern?: string; +} + +/** + * Read a YAML file and parse it + * + * @param filepath - Path to the YAML file + * @param encoding - File encoding (default: 'utf8') + * @returns Parsed YAML content + */ +function readFile( + filepath: string, + encoding: BufferEncoding = 'utf8' +): OpenAPIDocument { + const content = fs.readFileSync(filepath, encoding); + return yaml.load(content) as OpenAPIDocument; +} + +/** + * Write data to a YAML file + * + * @param data - Data to write + * @param outputTo - Output file path + */ +function writeDataFile(data: unknown, outputTo: string): void { + fs.writeFileSync(outputTo, yaml.dump(data)); +} + +/** + * Write data to a JSON file + * + * @param data - Data to write + * @param outputTo - Output file path + */ +function writeJsonFile(data: unknown, outputTo: string): void { + fs.writeFileSync(outputTo, JSON.stringify(data, null, 2)); +} + +/** + * OpenAPI utility functions + */ +const openapiUtils = { + /** + * Check if a path fragment is a placeholder (e.g., {id}) + * + * @param str - Path fragment to check + * @returns True if the fragment is a placeholder + */ + isPlaceholderFragment(str: string): boolean { + const placeholderRegex = /^\{.*\}$/; + return placeholderRegex.test(str); + }, +}; + +/** + * Convert tag name to URL-friendly slug + * + * @param tagName - Tag name (e.g., "Write data", "Processing engine") + * @returns URL-friendly slug (e.g., "write-data", "processing-engine") + */ +function slugifyTag(tagName: string): string { + return tagName + .toLowerCase() + .replace(/[^a-z0-9]+/g, '-') + .replace(/^-|-$/g, ''); +} + +/** + * Menu group mappings for tag-based navigation + * Maps OpenAPI tags to sidebar groups + */ +const TAG_MENU_GROUPS: Record = { + // Concepts group + 'Quick start': 'Concepts', + Authentication: 'Concepts', + 'Headers and parameters': 'Concepts', + 'Response codes': 'Concepts', + // Data Operations group + 'Write data': 'Data Operations', + 'Query 
data': 'Data Operations', + 'Cache data': 'Data Operations', + // Administration group + Database: 'Administration', + Table: 'Administration', + Token: 'Administration', + // Processing Engine group + 'Processing engine': 'Processing Engine', + // Server group + 'Server information': 'Server', + // Compatibility group + 'Compatibility endpoints': 'Compatibility', +}; + +/** + * Get menu group for a tag + * + * @param tagName - Tag name + * @returns Menu group name or 'Other' if not mapped + */ +function getMenuGroupForTag(tagName: string): string { + return TAG_MENU_GROUPS[tagName] || 'Other'; +} + +/** + * HTTP methods to check for operations + */ +const HTTP_METHODS = [ + 'get', + 'post', + 'put', + 'patch', + 'delete', + 'options', + 'head', + 'trace', +] as const; + +/** + * Extract all operations from an OpenAPI document grouped by tag + * + * @param openapi - OpenAPI document + * @returns Map of tag name to operations with that tag + */ +function extractOperationsByTag( + openapi: OpenAPIDocument +): Map { + const tagOperations = new Map(); + + Object.entries(openapi.paths).forEach(([pathKey, pathItem]) => { + HTTP_METHODS.forEach((method) => { + const operation = pathItem[method] as Operation | undefined; + if (operation) { + const opMeta: OperationMeta = { + operationId: operation.operationId || `${method}-${pathKey}`, + method: method.toUpperCase(), + path: pathKey, + summary: operation.summary || '', + tags: operation.tags || [], + }; + + // Extract compatibility version if present + if (operation['x-compatibility-version']) { + opMeta.compatVersion = operation['x-compatibility-version']; + } + + // Extract externalDocs if present + if (operation.externalDocs) { + opMeta.externalDocs = { + description: operation.externalDocs.description || '', + url: operation.externalDocs.url, + }; + } + + // Add operation to each of its tags + (operation.tags || []).forEach((tag) => { + if (!tagOperations.has(tag)) { + tagOperations.set(tag, []); + } + 
tagOperations.get(tag)!.push(opMeta); + }); + } + }); + }); + + return tagOperations; +} + +/** + * Write OpenAPI specs grouped by tag to separate files + * Generates both YAML and JSON versions per tag + * + * @param openapi - OpenAPI document + * @param prefix - Filename prefix for output files + * @param outPath - Output directory path + */ +function writeTagOpenapis( + openapi: OpenAPIDocument, + prefix: string, + outPath: string +): void { + const tagOperations = extractOperationsByTag(openapi); + + // Process each tag + tagOperations.forEach((operations, tagName) => { + // Deep copy openapi + const doc: OpenAPIDocument = JSON.parse(JSON.stringify(openapi)); + + // Filter paths to only include those with operations for this tag + const filteredPaths: Record = {}; + Object.entries(openapi.paths).forEach(([pathKey, pathItem]) => { + const filteredPathItem: PathItem = {}; + let hasOperations = false; + + HTTP_METHODS.forEach((method) => { + const operation = pathItem[method] as Operation | undefined; + if (operation?.tags?.includes(tagName)) { + // Clone the operation and restrict tags to only this tag + // This prevents RapiDoc from rendering the operation multiple times + // (once per tag) when an operation belongs to multiple tags + const filteredOperation = { ...operation, tags: [tagName] }; + filteredPathItem[method] = filteredOperation; + hasOperations = true; + } + }); + + // Include path-level parameters if we have operations + if (hasOperations) { + if (pathItem.parameters) { + filteredPathItem.parameters = pathItem.parameters; + } + filteredPaths[pathKey] = filteredPathItem; + } + }); + + doc.paths = filteredPaths; + + // Filter tags to only include this tag (and trait tags for context) + if (doc.tags) { + doc.tags = doc.tags.filter( + (tag) => tag.name === tagName || tag['x-traitTag'] + ); + } + + // Update info + const tagSlug = slugifyTag(tagName); + doc.info.title = tagName; + doc.info.description = `API reference for ${tagName}`; + 
doc['x-tagGroup'] = tagName; + + try { + if (!fs.existsSync(outPath)) { + fs.mkdirSync(outPath, { recursive: true }); + } + + const baseFilename = `${prefix}${tagSlug}`; + const yamlPath = path.resolve(outPath, `${baseFilename}.yaml`); + const jsonPath = path.resolve(outPath, `${baseFilename}.json`); + + writeDataFile(doc, yamlPath); + writeJsonFile(doc, jsonPath); + + console.log( + `Generated tag spec: ${baseFilename}.yaml (${Object.keys(filteredPaths).length} paths, ${operations.length} operations)` + ); + } catch (err) { + console.error(`Error writing tag group ${tagName}:`, err); + } + }); + + // Also create specs for conceptual tags (x-traitTag) without operations + (openapi.tags || []).forEach((tag) => { + if (tag['x-traitTag'] && !tagOperations.has(tag.name)) { + const doc: OpenAPIDocument = JSON.parse(JSON.stringify(openapi)); + doc.paths = {}; + doc.tags = [tag]; + doc.info.title = tag.name; + doc.info.description = tag.description || `API reference for ${tag.name}`; + doc['x-tagGroup'] = tag.name; + + const tagSlug = slugifyTag(tag.name); + + try { + const baseFilename = `${prefix}${tagSlug}`; + const yamlPath = path.resolve(outPath, `${baseFilename}.yaml`); + const jsonPath = path.resolve(outPath, `${baseFilename}.json`); + + writeDataFile(doc, yamlPath); + writeJsonFile(doc, jsonPath); + + console.log(`Generated conceptual tag spec: ${baseFilename}.yaml`); + } catch (err) { + console.error(`Error writing conceptual tag ${tag.name}:`, err); + } + } + }); +} + +/** + * Convert API path to filename-safe slug + * + * @param apiPath - API path (e.g., "/api/v3/configure/token/admin") + * @returns Filename-safe slug (e.g., "api-v3-configure-token-admin") + */ +function pathToFileSlug(apiPath: string): string { + return apiPath + .replace(/^\//, '') // Remove leading slash + .replace(/\//g, '-') // Replace slashes with dashes + .replace(/[{}]/g, '') // Remove curly braces from path params + .replace(/-+/g, '-') // Collapse multiple dashes + .replace(/-$/, ''); 
// Remove trailing dash +} + +/** + * Write path-specific OpenAPI specs (one file per exact API path) + * + * Each file contains all HTTP methods for a single path, enabling + * operation pages to filter by method only (no path prefix conflicts). + * + * @param openapi - OpenAPI document + * @param outPath - Output directory path (e.g., "static/openapi/{product}/paths") + * @returns Map of API path to spec file path (for use in frontmatter) + */ +export function writePathSpecificSpecs( + openapi: OpenAPIDocument, + outPath: string +): Map { + const pathSpecFiles = new Map(); + + if (!fs.existsSync(outPath)) { + fs.mkdirSync(outPath, { recursive: true }); + } + + Object.entries(openapi.paths).forEach(([apiPath, pathItem]) => { + // Deep clone pathItem to avoid mutating original + const clonedPathItem: PathItem = JSON.parse(JSON.stringify(pathItem)); + + // Limit each operation to a single tag to prevent duplicate rendering in RapiDoc + // RapiDoc renders operations once per tag, so multiple tags cause duplicates + const usedTags = new Set(); + HTTP_METHODS.forEach((method) => { + const operation = clonedPathItem[method] as Operation | undefined; + if (operation?.tags && operation.tags.length > 0) { + // Select the most specific tag to avoid duplicate rendering + // Prefer "Auth token" over "Authentication" for token-related operations + let primaryTag = operation.tags[0]; + if (operation.tags.includes('Auth token')) { + primaryTag = 'Auth token'; + } + operation.tags = [primaryTag]; + usedTags.add(primaryTag); + } + }); + + // Create spec with just this path (all its methods) + // Include global security requirements so RapiDoc displays auth correctly + const pathSpec: OpenAPIDocument = { + openapi: openapi.openapi, + info: { + ...openapi.info, + title: apiPath, + description: `API reference for ${apiPath}`, + }, + paths: { [apiPath]: clonedPathItem }, + components: openapi.components, // Include for $ref resolution + servers: openapi.servers, + security: 
openapi.security, // Global security requirements + }; + + // Filter spec-level tags to only include those used by operations + if (openapi.tags) { + pathSpec.tags = openapi.tags.filter( + (tag) => usedTags.has(tag.name) && !tag['x-traitTag'] + ); + } + + // Write files + const slug = pathToFileSlug(apiPath); + const yamlPath = path.resolve(outPath, `${slug}.yaml`); + const jsonPath = path.resolve(outPath, `${slug}.json`); + + writeDataFile(pathSpec, yamlPath); + writeJsonFile(pathSpec, jsonPath); + + // Store the web-accessible path (without "static/" prefix) + // Hugo serves files from static/ at the root, so we extract the path after 'static/' + const staticMatch = yamlPath.match(/static\/(.+)$/); + const webPath = staticMatch ? `/${staticMatch[1]}` : yamlPath; + pathSpecFiles.set(apiPath, webPath); + }); + + console.log( + `Generated ${pathSpecFiles.size} path-specific specs in ${outPath}` + ); + + return pathSpecFiles; +} + +/** + * Write OpenAPI specs grouped by path to separate files + * Generates both YAML and JSON versions + * + * @param openapi - OpenAPI document + * @param prefix - Filename prefix for output files + * @param outPath - Output directory path + */ +function writePathOpenapis( + openapi: OpenAPIDocument, + prefix: string, + outPath: string +): void { + const pathGroups: Record> = {}; + + // Group paths by their base path (first 3-4 segments, excluding placeholders) + Object.keys(openapi.paths) + .sort() + .forEach((p) => { + const delimiter = '/'; + let key = p.split(delimiter); + + // Check if this is an item path (ends with a placeholder) + let isItemPath = openapiUtils.isPlaceholderFragment(key[key.length - 1]); + if (isItemPath) { + key = key.slice(0, -1); + } + + // Take first 4 segments + key = key.slice(0, 4); + + // Check if the last segment is still a placeholder + isItemPath = openapiUtils.isPlaceholderFragment(key[key.length - 1]); + if (isItemPath) { + key = key.slice(0, -1); + } + + const groupKey = key.join('/'); + 
pathGroups[groupKey] = pathGroups[groupKey] || {}; + pathGroups[groupKey][p] = openapi.paths[p]; + }); + + // Write each path group to separate YAML and JSON files + Object.keys(pathGroups).forEach((pg) => { + // Deep copy openapi + const doc: OpenAPIDocument = JSON.parse(JSON.stringify(openapi)); + doc.paths = pathGroups[pg]; + + // Collect tags used by operations in this path group + const usedTags = new Set(); + Object.values(doc.paths).forEach((pathItem: PathItem) => { + const httpMethods = [ + 'get', + 'post', + 'put', + 'patch', + 'delete', + 'options', + 'head', + 'trace', + ]; + httpMethods.forEach((method) => { + const operation = pathItem[method] as Operation | undefined; + if (operation?.tags) { + operation.tags.forEach((tag) => usedTags.add(tag)); + } + }); + }); + + // Filter tags to only include those used by operations in this path group + // Exclude x-traitTag tags (supplementary documentation tags) + if (doc.tags) { + doc.tags = doc.tags.filter( + (tag) => usedTags.has(tag.name) && !tag['x-traitTag'] + ); + } + + // Simplify info for path-specific docs + doc.info.title = pg; + doc.info.description = `API reference for ${pg}`; + doc['x-pathGroup'] = pg; + + try { + if (!fs.existsSync(outPath)) { + fs.mkdirSync(outPath, { recursive: true }); + } + + const baseFilename = `${prefix}${pg.replaceAll('/', '-').replace(/^-/, '')}`; + const yamlPath = path.resolve(outPath, `${baseFilename}.yaml`); + const jsonPath = path.resolve(outPath, `${baseFilename}.json`); + + // Write both YAML and JSON versions + writeDataFile(doc, yamlPath); + writeJsonFile(doc, jsonPath); + + console.log(`Generated: ${baseFilename}.yaml and ${baseFilename}.json`); + } catch (err) { + console.error(`Error writing path group ${pg}:`, err); + } + }); +} + +/** + * Create article metadata for a path group + * + * @param openapi - OpenAPI document with x-pathGroup + * @returns Article metadata object + */ +function createArticleDataForPathGroup(openapi: OpenAPIDocument): Article { + 
const article: Article = { + path: '', + fields: { + name: openapi['x-pathGroup'] || '', + describes: Object.keys(openapi.paths), + }, + }; + + /** + * Convert OpenAPI path to Hugo-friendly article path + * Legacy endpoints (without /api/ prefix) go under api/ directly + * Versioned endpoints (with /api/vN/) keep their structure + * + * @param p - Path to convert (e.g., '/health', '/api/v3/query_sql') + * @returns Path suitable for Hugo content directory (e.g., 'api/health', 'api/v3/query_sql') + */ + const toHugoPath = (p: string): string => { + if (!p) { + return ''; + } + // If path doesn't start with /api/, it's a legacy endpoint + // Place it directly under api/ to avoid collision with /api/v1/* paths + if (!p.startsWith('/api/')) { + // /health -> api/health + // /write -> api/write + return `api${p}`; + } + // /api/v1/health -> api/v1/health + // /api/v2/write -> api/v2/write + // /api/v3/query_sql -> api/v3/query_sql + return p.replace(/^\//, ''); + }; + + /** + * Convert path to tag-friendly format (dashes instead of slashes) + * + * @param p - Path to convert + * @returns Tag-friendly path + */ + const toTagPath = (p: string): string => { + if (!p) { + return ''; + } + return p.replace(/^\//, '').replaceAll('/', '-'); + }; + + const pathGroup = openapi['x-pathGroup'] || ''; + article.path = toHugoPath(pathGroup); + // Store original path for menu display (shows actual endpoint path) + article.fields.menuName = pathGroup; + article.fields.title = openapi.info?.title; + article.fields.description = openapi.description; + + const pathGroupFrags = path.parse(openapi['x-pathGroup'] || ''); + article.fields.tags = [pathGroupFrags?.dir, pathGroupFrags?.name] + .filter(Boolean) + .map((t) => toTagPath(t)); + + // Extract x-relatedLinks and OpenAPI tags from path items or operations + const relatedLinks: string[] = []; + const apiTags: string[] = []; + const httpMethods = [ + 'get', + 'post', + 'put', + 'patch', + 'delete', + 'options', + 'head', + 'trace', + ]; + 
+ Object.values(openapi.paths).forEach((pathItem: PathItem) => { + // Check path-level x-relatedLinks + if ( + pathItem['x-relatedLinks'] && + Array.isArray(pathItem['x-relatedLinks']) + ) { + relatedLinks.push( + ...(pathItem['x-relatedLinks'] as string[]).filter( + (link) => !relatedLinks.includes(link) + ) + ); + } + + // Check operation-level x-relatedLinks and tags + httpMethods.forEach((method) => { + const operation = pathItem[method] as Operation | undefined; + if (operation) { + // Extract x-relatedLinks + if ( + operation['x-relatedLinks'] && + Array.isArray(operation['x-relatedLinks']) + ) { + relatedLinks.push( + ...(operation['x-relatedLinks'] as string[]).filter( + (link) => !relatedLinks.includes(link) + ) + ); + } + // Extract OpenAPI tags from operation + if (operation.tags && Array.isArray(operation.tags)) { + operation.tags.forEach((tag) => { + if (!apiTags.includes(tag)) { + apiTags.push(tag); + } + }); + } + } + }); + }); + + // Only add related if there are links + if (relatedLinks.length > 0) { + article.fields.related = relatedLinks; + } + + // Add OpenAPI tags from operations (for Hugo frontmatter) + if (apiTags.length > 0) { + article.fields.apiTags = apiTags; + } + + return article; +} + +/** + * Write OpenAPI article metadata to Hugo data files + * Generates articles.yml and articles.json + * + * @param sourcePath - Path to directory containing OpenAPI fragment files + * @param targetPath - Output path for article data + * @param opts - Options including file pattern filter + */ +function writeOpenapiArticleData( + sourcePath: string, + targetPath: string, + opts: WriteOpenapiArticleDataOptions +): void { + /** + * Check if path is a file + */ + const isFile = (filePath: string): boolean => { + return fs.lstatSync(filePath).isFile(); + }; + + /** + * Check if filename matches pattern + */ + const matchesPattern = (filePath: string): boolean => { + return opts.filePattern + ? 
path.parse(filePath).name.startsWith(opts.filePattern) + : true; + }; + + try { + const articles = fs + .readdirSync(sourcePath) + .map((fileName) => path.join(sourcePath, fileName)) + .filter(matchesPattern) + .filter(isFile) + .filter( + (filePath) => filePath.endsWith('.yaml') || filePath.endsWith('.yml') + ) // Only process YAML files + .map((filePath) => { + const openapi = readFile(filePath); + const article = createArticleDataForPathGroup(openapi); + article.fields.source = filePath; + // Hugo omits "/static" from the URI when serving files stored in "./static" + article.fields.staticFilePath = filePath.replace(/^static\//, '/'); + return article; + }); + + if (!fs.existsSync(targetPath)) { + fs.mkdirSync(targetPath, { recursive: true }); + } + + const articleCollection: ArticleCollection = { articles }; + + // Write both YAML and JSON versions + const yamlPath = path.resolve(targetPath, 'articles.yml'); + const jsonPath = path.resolve(targetPath, 'articles.json'); + + writeDataFile(articleCollection, yamlPath); + writeJsonFile(articleCollection, jsonPath); + + console.log(`Generated ${articles.length} articles in ${targetPath}`); + } catch (e) { + console.error('Error writing article data:', e); + } +} + +/** + * Create article data for a tag-based grouping + * + * @param openapi - OpenAPI document with x-tagGroup + * @param operations - Operations for this tag + * @param tagMeta - Tag metadata from OpenAPI spec + * @returns Article metadata object + */ +function createArticleDataForTag( + openapi: OpenAPIDocument, + operations: OperationMeta[], + tagMeta?: Tag +): Article { + const tagName = (openapi['x-tagGroup'] as string) || ''; + const tagSlug = slugifyTag(tagName); + const isConceptual = tagMeta?.['x-traitTag'] === true; + + const article: Article = { + path: `api/${tagSlug}`, + fields: { + name: tagName, + describes: Object.keys(openapi.paths), + title: tagName, + description: + tagMeta?.description || + openapi.info?.description || + `API reference 
for ${tagName}`, + tag: tagName, + isConceptual, + menuGroup: getMenuGroupForTag(tagName), + operations: operations.map((op) => ({ + operationId: op.operationId, + method: op.method, + path: op.path, + summary: op.summary, + tags: op.tags, + ...(op.compatVersion && { compatVersion: op.compatVersion }), + ...(op.externalDocs && { externalDocs: op.externalDocs }), + })), + }, + }; + + // Add tag description for conceptual pages + if (tagMeta?.description) { + article.fields.tagDescription = tagMeta.description; + } + + // Show security schemes section on Authentication pages + if (tagName === 'Authentication') { + article.fields.showSecuritySchemes = true; + } + + // Aggregate unique externalDocs URLs from operations into article-level related + // This populates Hugo frontmatter `related` field for "Related content" links + const relatedUrls = new Set(); + + // First check tag-level externalDocs + if (tagMeta?.externalDocs?.url) { + relatedUrls.add(tagMeta.externalDocs.url); + } + + // Then aggregate from operations + operations.forEach((op) => { + if (op.externalDocs?.url) { + relatedUrls.add(op.externalDocs.url); + } + }); + + if (relatedUrls.size > 0) { + article.fields.related = Array.from(relatedUrls); + } + + return article; +} + +/** + * Write tag-based OpenAPI article metadata to Hugo data files + * Generates articles.yml and articles.json + * + * @param sourcePath - Path to directory containing tag-based OpenAPI fragment files + * @param targetPath - Output path for article data + * @param openapi - Original OpenAPI document (for tag metadata) + * @param opts - Options including file pattern filter + */ +function writeOpenapiTagArticleData( + sourcePath: string, + targetPath: string, + openapi: OpenAPIDocument, + opts: WriteOpenapiArticleDataOptions +): void { + const isFile = (filePath: string): boolean => { + return fs.lstatSync(filePath).isFile(); + }; + + const matchesPattern = (filePath: string): boolean => { + return opts.filePattern + ? 
path.parse(filePath).name.startsWith(opts.filePattern) + : true; + }; + + // Create tag metadata lookup + const tagMetaMap = new Map(); + (openapi.tags || []).forEach((tag) => { + tagMetaMap.set(tag.name, tag); + }); + + try { + const articles = fs + .readdirSync(sourcePath) + .map((fileName) => path.join(sourcePath, fileName)) + .filter(matchesPattern) + .filter(isFile) + .filter( + (filePath) => filePath.endsWith('.yaml') || filePath.endsWith('.yml') + ) + .map((filePath) => { + const tagOpenapi = readFile(filePath); + const tagName = + (tagOpenapi['x-tagGroup'] as string) || tagOpenapi.info?.title || ''; + const tagMeta = tagMetaMap.get(tagName); + + // Extract operations from the tag-filtered spec + const operations: OperationMeta[] = []; + Object.entries(tagOpenapi.paths).forEach(([pathKey, pathItem]) => { + HTTP_METHODS.forEach((method) => { + const operation = pathItem[method] as Operation | undefined; + if (operation) { + const opMeta: OperationMeta = { + operationId: operation.operationId || `${method}-${pathKey}`, + method: method.toUpperCase(), + path: pathKey, + summary: operation.summary || '', + tags: operation.tags || [], + }; + + // Extract compatibility version if present + if (operation['x-compatibility-version']) { + opMeta.compatVersion = operation['x-compatibility-version']; + } + + // Extract externalDocs if present + if (operation.externalDocs) { + opMeta.externalDocs = { + description: operation.externalDocs.description || '', + url: operation.externalDocs.url, + }; + } + + operations.push(opMeta); + } + }); + }); + + const article = createArticleDataForTag( + tagOpenapi, + operations, + tagMeta + ); + article.fields.source = filePath; + article.fields.staticFilePath = filePath.replace(/^static\//, '/'); + return article; + }); + + if (!fs.existsSync(targetPath)) { + fs.mkdirSync(targetPath, { recursive: true }); + } + + const articleCollection: ArticleCollection = { articles }; + + // Write both YAML and JSON versions + const yamlPath = 
path.resolve(targetPath, 'articles.yml'); + const jsonPath = path.resolve(targetPath, 'articles.json'); + + writeDataFile(articleCollection, yamlPath); + writeJsonFile(articleCollection, jsonPath); + + console.log( + `Generated ${articles.length} tag-based articles in ${targetPath}` + ); + } catch (e) { + console.error('Error writing tag article data:', e); + } +} + +/** + * Options for generating Hugo data by tag + */ +export interface GenerateHugoDataByTagOptions extends GenerateHugoDataOptions { + /** Whether to also generate path-based files (for backwards compatibility) */ + includePaths?: boolean; +} + +/** + * Generate Hugo data files from an OpenAPI specification grouped by tag + * + * This function: + * 1. Reads the OpenAPI spec file + * 2. Groups operations by their OpenAPI tags + * 3. Writes each tag group to separate YAML and JSON files + * 4. Generates tag-based article metadata for Hugo + * + * @param options - Generation options + */ +export function generateHugoDataByTag( + options: GenerateHugoDataByTagOptions +): void { + const filenamePrefix = `${path.parse(options.specFile).name}-`; + const sourceFile = readFile(options.specFile, 'utf8'); + + // Optionally generate path-based files for backwards compatibility + if (options.includePaths) { + console.log( + `\nGenerating OpenAPI path files in ${options.dataOutPath}....` + ); + writePathOpenapis(sourceFile, filenamePrefix, options.dataOutPath); + } + + // Generate tag-based files + const tagOutPath = options.includePaths + ? 
path.join(options.dataOutPath, 'tags') + : options.dataOutPath; + + console.log(`\nGenerating OpenAPI tag files in ${tagOutPath}....`); + writeTagOpenapis(sourceFile, filenamePrefix, tagOutPath); + + console.log( + `\nGenerating OpenAPI tag article data in ${options.articleOutPath}...` + ); + writeOpenapiTagArticleData(tagOutPath, options.articleOutPath, sourceFile, { + filePattern: filenamePrefix, + }); + + console.log('\nTag-based generation complete!\n'); +} + +/** + * Generate Hugo data files from an OpenAPI specification + * + * This function: + * 1. Reads the OpenAPI spec file + * 2. Groups paths by their base path + * 3. Writes each group to separate YAML and JSON files + * 4. Generates article metadata for Hugo + * + * @param options - Generation options + */ +export function generateHugoData(options: GenerateHugoDataOptions): void { + const filenamePrefix = `${path.parse(options.specFile).name}-`; + + const sourceFile = readFile(options.specFile, 'utf8'); + + console.log(`\nGenerating OpenAPI path files in ${options.dataOutPath}....`); + writePathOpenapis(sourceFile, filenamePrefix, options.dataOutPath); + + console.log( + `\nGenerating OpenAPI article data in ${options.articleOutPath}...` + ); + writeOpenapiArticleData(options.dataOutPath, options.articleOutPath, { + filePattern: filenamePrefix, + }); + + console.log('\nGeneration complete!\n'); +} + +/** + * Generate path-specific OpenAPI specs from a spec file + * + * Convenience wrapper that reads the spec file and generates path-specific specs. 
+ * + * @param specFile - Path to OpenAPI spec file + * @param outPath - Output directory for path-specific specs + * @returns Map of API path to spec file web path (for use in frontmatter) + */ +export function generatePathSpecificSpecs( + specFile: string, + outPath: string +): Map { + const openapi = readFile(specFile, 'utf8'); + return writePathSpecificSpecs(openapi, outPath); +} + +// CommonJS export for backward compatibility +module.exports = { + generateHugoData, + generateHugoDataByTag, + generatePathSpecificSpecs, + writePathSpecificSpecs, +}; diff --git a/api-docs/scripts/openapi-paths-to-hugo-data/package.json b/api-docs/scripts/openapi-paths-to-hugo-data/package.json new file mode 100644 index 0000000000..78bd5bc114 --- /dev/null +++ b/api-docs/scripts/openapi-paths-to-hugo-data/package.json @@ -0,0 +1,14 @@ +{ + "name": "openapi-paths-to-hugo-data", + "version": "1.0.0", + "description": "Convert OpenAPI specifications to Hugo data files for API documentation", + "main": "index.js", + "type": "commonjs", + "dependencies": { + "js-yaml": "^4.1.1" + }, + "devDependencies": {}, + "scripts": {}, + "author": "InfluxData", + "license": "MIT" +} diff --git a/api-docs/scripts/openapi-paths-to-hugo-data/yarn.lock b/api-docs/scripts/openapi-paths-to-hugo-data/yarn.lock new file mode 100644 index 0000000000..96bb86828b --- /dev/null +++ b/api-docs/scripts/openapi-paths-to-hugo-data/yarn.lock @@ -0,0 +1,32 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
+# yarn lockfile v1 + + +"@types/js-yaml@^4.0.9": + version "4.0.9" + resolved "https://registry.yarnpkg.com/@types/js-yaml/-/js-yaml-4.0.9.tgz#cd82382c4f902fed9691a2ed79ec68c5898af4c2" + integrity sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg== + +"@types/node@^24.10.1": + version "24.10.1" + resolved "https://registry.yarnpkg.com/@types/node/-/node-24.10.1.tgz#91e92182c93db8bd6224fca031e2370cef9a8f01" + integrity sha512-GNWcUTRBgIRJD5zj+Tq0fKOJ5XZajIiBroOF0yvj2bSU1WvNdYS/dn9UxwsujGW4JX06dnHyjV2y9rRaybH0iQ== + dependencies: + undici-types "~7.16.0" + +argparse@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" + integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== + +js-yaml@^4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.1.tgz#854c292467705b699476e1a2decc0c8a3458806b" + integrity sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== + dependencies: + argparse "^2.0.1" + +undici-types@~7.16.0: + version "7.16.0" + resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-7.16.0.tgz#ffccdff36aea4884cbfce9a750a0580224f58a46" + integrity sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw== diff --git a/api-docs/scripts/tsconfig.json b/api-docs/scripts/tsconfig.json new file mode 100644 index 0000000000..e36776534b --- /dev/null +++ b/api-docs/scripts/tsconfig.json @@ -0,0 +1,25 @@ +{ + "compilerOptions": { + "target": "ES2021", + "lib": ["ES2021"], + "module": "CommonJS", + "moduleResolution": "node", + "outDir": "./dist", + "rootDir": ".", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "declaration": true, + "sourceMap": true, + "types": ["node"] + }, + 
"include": [ + "**/*.ts" + ], + "exclude": [ + "node_modules", + "dist" + ] +} diff --git a/assets/js/components/api-auth-input.ts b/assets/js/components/api-auth-input.ts new file mode 100644 index 0000000000..dc37965e42 --- /dev/null +++ b/assets/js/components/api-auth-input.ts @@ -0,0 +1,559 @@ +/** + * API Auth Input Component (Popover) + * + * Provides a popover-based credential input for API operations. + * Integrates with RapiDoc's auth system via JavaScript API. + * + * Features: + * - Popover UI triggered by button click + * - Filters auth schemes based on operation requirements + * - Session-only credentials (not persisted to storage) + * - Syncs with RapiDoc's "Try it" feature + * + * Usage: + * + * + */ + +interface ComponentOptions { + component: HTMLElement; +} + +interface AuthCredentials { + bearer?: string; + basic?: { username: string; password: string }; + querystring?: string; +} + +type CleanupFn = () => void; + +// In-memory credential storage (not persisted) +let currentCredentials: AuthCredentials = {}; + +/** + * Get current credentials (in-memory only) + */ +function getCredentials(): AuthCredentials { + return currentCredentials; +} + +/** + * Set credentials (in-memory only, not persisted) + */ +function setCredentials(credentials: AuthCredentials): void { + currentCredentials = credentials; +} + +/** + * Check if any credentials are set + */ +function hasCredentials(): boolean { + return !!( + currentCredentials.bearer || + currentCredentials.basic?.password || + currentCredentials.querystring + ); +} + +/** + * Try to update the visible auth input in RapiDoc's shadow DOM. + * This provides visual feedback but is not essential for authentication. + */ +function updateRapiDocAuthInput( + rapiDoc: HTMLElement, + token: string, + scheme: 'bearer' | 'token' +): void { + try { + const shadowRoot = rapiDoc.shadowRoot; + if (!shadowRoot) return; + + const headerValue = + scheme === 'bearer' ? 
`Bearer ${token}` : `Token ${token}`; + + const authInputSelectors = [ + 'input[data-pname="Authorization"]', + 'input[placeholder*="authorization" i]', + 'input[placeholder*="token" i]', + '.request-headers input[type="text"]', + ]; + + for (const selector of authInputSelectors) { + const input = shadowRoot.querySelector(selector); + if (input && !input.value) { + input.value = headerValue; + input.dispatchEvent(new Event('input', { bubbles: true })); + console.log('[API Auth] Updated visible auth input in RapiDoc'); + return; + } + } + } catch (e) { + console.debug('[API Auth] Could not update visible input:', e); + } +} + +/** + * Apply credentials to a RapiDoc element + * Returns true if credentials were successfully applied + */ +function applyCredentialsToRapiDoc( + rapiDoc: HTMLElement, + credentials: AuthCredentials +): boolean { + let applied = false; + + // Clear existing credentials first + if ('removeAllSecurityKeys' in rapiDoc) { + try { + (rapiDoc as any).removeAllSecurityKeys(); + } catch (e) { + console.warn('[API Auth] Failed to clear existing credentials:', e); + } + } + + // Apply bearer/token credentials + if (credentials.bearer) { + try { + // Method 1: HTML attributes (most reliable) + rapiDoc.setAttribute('api-key-name', 'Authorization'); + rapiDoc.setAttribute('api-key-location', 'header'); + rapiDoc.setAttribute('api-key-value', `Bearer ${credentials.bearer}`); + console.log('[API Auth] Set auth via HTML attributes'); + + // Method 2: JavaScript API for scheme-specific auth + if ('setApiKey' in rapiDoc) { + (rapiDoc as any).setApiKey('BearerAuthentication', credentials.bearer); + (rapiDoc as any).setApiKey('TokenAuthentication', credentials.bearer); + console.log('[API Auth] Applied bearer/token via setApiKey()'); + } + + applied = true; + updateRapiDocAuthInput(rapiDoc, credentials.bearer, 'bearer'); + } catch (e) { + console.error('[API Auth] Failed to set API key:', e); + } + } + + // Apply basic auth credentials + if 
('setHttpUserNameAndPassword' in rapiDoc && credentials.basic?.password) { + try { + (rapiDoc as any).setHttpUserNameAndPassword( + 'BasicAuthentication', + credentials.basic.username || '', + credentials.basic.password + ); + applied = true; + console.log('[API Auth] Applied basic auth credentials to RapiDoc'); + } catch (e) { + console.error('[API Auth] Failed to set basic auth:', e); + } + } + + return applied; +} + +/** + * Create auth field HTML for a specific scheme + */ +function createAuthField(scheme: string): string { + switch (scheme) { + case 'bearer': + return ` +
+ +
+ + +
+
`; + + case 'token': + return ` +
+ +
+ + +
+
`; + + case 'basic': + return ` +
+

Basic Authentication (v1 compatibility)

+
+ + +
+
+ +
+ + +
+
+
`; + + case 'querystring': + return ` +
+ +
+ + +
+
`; + + default: + return ''; + } +} + +/** + * Create the popover content HTML + */ +function createPopoverContent(schemes: string[]): string { + // If both bearer and token are supported, show combined field + const hasBearerAndToken = + schemes.includes('bearer') && schemes.includes('token'); + const displaySchemes = hasBearerAndToken + ? schemes.filter((s) => s !== 'token') + : schemes; + + const fields = displaySchemes.map((s) => createAuthField(s)).join(''); + + // Adjust label if both bearer and token are supported + const bearerLabel = hasBearerAndToken + ? '(Bearer / Token auth)' + : '(Bearer auth)'; + + return ` +
+
+

API Credentials

+ +
+

+ Enter credentials for "Try it" requests. +

+ ${fields.replace('(Bearer auth)', bearerLabel)} +
+ + +
+ +
+ `; +} + +/** + * Show feedback message + */ +function showFeedback( + container: HTMLElement, + message: string, + type: 'success' | 'error' +): void { + const feedback = container.querySelector('.auth-feedback'); + if (feedback) { + feedback.textContent = message; + feedback.className = `auth-feedback auth-feedback--${type}`; + feedback.hidden = false; + + setTimeout(() => { + feedback.hidden = true; + }, 3000); + } +} + +/** + * Update the status indicator on the trigger button + */ +function updateStatusIndicator(trigger: HTMLElement): void { + const indicator = trigger.querySelector( + '.auth-status-indicator' + ); + const hasCreds = hasCredentials(); + + if (indicator) { + indicator.hidden = !hasCreds; + } + + trigger.classList.toggle('has-credentials', hasCreds); +} + +/** + * Initialize the auth input popover component + */ +export default function ApiAuthInput({ + component, +}: ComponentOptions): CleanupFn | void { + // Component is the trigger button + const trigger = component; + const popoverEl = trigger.nextElementSibling as HTMLElement | null; + + if (!popoverEl || !popoverEl.classList.contains('api-auth-popover')) { + console.error('[API Auth] Popover container not found'); + return; + } + + // Now TypeScript knows popover is not null + const popover = popoverEl; + + const schemesAttr = trigger.dataset.schemes || 'bearer'; + const schemes = schemesAttr.split(',').map((s) => s.trim().toLowerCase()); + + // Render popover content + popover.innerHTML = createPopoverContent(schemes); + + // Element references + const bearerInput = popover.querySelector('#auth-bearer'); + const tokenInput = popover.querySelector('#auth-token'); + const usernameInput = + popover.querySelector('#auth-username'); + const passwordInput = + popover.querySelector('#auth-password'); + const querystringInput = + popover.querySelector('#auth-querystring'); + const applyBtn = popover.querySelector('.auth-apply'); + const clearBtn = popover.querySelector('.auth-clear'); + const 
closeBtn = popover.querySelector('.popover-close'); + + /** + * Toggle popover visibility + */ + function togglePopover(show?: boolean): void { + const shouldShow = show ?? popover.hidden; + popover.hidden = !shouldShow; + trigger.setAttribute('aria-expanded', String(shouldShow)); + + if (shouldShow) { + // Focus first input when opening + const firstInput = popover.querySelector( + 'input:not([type="hidden"])' + ); + firstInput?.focus(); + } + } + + /** + * Close popover + */ + function closePopover(): void { + togglePopover(false); + trigger.focus(); + } + + // Trigger button click + trigger.addEventListener('click', (e) => { + e.stopPropagation(); + togglePopover(); + }); + + // Close button + closeBtn?.addEventListener('click', closePopover); + + // Close on outside click + function handleOutsideClick(e: MouseEvent): void { + if ( + !popover.hidden && + !popover.contains(e.target as Node) && + !trigger.contains(e.target as Node) + ) { + closePopover(); + } + } + document.addEventListener('click', handleOutsideClick); + + // Close on Escape + function handleEscape(e: KeyboardEvent): void { + if (e.key === 'Escape' && !popover.hidden) { + closePopover(); + } + } + document.addEventListener('keydown', handleEscape); + + // Show/hide toggle for password fields + const showToggles = + popover.querySelectorAll('.auth-show-toggle'); + showToggles.forEach((btn) => { + btn.addEventListener('click', () => { + const targetId = btn.dataset.target; + const input = popover.querySelector(`#${targetId}`); + if (input) { + const isPassword = input.type === 'password'; + input.type = isPassword ? 
'text' : 'password'; + btn.classList.toggle('showing', !isPassword); + } + }); + }); + + /** + * Apply credentials + */ + function applyCredentials(): void { + const newCredentials: AuthCredentials = {}; + + // Get token from bearer or token input (they're combined for UX) + const tokenValue = bearerInput?.value || tokenInput?.value; + if (tokenValue) { + newCredentials.bearer = tokenValue; + } + + if (usernameInput?.value || passwordInput?.value) { + newCredentials.basic = { + username: usernameInput?.value || '', + password: passwordInput?.value || '', + }; + } + + if (querystringInput?.value) { + newCredentials.querystring = querystringInput.value; + } + + setCredentials(newCredentials); + updateStatusIndicator(trigger); + + // Apply to RapiDoc + const rapiDoc = document.querySelector('rapi-doc') as HTMLElement | null; + if (rapiDoc && 'setApiKey' in rapiDoc) { + const applied = applyCredentialsToRapiDoc(rapiDoc, newCredentials); + if (applied) { + showFeedback(popover, 'Credentials applied', 'success'); + } else { + showFeedback(popover, 'No credentials to apply', 'error'); + } + } else { + showFeedback(popover, 'Saved (API viewer loading...)', 'success'); + } + } + + /** + * Clear credentials + */ + function clearCredentials(): void { + if (bearerInput) bearerInput.value = ''; + if (tokenInput) tokenInput.value = ''; + if (usernameInput) usernameInput.value = ''; + if (passwordInput) passwordInput.value = ''; + if (querystringInput) querystringInput.value = ''; + + setCredentials({}); + updateStatusIndicator(trigger); + + // Clear from RapiDoc + const rapiDoc = document.querySelector('rapi-doc') as HTMLElement | null; + if (rapiDoc) { + rapiDoc.removeAttribute('api-key-name'); + rapiDoc.removeAttribute('api-key-location'); + rapiDoc.removeAttribute('api-key-value'); + + if ('removeAllSecurityKeys' in rapiDoc) { + try { + (rapiDoc as any).removeAllSecurityKeys(); + } catch (e) { + console.debug('[API Auth] Failed to clear RapiDoc credentials:', e); + } + } + } 
+ + showFeedback(popover, 'Credentials cleared', 'success'); + } + + // Button handlers + applyBtn?.addEventListener('click', applyCredentials); + clearBtn?.addEventListener('click', clearCredentials); + + // Listen for RapiDoc spec-loaded event to apply stored credentials + function handleSpecLoaded(event: Event): void { + const rapiDoc = event.target as HTMLElement; + const storedCredentials = getCredentials(); + if ( + storedCredentials.bearer || + storedCredentials.basic?.password || + storedCredentials.querystring + ) { + setTimeout(() => { + applyCredentialsToRapiDoc(rapiDoc, storedCredentials); + }, 100); + } + } + + // Watch for RapiDoc elements + const observer = new MutationObserver((mutations) => { + for (const mutation of mutations) { + for (const node of mutation.addedNodes) { + if (node instanceof HTMLElement && node.tagName === 'RAPI-DOC') { + node.addEventListener('spec-loaded', handleSpecLoaded); + } + } + } + }); + + observer.observe(document.body, { childList: true, subtree: true }); + + // Check if RapiDoc already exists + const existingRapiDoc = document.querySelector('rapi-doc'); + if (existingRapiDoc) { + existingRapiDoc.addEventListener('spec-loaded', handleSpecLoaded); + } + + // Initialize status indicator + updateStatusIndicator(trigger); + + // Cleanup function + return (): void => { + observer.disconnect(); + document.removeEventListener('click', handleOutsideClick); + document.removeEventListener('keydown', handleEscape); + existingRapiDoc?.removeEventListener('spec-loaded', handleSpecLoaded); + }; +} diff --git a/assets/js/components/api-rapidoc.ts b/assets/js/components/api-rapidoc.ts new file mode 100644 index 0000000000..8807c46c42 --- /dev/null +++ b/assets/js/components/api-rapidoc.ts @@ -0,0 +1,259 @@ +/** + * RapiDoc API Documentation Component + * + * Initializes the full RapiDoc renderer with theme synchronization. + * This is the component version of the inline JavaScript from rapidoc.html. 
+ * + * Features: + * - Theme detection from Hugo's stylesheet toggle system + * - Automatic theme synchronization when user toggles dark/light mode + * - Shadow DOM manipulation to hide unwanted UI elements + * - CSS custom property injection for styling + * + * Usage: + *
+ * + * The component expects a element to already exist in the container + * (created by Hugo template) or will wait for it to be added. + */ + +import { getPreference } from '../services/local-storage.js'; + +interface ComponentOptions { + component: HTMLElement; +} + +interface ThemeColors { + theme: 'light' | 'dark'; + bgColor: string; + textColor: string; + headerColor: string; + primaryColor: string; + navBgColor: string; + navTextColor: string; + navHoverBgColor: string; + navHoverTextColor: string; + navAccentColor: string; + codeTheme: string; +} + +type CleanupFn = () => void; + +/** + * Get current theme from localStorage (source of truth for Hugo theme system) + */ +function getTheme(): 'dark' | 'light' { + const theme = getPreference('theme'); + return theme === 'dark' ? 'dark' : 'light'; +} + +/** + * Get theme colors matching Hugo SCSS variables + */ +function getThemeColors(isDark: boolean): ThemeColors { + if (isDark) { + return { + theme: 'dark', + bgColor: '#14141F', // $grey10 ($article-bg in dark theme) + textColor: '#D4D7DD', // $g15-platinum + headerColor: '#D4D7DD', + primaryColor: '#a0a0ff', + navBgColor: '#1a1a2a', + navTextColor: '#D4D7DD', + navHoverBgColor: '#252535', + navHoverTextColor: '#ffffff', + navAccentColor: '#a0a0ff', + codeTheme: 'monokai', + }; + } + + return { + theme: 'light', + bgColor: '#ffffff', // $g20-white + textColor: '#2b2b2b', + headerColor: '#020a47', // $br-dark-blue + primaryColor: '#020a47', + navBgColor: '#f7f8fa', + navTextColor: '#2b2b2b', + navHoverBgColor: '#e8e8f0', + navHoverTextColor: '#020a47', + navAccentColor: '#020a47', + codeTheme: 'prism', + }; +} + +/** + * Apply theme to RapiDoc element + */ +function applyTheme(rapiDoc: HTMLElement): void { + const isDark = getTheme() === 'dark'; + const colors = getThemeColors(isDark); + + rapiDoc.setAttribute('theme', colors.theme); + rapiDoc.setAttribute('bg-color', colors.bgColor); + rapiDoc.setAttribute('text-color', colors.textColor); + 
rapiDoc.setAttribute('header-color', colors.headerColor); + rapiDoc.setAttribute('primary-color', colors.primaryColor); + rapiDoc.setAttribute('nav-bg-color', colors.navBgColor); + rapiDoc.setAttribute('nav-text-color', colors.navTextColor); + rapiDoc.setAttribute('nav-hover-bg-color', colors.navHoverBgColor); + rapiDoc.setAttribute('nav-hover-text-color', colors.navHoverTextColor); + rapiDoc.setAttribute('nav-accent-color', colors.navAccentColor); + rapiDoc.setAttribute('code-theme', colors.codeTheme); +} + +/** + * Set custom CSS properties on RapiDoc element + */ +function setInputBorderStyles(rapiDoc: HTMLElement): void { + rapiDoc.style.setProperty('--border-color', '#00A3FF'); +} + +/** + * Hide unwanted elements in RapiDoc shadow DOM + */ +function hideExpandCollapseControls(rapiDoc: HTMLElement): void { + const maxAttempts = 10; + let attempts = 0; + + const tryHide = (): void => { + attempts++; + + try { + const shadowRoot = rapiDoc.shadowRoot; + if (!shadowRoot) { + if (attempts < maxAttempts) { + setTimeout(tryHide, 500); + } + return; + } + + // Find elements containing "Expand all" / "Collapse all" and hide them + const allElements = shadowRoot.querySelectorAll('*'); + let hiddenCount = 0; + + allElements.forEach((element) => { + const text = element.textContent || ''; + + if (text.includes('Expand all') || text.includes('Collapse all')) { + (element as HTMLElement).style.display = 'none'; + if (element.parentElement) { + element.parentElement.style.display = 'none'; + } + hiddenCount++; + } + }); + + // Hide "Overview" headings + const headings = shadowRoot.querySelectorAll('h1, h2, h3, h4'); + headings.forEach((heading) => { + const text = (heading.textContent || '').trim(); + if (text.includes('Overview')) { + (heading as HTMLElement).style.display = 'none'; + hiddenCount++; + } + }); + + // Inject CSS as backup + const style = document.createElement('style'); + style.textContent = ` + .section-gap.section-tag, + [id*="overview"], + 
.regular-font.section-gap:empty, + h1:empty, h2:empty, h3:empty { + display: none !important; + } + `; + shadowRoot.appendChild(style); + + if (hiddenCount === 0 && attempts < maxAttempts) { + setTimeout(tryHide, 500); + } + } catch { + if (attempts < maxAttempts) { + setTimeout(tryHide, 500); + } + } + }; + + setTimeout(tryHide, 500); +} + +/** + * Watch for theme changes via stylesheet toggle + */ +function watchThemeChanges(rapiDoc: HTMLElement): CleanupFn { + const handleThemeChange = (): void => { + applyTheme(rapiDoc); + }; + + // Watch stylesheet disabled attribute changes (Hugo theme.js toggles this) + const observer = new MutationObserver((mutations) => { + for (const mutation of mutations) { + if ( + mutation.type === 'attributes' && + mutation.target instanceof HTMLLinkElement && + mutation.target.title?.includes('theme') + ) { + handleThemeChange(); + break; + } + // Also watch data-theme changes as fallback + if (mutation.attributeName === 'data-theme') { + handleThemeChange(); + } + } + }); + + // Observe head for stylesheet changes + observer.observe(document.head, { + attributes: true, + attributeFilter: ['disabled'], + subtree: true, + }); + + // Observe documentElement for data-theme changes + observer.observe(document.documentElement, { + attributes: true, + attributeFilter: ['data-theme'], + }); + + return (): void => { + observer.disconnect(); + }; +} + +/** + * Initialize RapiDoc component + */ +export default function ApiRapiDoc({ + component, +}: ComponentOptions): CleanupFn | void { + // Find the rapi-doc element inside the container + const rapiDoc = component.querySelector('rapi-doc') as HTMLElement | null; + + if (!rapiDoc) { + console.warn('[API RapiDoc] No rapi-doc element found in container'); + return; + } + + // Apply initial theme + applyTheme(rapiDoc); + + // Set custom CSS properties + if (customElements && customElements.whenDefined) { + customElements.whenDefined('rapi-doc').then(() => { + setInputBorderStyles(rapiDoc); + 
setTimeout(() => setInputBorderStyles(rapiDoc), 500); + }); + } else { + setInputBorderStyles(rapiDoc); + setTimeout(() => setInputBorderStyles(rapiDoc), 500); + } + + // Hide unwanted UI elements + hideExpandCollapseControls(rapiDoc); + + // Watch for theme changes + return watchThemeChanges(rapiDoc); +} diff --git a/assets/js/components/api-toc.ts b/assets/js/components/api-toc.ts new file mode 100644 index 0000000000..06b0d2f589 --- /dev/null +++ b/assets/js/components/api-toc.ts @@ -0,0 +1,434 @@ +/** + * API Table of Contents Component + * + * Generates "ON THIS PAGE" navigation from content headings or operations data. + * Features: + * - Builds TOC from h2/h3 headings in the active tab panel (legacy) + * - Builds TOC from operations data passed via data-operations attribute (tag-based) + * - Highlights current section on scroll (intersection observer) + * - Smooth scroll to anchors + * - Updates when tab changes + * + * Usage: + * + */ + +interface ComponentOptions { + component: HTMLElement; +} + +interface TocEntry { + id: string; + text: string; + level: number; +} + +/** + * Operation metadata from frontmatter (for tag-based pages) + */ +interface OperationMeta { + operationId: string; + method: string; + path: string; + summary: string; + tags: string[]; +} + +/** + * Check if the active panel contains a RapiDoc component + */ +function isRapiDocActive(): boolean { + const activePanel = document.querySelector( + '.tab-content:not([style*="display: none"]), [data-tab-panel]:not([style*="display: none"])' + ); + return activePanel?.querySelector('rapi-doc') !== null; +} + +/** + * Get headings from the currently visible content + */ +function getVisibleHeadings(): TocEntry[] { + // Find the active tab panel or main content area + const activePanel = document.querySelector( + '.tab-content:not([style*="display: none"]), [data-tab-panel]:not([style*="display: none"]), .article--content' + ); + + if (!activePanel) { + return []; + } + + const headings = 
activePanel.querySelectorAll('h2, h3'); + const entries: TocEntry[] = []; + + headings.forEach((heading) => { + // Skip headings without IDs + if (!heading.id) { + return; + } + + // Skip hidden headings + const rect = heading.getBoundingClientRect(); + if (rect.width === 0 && rect.height === 0) { + return; + } + + entries.push({ + id: heading.id, + text: heading.textContent?.trim() || '', + level: heading.tagName === 'H2' ? 2 : 3, + }); + }); + + return entries; +} + +/** + * Build TOC HTML from entries + */ +function buildTocHtml(entries: TocEntry[]): string { + if (entries.length === 0) { + // Check if RapiDoc is active - show helpful message + if (isRapiDocActive()) { + return '

Use RapiDoc\'s navigation below to explore this endpoint.

'; + } + return '

No sections on this page.

'; + } + + let html = '
    '; + + entries.forEach((entry) => { + const indent = entry.level === 3 ? ' api-toc-item--nested' : ''; + html += ` +
  • + ${entry.text} +
  • + `; + }); + + html += '
'; + return html; +} + +/** + * Get method badge class for HTTP method + */ +function getMethodClass(method: string): string { + const m = method.toLowerCase(); + switch (m) { + case 'get': + return 'api-method--get'; + case 'post': + return 'api-method--post'; + case 'put': + return 'api-method--put'; + case 'patch': + return 'api-method--patch'; + case 'delete': + return 'api-method--delete'; + default: + return ''; + } +} + +/** + * Build TOC HTML from operations data (for tag-based pages) + */ +function buildOperationsTocHtml(operations: OperationMeta[]): string { + if (operations.length === 0) { + return '

No operations on this page.

'; + } + + let html = '
    '; + + operations.forEach((op) => { + // Generate anchor ID from operationId (Scalar uses operationId for anchors) + const anchorId = op.operationId; + const methodClass = getMethodClass(op.method); + + html += ` +
  • + + ${op.method.toUpperCase()} + ${op.path} + +
  • + `; + }); + + html += '
'; + return html; +} + +/** + * Parse operations from data attribute + */ +function parseOperationsData(component: HTMLElement): OperationMeta[] | null { + const dataAttr = component.getAttribute('data-operations'); + if (!dataAttr) { + return null; + } + + try { + const operations = JSON.parse(dataAttr) as OperationMeta[]; + return Array.isArray(operations) ? operations : null; + } catch (e) { + console.warn('[API TOC] Failed to parse operations data:', e); + return null; + } +} + +/** + * Set up intersection observer for scroll highlighting + */ +function setupScrollHighlighting( + container: HTMLElement, + entries: TocEntry[] +): IntersectionObserver | null { + if (entries.length === 0) { + return null; + } + + const headingIds = entries.map((e) => e.id); + const links = container.querySelectorAll('.api-toc-link'); + + // Create a map of heading ID to link element + const linkMap = new Map(); + links.forEach((link) => { + const href = link.getAttribute('href'); + if (href?.startsWith('#')) { + linkMap.set(href.slice(1), link); + } + }); + + // Track which headings are visible + const visibleHeadings = new Set(); + + const observer = new IntersectionObserver( + (observerEntries) => { + observerEntries.forEach((entry) => { + const id = entry.target.id; + + if (entry.isIntersecting) { + visibleHeadings.add(id); + } else { + visibleHeadings.delete(id); + } + }); + + // Find the first visible heading (in document order) + let activeId: string | null = null; + for (const id of headingIds) { + if (visibleHeadings.has(id)) { + activeId = id; + break; + } + } + + // If no heading is visible, use the last one that was scrolled past + if (!activeId && visibleHeadings.size === 0) { + const scrollY = window.scrollY; + for (let i = headingIds.length - 1; i >= 0; i--) { + const heading = document.getElementById(headingIds[i]); + if (heading && heading.offsetTop < scrollY + 100) { + activeId = headingIds[i]; + break; + } + } + } + + // Update active state on links + 
links.forEach((link) => { + link.classList.remove('is-active'); + }); + + if (activeId) { + const activeLink = linkMap.get(activeId); + activeLink?.classList.add('is-active'); + } + }, + { + rootMargin: '-80px 0px -70% 0px', + threshold: 0, + } + ); + + // Observe all headings + headingIds.forEach((id) => { + const heading = document.getElementById(id); + if (heading) { + observer.observe(heading); + } + }); + + return observer; +} + +/** + * Set up smooth scroll for TOC links + */ +function setupSmoothScroll(container: HTMLElement): void { + container.addEventListener('click', (event) => { + const target = event.target as HTMLElement; + const link = target.closest('.api-toc-link'); + + if (!link) { + return; + } + + const href = link.getAttribute('href'); + if (!href?.startsWith('#')) { + return; + } + + const targetElement = document.getElementById(href.slice(1)); + if (!targetElement) { + return; + } + + event.preventDefault(); + + // Scroll with offset for fixed header + const headerOffset = 80; + const elementPosition = targetElement.getBoundingClientRect().top; + const offsetPosition = elementPosition + window.scrollY - headerOffset; + + window.scrollTo({ + top: offsetPosition, + behavior: 'smooth', + }); + + // Update URL hash without jumping + history.pushState(null, '', href); + }); +} + +/** + * Update TOC visibility based on active tab + * Hide TOC for Operations tab (RapiDoc has built-in navigation) + */ +function updateTocVisibility(container: HTMLElement): void { + const operationsPanel = document.querySelector( + '[data-tab-panel="operations"]' + ); + const isOperationsVisible = + operationsPanel && + !operationsPanel.getAttribute('style')?.includes('display: none'); + + if (isOperationsVisible) { + container.classList.add('is-hidden'); + } else { + container.classList.remove('is-hidden'); + } +} + +/** + * Watch for tab changes to rebuild TOC + */ +function watchTabChanges( + container: HTMLElement, + rebuild: () => void +): MutationObserver { + 
const tabPanels = document.querySelector('.api-tab-panels'); + + if (!tabPanels) { + return new MutationObserver(() => {}); + } + + const observer = new MutationObserver((mutations) => { + // Check if any tab panel visibility changed + const hasVisibilityChange = mutations.some((mutation) => { + return ( + mutation.type === 'attributes' && + (mutation.attributeName === 'style' || + mutation.attributeName === 'class') + ); + }); + + if (hasVisibilityChange) { + // Update visibility based on active tab + updateTocVisibility(container); + // Debounce rebuild + setTimeout(rebuild, 100); + } + }); + + observer.observe(tabPanels, { + attributes: true, + subtree: true, + attributeFilter: ['style', 'class'], + }); + + return observer; +} + +/** + * Initialize API TOC component + */ +export default function ApiToc({ component }: ComponentOptions): void { + const nav = component.querySelector('.api-toc-nav'); + + if (!nav) { + console.warn('[API TOC] No .api-toc-nav element found'); + return; + } + + // Check for operations data (tag-based pages) + const operations = parseOperationsData(component); + let observer: IntersectionObserver | null = null; + + /** + * Rebuild the TOC + */ + function rebuild(): void { + // Clean up previous observer + if (observer) { + observer.disconnect(); + observer = null; + } + + // If operations data is present, build operations-based TOC + if (operations && operations.length > 0) { + if (nav) { + nav.innerHTML = buildOperationsTocHtml(operations); + } + // Don't hide TOC for tag-based pages - always show operations + component.classList.remove('is-hidden'); + return; + } + + // Otherwise, fall back to heading-based TOC + const entries = getVisibleHeadings(); + if (nav) { + nav.innerHTML = buildTocHtml(entries); + } + + // Set up scroll highlighting + observer = setupScrollHighlighting(component, entries); + } + + // Check initial visibility (hide for Operations tab, only for non-operations pages) + if (!operations || operations.length === 0) 
{ + updateTocVisibility(component); + } + + // Initial build + rebuild(); + + // Set up smooth scroll + setupSmoothScroll(component); + + // Watch for tab changes (only for non-operations pages) + if (!operations || operations.length === 0) { + watchTabChanges(component, rebuild); + } + + // Also rebuild on window resize (headings may change visibility) + let resizeTimeout: number; + window.addEventListener('resize', () => { + clearTimeout(resizeTimeout); + resizeTimeout = window.setTimeout(rebuild, 250); + }); +} diff --git a/assets/js/components/rapidoc-mini.ts b/assets/js/components/rapidoc-mini.ts new file mode 100644 index 0000000000..fba66fb47e --- /dev/null +++ b/assets/js/components/rapidoc-mini.ts @@ -0,0 +1,531 @@ +/** + * RapiDoc Mini Component + * + * Initializes RapiDoc Mini web component for single API operation rendering. + * Features: + * - Dynamic CDN loading (memoized across instances) + * - Theme synchronization with Hugo theme system + * - Multiple instance support (no hardcoded IDs) + * - Cleanup function for proper teardown + * + * Usage: + *
+ *
+ */ + +import { getPreference } from '../services/local-storage.js'; + +interface ComponentOptions { + component: HTMLElement; +} + +interface ThemeConfig { + theme: 'light' | 'dark'; + bgColor: string; + textColor: string; + primaryColor: string; + navBgColor: string; + navTextColor: string; + navHoverBgColor: string; + navHoverTextColor: string; +} + +type CleanupFn = () => void; + +// Use full RapiDoc for proper auth tooltip behavior +// (mini version has limited features) +const RAPIDOC_CDN = 'https://unpkg.com/rapidoc/dist/rapidoc-min.js'; +const RAPIDOC_ELEMENT = 'rapi-doc'; + +// Memoization: track script loading state +let scriptLoadPromise: Promise | null = null; + +/** + * Load RapiDoc Mini script from CDN (memoized) + */ +function loadRapiDocScript(timeout = 10000): Promise { + // Return existing promise if already loading + if (scriptLoadPromise) { + return scriptLoadPromise; + } + + // Check if custom element already registered + if (customElements.get(RAPIDOC_ELEMENT)) { + return Promise.resolve(); + } + + scriptLoadPromise = new Promise((resolve, reject) => { + // Check if script tag already exists + const existing = Array.from(document.scripts).find( + (s) => s.src && s.src.includes('rapidoc') + ); + + if (existing && customElements.get(RAPIDOC_ELEMENT)) { + return resolve(); + } + + const script = document.createElement('script'); + script.type = 'module'; + script.src = RAPIDOC_CDN; + + script.onload = () => { + // Poll for custom element registration + const startTime = Date.now(); + const pollInterval = setInterval(() => { + if (customElements.get(RAPIDOC_ELEMENT)) { + clearInterval(pollInterval); + resolve(); + } else if (Date.now() - startTime > timeout) { + clearInterval(pollInterval); + reject(new Error('RapiDoc Mini custom element not registered')); + } + }, 50); + }; + + script.onerror = () => { + scriptLoadPromise = null; // Reset on error for retry + reject(new Error(`Failed to load RapiDoc Mini from ${RAPIDOC_CDN}`)); + }; + + 
document.head.appendChild(script); + }); + + return scriptLoadPromise; +} + +/** + * Get current theme from localStorage + */ +function getTheme(): 'dark' | 'light' { + const theme = getPreference('theme'); + return theme === 'dark' ? 'dark' : 'light'; +} + +/** + * Get theme configuration for RapiDoc Mini + * Colors matched to Hugo theme SCSS variables: + * - Dark: _theme-dark.scss + * - Light: _theme-light.scss + */ +function getThemeConfig(isDark: boolean): ThemeConfig { + return isDark + ? { + theme: 'dark', + bgColor: '#14141F', // $grey10 ($article-bg in dark theme) + textColor: '#D4D7DD', // $g15-platinum ($article-text in dark theme) + primaryColor: '#00A3FF', // $b-pool ($article-link) + navBgColor: '#07070E', // $grey5 ($body-bg in dark theme) + navTextColor: '#D4D7DD', // $g15-platinum ($nav-item) + navHoverBgColor: '#00A3FF', // $b-pool + navHoverTextColor: '#FFFFFF', // $g20-white + } + : { + theme: 'light', + bgColor: '#FFFFFF', // $g20-white ($article-bg in light theme) + textColor: '#020a47', // $br-dark-blue ($article-text in light theme) + primaryColor: '#00A3FF', // $b-pool ($article-link) + navBgColor: '#f3f4fb', // $body-bg in light theme + navTextColor: '#757888', // $g9-mountain ($nav-item) + navHoverBgColor: '#BF2FE5', // $br-magenta ($nav-item-hover) + navHoverTextColor: '#FFFFFF', // $g20-white + }; +} + +/** + * Apply theme attributes to RapiDoc Mini element + */ +function applyTheme(element: HTMLElement): void { + const isDark = getTheme() === 'dark'; + const config = getThemeConfig(isDark); + + // Core theme colors + element.setAttribute('theme', config.theme); + element.setAttribute('bg-color', config.bgColor); + element.setAttribute('text-color', config.textColor); + element.setAttribute('primary-color', config.primaryColor); + + // Navigation colors (for any internal nav elements) + element.setAttribute('nav-bg-color', config.navBgColor); + element.setAttribute('nav-text-color', config.navTextColor); + 
element.setAttribute('nav-hover-bg-color', config.navHoverBgColor); + element.setAttribute('nav-hover-text-color', config.navHoverTextColor); + + // Accent color - prevent green defaults + element.setAttribute('nav-accent-color', config.primaryColor); +} + +/** + * Build match pattern that identifies operations within a spec. + * + * When using path-specific specs (recommended), the spec contains only one path, + * so matchPaths is just the HTTP method (e.g., "post"). The path isolation at the + * file level prevents substring matching issues - no title needed. + * + * When using tag-based specs (fallback), matchPaths includes the full path + * (e.g., "post /api/v3/configure/token/admin"). Adding the title helps differentiate + * operations whose paths are prefixes of each other. + * + * RapiDoc's search string format: + * `${method} ${path} ${summary} ${description} ${operationId} ${tagName}`.toLowerCase() + * + * @param matchPaths - The match pattern: just method for path-specific specs, + * or "method /path" for tag-based specs + * @param title - Optional page title to append (only used for tag-based specs) + * @returns Pattern for RapiDoc's match-paths attribute + */ +function buildMatchPattern(matchPaths: string, title?: string): string { + // Detect path-specific spec mode: matchPaths is just an HTTP method (no path) + const isMethodOnly = /^(get|post|put|patch|delete|options|head|trace)$/i.test( + matchPaths.trim() + ); + + // For path-specific specs: use method only, title not needed (path isolated at file level) + // For tag-based specs: append title to differentiate prefix conflicts + if (title && !isMethodOnly) { + return `${matchPaths} ${title.toLowerCase()}`; + } + return matchPaths; +} + +/** + * Create RapiDoc Mini element with configuration + */ +function createRapiDocElement( + specUrl: string, + matchPaths?: string, + title?: string +): HTMLElement { + const element = document.createElement(RAPIDOC_ELEMENT); + + // Core attributes + 
element.setAttribute('spec-url', specUrl); + + // Set match-paths filter. With path-specific specs, this is just the method. + // With tag-based specs, includes path + optional title for uniqueness. + if (matchPaths) { + element.setAttribute('match-paths', buildMatchPattern(matchPaths, title)); + } + + // Typography - match docs theme fonts + element.setAttribute( + 'regular-font', + 'Proxima Nova, -apple-system, BlinkMacSystemFont, sans-serif' + ); + element.setAttribute( + 'mono-font', + 'IBM Plex Mono, Monaco, Consolas, monospace' + ); + element.setAttribute('font-size', 'default'); // Match surrounding content size + + // Layout - use 'read' style for compact, single-operation display + // + // EXPERIMENTAL FINDINGS (Task 4 - API Security Schemes): + // ----------------------------------------------------- + // RapiDoc's `allow-authentication="true"` DOES NOT show auth input + // on operation pages when using `match-paths` to filter to a single + // operation. Here's what was tested: + // + // 1. render-style="read" + allow-authentication="true": + // - Auth section (#auth) exists in shadow DOM with input fields + // - BUT it's not visible (filtered out by match-paths) + // - Only shows the matched operation, not the full spec + // - Found: username/password inputs for Basic auth in shadow DOM + // - Result: NO visible auth UI for users + // + // 2. render-style="focused" + allow-authentication="true": + // - Auth section completely removed from shadow DOM + // - Shows links to #auth section that don't exist (broken links) + // - Lists security schemes but no input fields + // - Result: NO auth section at all + // + // CONCLUSION: + // RapiDoc's built-in authentication UI is incompatible with + // match-paths filtering. The auth section is either hidden or + // completely removed when filtering to single operations. + // For credential input on operation pages, we need a custom + // component (Task 5). 
+ // + // RECOMMENDATION: + // - Keep render-style="read" for compact operation display + // - Implement custom auth input component above RapiDoc (Task 5) + // - Use sessionStorage to pass credentials to "Try it" feature + element.setAttribute('layout', 'column'); + element.setAttribute('render-style', 'read'); + element.setAttribute('show-header', 'false'); + element.setAttribute('allow-server-selection', 'false'); + + // Schema display - use 'table' style to reduce parameter indentation + element.setAttribute('schema-style', 'table'); + element.setAttribute('default-schema-tab', 'schema'); + element.setAttribute('paths-expanded', 'true'); + element.setAttribute('schema-expand-level', '1'); + + // Interactivity + element.setAttribute('allow-try', 'true'); + element.setAttribute('fill-request-fields-with-example', 'true'); + + // Reduce excessive spacing + element.setAttribute('use-path-in-nav-bar', 'false'); + element.setAttribute('show-info', 'false'); + + // Authentication display - hide RapiDoc's built-in auth section + // We use a custom popover component for credential input instead + // Credentials are applied via HTML attributes (api-key-name, api-key-value) + // and the setApiKey() JavaScript API + element.setAttribute('allow-authentication', 'false'); + element.setAttribute('show-components', 'false'); + + // Custom CSS for internal style overrides (table layout, etc.) 
+ element.setAttribute('css-file', '/css/rapidoc-custom.css'); + + // Override method colors to use theme primary color instead of green + element.setAttribute('post-color', '#00A3FF'); // $b-pool instead of green + element.setAttribute('get-color', '#00A3FF'); + element.setAttribute('put-color', '#9394FF'); // $br-galaxy + element.setAttribute('delete-color', '#BF3D5E'); // $r-ruby + element.setAttribute('patch-color', '#9394FF'); + + // Apply initial theme + applyTheme(element); + + return element; +} + +/** + * Inject custom styles into RapiDoc's shadow DOM + * Removes the top border and reduces whitespace above operations + */ +function injectShadowStyles(element: HTMLElement): void { + const tryInject = (): boolean => { + const shadowRoot = (element as unknown as { shadowRoot: ShadowRoot | null }) + .shadowRoot; + if (!shadowRoot) return false; + + // Check if styles already injected + if (shadowRoot.querySelector('#rapidoc-custom-styles')) return true; + + const style = document.createElement('style'); + style.id = 'rapidoc-custom-styles'; + style.textContent = ` + /* Hide the operation divider line */ + .divider[part="operation-divider"] { + display: none !important; + } + + /* Reduce spacing above operation sections */ + .section-gap { + padding-top: 0 !important; + } + + /* Hide RapiDoc's built-in security section - we show our own */ + /* Target the authorization requirements shown near each operation */ + .api-key, + .api-key-info, + .security-info-button, + [class*="api-key"], + [class*="security-info"], + .m-markdown-small:has(.lock-icon), + div:has(> .lock-icon), + /* Target the section showing "AUTHORIZATIONS:" or similar */ + .req-resp-container > div:first-child:has(svg[style*="lock"]), + /* Target lock icons and their parent containers */ + svg.lock-icon, + .lock-icon, + /* Wide selectors for security-related elements */ + [part="section-operation-security"], + .expanded-endpoint-body > div:first-child:has([class*="lock"]) { + display: none 
!important; + } + `; + shadowRoot.appendChild(style); + + // Hide security badge elements by examining content + const hideSecurityBadge = () => { + // Find elements containing security-related text and hide their container + const allElements = shadowRoot.querySelectorAll('span, div'); + allElements.forEach((el) => { + const text = el.textContent?.trim(); + // Find leaf elements that contain authorization-related text + if ( + el.children.length === 0 && + (text === 'HTTP Bearer' || + text === 'Bearer' || + text === 'AUTHORIZATIONS:' || + text === 'Authorization' || + text === 'api_token' || + text === 'BearerAuthentication') + ) { + // Walk up the DOM to find a suitable container to hide + // This hides both the text AND any sibling icons (like lock) + let target: HTMLElement = el as HTMLElement; + let parent: HTMLElement | null = el.parentElement; + let depth = 0; + while (parent && depth < 4) { + // Stop at reasonable container boundaries + if ( + parent.classList.contains('expanded-endpoint-body') || + parent.classList.contains('req-resp-container') || + parent.tagName === 'SECTION' + ) { + break; + } + target = parent; + parent = parent.parentElement; + depth++; + } + target.style.display = 'none'; + } + }); + }; + + // Run immediately and after delays for dynamic content + hideSecurityBadge(); + setTimeout(hideSecurityBadge, 300); + setTimeout(hideSecurityBadge, 800); + + // Watch for dynamically added security elements + const observer = new MutationObserver(() => { + hideSecurityBadge(); + }); + observer.observe(shadowRoot, { + childList: true, + subtree: true, + }); + + // Disconnect after 5 seconds to avoid performance issues + setTimeout(() => observer.disconnect(), 5000); + + return true; + }; + + // Try immediately + if (tryInject()) return; + + // Retry a few times as shadow DOM may not be ready + let attempts = 0; + const maxAttempts = 10; + const interval = setInterval(() => { + attempts++; + if (tryInject() || attempts >= maxAttempts) { + 
clearInterval(interval); + } + }, 100); +} + +/** + * Watch for theme changes and update RapiDoc element + */ +function watchThemeChanges(container: HTMLElement): CleanupFn { + let currentElement: HTMLElement | null = + container.querySelector(RAPIDOC_ELEMENT); + + const handleThemeChange = (): void => { + if (currentElement) { + applyTheme(currentElement); + } + }; + + // Watch stylesheet changes (Hugo theme.js enables/disables stylesheets) + const styleObserver = new MutationObserver((mutations) => { + for (const mutation of mutations) { + if ( + mutation.type === 'attributes' && + mutation.target instanceof HTMLLinkElement && + mutation.target.title?.includes('theme') + ) { + handleThemeChange(); + break; + } + } + }); + + const head = document.querySelector('head'); + if (head) { + styleObserver.observe(head, { + attributes: true, + attributeFilter: ['disabled'], + subtree: true, + }); + } + + // Watch localStorage changes from other tabs + const storageHandler = (event: StorageEvent): void => { + if (event.key === 'influxdata_docs_preferences' && event.newValue) { + try { + const prefs = JSON.parse(event.newValue); + if (prefs.theme) { + handleThemeChange(); + } + } catch (error) { + console.error('[RapiDoc Mini] Failed to parse preferences:', error); + } + } + }; + + window.addEventListener('storage', storageHandler); + + // Return cleanup function + return (): void => { + styleObserver.disconnect(); + window.removeEventListener('storage', storageHandler); + }; +} + +/** + * Show error message in container + */ +function showError(container: HTMLElement, message: string): void { + container.innerHTML = ` +
+

Error loading API documentation

+

${message}

+
+ `; +} + +/** + * Initialize RapiDoc Mini component + */ +export default async function RapiDocMini({ + component, +}: ComponentOptions): Promise { + try { + // Get configuration from data attributes + const specUrl = component.dataset.specUrl; + const matchPaths = component.dataset.matchPaths; + const title = component.dataset.title; + + if (!specUrl) { + console.error('[RapiDoc Mini] No data-spec-url attribute provided'); + showError(component, 'No API specification configured.'); + return; + } + + // Load RapiDoc Mini from CDN (memoized) + try { + await loadRapiDocScript(); + } catch (error) { + console.error('[RapiDoc Mini] Failed to load from CDN:', error); + showError( + component, + 'Failed to load API viewer. Please refresh the page.' + ); + return; + } + + // Create and append RapiDoc Mini element + const rapiDocElement = createRapiDocElement(specUrl, matchPaths, title); + component.appendChild(rapiDocElement); + + // Inject custom styles into shadow DOM to remove borders/spacing + injectShadowStyles(rapiDocElement); + + // Watch for theme changes and return cleanup function + return watchThemeChanges(component); + } catch (error) { + console.error('[RapiDoc Mini] Component initialization error:', error); + showError(component, 'API viewer failed to initialize.'); + } +} diff --git a/assets/js/content-interactions.js b/assets/js/content-interactions.js index eb9b4e1bc0..4c2a374c0a 100644 --- a/assets/js/content-interactions.js +++ b/assets/js/content-interactions.js @@ -122,21 +122,29 @@ function expandAccordions() { // Expand accordions on load based on URL anchor function openAccordionByHash() { - var anchor = window.location.hash; + var hash = window.location.hash; + if (!hash || hash.length <= 1) return; + + // Use native DOM method to handle special characters in IDs (like /) + var id = hash.substring(1); // Remove leading # + var anchorElement = document.getElementById(id); + if (!anchorElement) return; + + var $anchor = $(anchorElement); function 
expandElement() { - if ($(anchor).parents('.expand').length > 0) { - return $(anchor).closest('.expand').children('.expand-label'); - } else if ($(anchor).hasClass('expand')) { - return $(anchor).children('.expand-label'); + if ($anchor.parents('.expand').length > 0) { + return $anchor.closest('.expand').children('.expand-label'); + } else if ($anchor.hasClass('expand')) { + return $anchor.children('.expand-label'); } + return null; } - if (expandElement() != null) { - if (expandElement().children('.expand-toggle').hasClass('open')) { - // Do nothing? - } else { - expandElement().children('.expand-toggle').trigger('click'); + var $expandLabel = expandElement(); + if ($expandLabel != null) { + if (!$expandLabel.children('.expand-toggle').hasClass('open')) { + $expandLabel.children('.expand-toggle').trigger('click'); } } } diff --git a/assets/js/main.js b/assets/js/main.js index 826ad9a116..5cfb750988 100644 --- a/assets/js/main.js +++ b/assets/js/main.js @@ -46,6 +46,10 @@ import SidebarSearch from './components/sidebar-search.js'; import { SidebarToggle } from './sidebar-toggle.js'; import Theme from './theme.js'; import ThemeSwitch from './theme-switch.js'; +import ApiAuthInput from './components/api-auth-input.ts'; +import ApiRapiDoc from './components/api-rapidoc.ts'; +import ApiToc from './components/api-toc.ts'; +import RapiDocMini from './components/rapidoc-mini.ts'; /** * Component Registry @@ -77,6 +81,10 @@ const componentRegistry = { 'sidebar-toggle': SidebarToggle, theme: Theme, 'theme-switch': ThemeSwitch, + 'api-auth-input': ApiAuthInput, + 'api-rapidoc': ApiRapiDoc, + 'api-toc': ApiToc, + 'rapidoc-mini': RapiDocMini, }; /** diff --git a/assets/styles/layouts/_api-layout.scss b/assets/styles/layouts/_api-layout.scss new file mode 100644 index 0000000000..c3c6012c5a --- /dev/null +++ b/assets/styles/layouts/_api-layout.scss @@ -0,0 +1,786 @@ +/////////////////////////////// API Reference Layout /////////////////////////////// +// +// 3-column layout for 
API reference documentation: +// - Left: Existing Hugo sidebar + API navigation section +// - Center: Content with page-level tabs (Operations | Server | Auth | Compatibility) +// - Right: "ON THIS PAGE" table of contents +// +//////////////////////////////////////////////////////////////////////////////// + +// Content wrapper becomes flex container when used with API content +// Override overflow:hidden from _content-wrapper.scss to enable sticky positioning +.content-wrapper.api-content { + display: flex; + flex-direction: row; + align-items: flex-start; + overflow: visible; // Required for sticky TOC to work +} + +// Main API content area (center column) +.api-main { + flex: 1; + min-width: 0; // Prevent flex item from overflowing + padding-right: 1rem; +} + +// Right-side TOC (third column) +.api-toc { + width: 200px; + flex-shrink: 0; + position: sticky; + top: 80px; // Account for fixed header height + align-self: flex-start; // Critical for sticky to work in flexbox + max-height: calc(100vh - 100px); + overflow-y: auto; + padding: 1rem; + border-left: 1px solid $nav-border; + + // Hidden state (used when Operations/RapiDoc tab is active) + &.is-hidden { + display: none; + } + + &-header { + font-size: 0.75rem; + font-weight: $bold; + text-transform: uppercase; + letter-spacing: 0.08rem; + color: rgba($article-heading, 0.5); + margin: 0 0 1rem; + } + + &-nav { + // TOC list styles + .api-toc-list { + list-style: none; + margin: 0; + padding: 0; + } + + .api-toc-item { + margin: 0; + + &--nested { + padding-left: 0.75rem; + } + } + + .api-toc-link { + display: block; + padding: 0.35rem 0; + font-size: 0.85rem; + color: $nav-item; + text-decoration: none; + transition: color 0.2s; + line-height: 1.4; + + &:hover { + color: $nav-item-hover; + } + + &.is-active { + color: $nav-active; + font-weight: $medium; + } + } + } + + &-empty { + font-size: 0.85rem; + color: rgba($article-text, 0.5); + font-style: italic; + } + + // Operations-based TOC (for tag-based 
pages) + &-nav .api-toc-list--operations { + .api-toc-item--operation { + margin: 0.35rem 0; + } + + .api-toc-link--operation { + display: flex; + align-items: center; + gap: 0.5rem; + font-size: 0.8rem; + padding: 0.3rem 0; + } + + // HTTP method badges in TOC + .api-method { + display: inline-block; + font-size: 0.6rem; + font-weight: $bold; + text-transform: uppercase; + padding: 0.15rem 0.3rem; + border-radius: 3px; + min-width: 2.2rem; + text-align: center; + flex-shrink: 0; + + &--get { background-color: $b-pool; color: #fff; } // #00A3FF - bright brand blue + &--post { background-color: $gr-rainforest; color: #fff; } // #34BB55 - bright brand green + &--put { background-color: $y-pineapple; color: #fff; } // #FFB94A - bright yellow (distinct from red) + &--patch { background-color: $br-new-purple; color: #fff; } // #9b2aff - distinctive brand purple + &--delete { background-color: $r-curacao; color: #fff; } // #F95F53 - bright brand red + } + + .api-path { + font-family: $code; + font-size: 0.75rem; + word-break: break-all; + color: inherit; + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +////////////////////////// Operations List (Main Content) ////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// Operations list section +.api-operations-list { + margin: 2rem 0; + + h2 { + margin-bottom: 1rem; + } +} + +// Grid container for operation cards +.api-operations-grid { + display: flex; + flex-direction: column; + gap: 0.5rem; +} + +// Individual operation card (clickable link) +.api-operation-card { + display: flex; + align-items: flex-start; + gap: 0.75rem; + padding: 0.75rem 1rem; + background: rgba($article-bg, 0.5); + border: 1px solid $nav-border; + border-radius: $radius; + text-decoration: none; + color: $article-text; + transition: background-color 0.2s, border-color 0.2s; + + &:hover { + background: rgba($article-bg, 0.8); + border-color: 
$nav-item-hover; + } + + // HTTP method badge + .api-method { + display: inline-block; + font-size: 0.7rem; + font-weight: $bold; + text-transform: uppercase; + padding: 0.2rem 0.4rem; + border-radius: 3px; + min-width: 3.5rem; + text-align: center; + flex-shrink: 0; + margin-top: 0.15rem; + border: 2px solid; + background-color: transparent; + + &--get { border-color: $b-pool; color: $b-pool; } // #00A3FF - bright brand blue + &--post { border-color: $gr-rainforest; color: $gr-rainforest; } // #34BB55 - bright brand green + &--put { border-color: $y-pineapple; color: $y-pineapple; } // #FFB94A - bright yellow (distinct from red) + &--patch { border-color: $br-new-purple; color: $br-new-purple; } // #9b2aff - distinctive brand purple + &--delete { border-color: $r-curacao; color: $r-curacao; } // #F95F53 - bright brand red + } + + // API path in monospace + .api-path { + font-family: $code; + font-size: 0.9rem; + color: $article-heading; + word-break: break-all; + flex-shrink: 0; + } + + // Operation summary text + .api-operation-summary { + font-size: 0.875rem; + color: rgba($article-text, 0.8); + flex: 1; + } +} + +// Overview/Description section +.api-description { + margin: 2rem 0; + color: $article-text !important; // Override any inherited black color + + h2 { + margin-bottom: 1rem; + } + + // Ensure description text is visible and readable + p, ul, ol, pre, code { + color: $article-text !important; + opacity: 1; + } + + // Also ensure direct text nodes use correct color + & > * { + color: $article-text !important; + } +} + +//////////////////////////////////////////////////////////////////////////////// +////////////////////////// API Navigation in Sidebar /////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// API navigation wrapper - controls visibility +// Hidden by default, revealed via JS (localStorage) or on API pages +.api-nav-wrapper { + display: none; // Hidden by default + + &.is-revealed 
{ + display: block; // Revealed via JS + } + + // Always show on API pages (server-rendered with .api-reference class) + .api-reference & { + display: block; + } +} + +// API navigation section added to the existing Hugo sidebar +.api-nav { + margin-top: 2rem; + padding-top: 1rem; + border-top: 1px solid $nav-border; + + &-header { + font-size: 0.85rem; + font-weight: $bold; + text-transform: uppercase; + letter-spacing: 0.06rem; + color: rgba($article-heading, 0.6); + margin: 0 0 1rem; + padding-left: 1.5rem; + } + + // API nav groups (collapsible sections) + &-group { + margin-bottom: 0.5rem; + + &-header { + display: flex; + align-items: center; + padding: 0.5rem 0 0.5rem 1.5rem; + font-weight: $medium; + color: $nav-category; + cursor: pointer; + transition: color 0.2s; + // Button reset for dark mode compatibility + background: none; + border: none; + width: 100%; + text-align: left; + font-size: 1.2rem; // Match sidebar .nav-category > a (19.2px) + font-family: inherit; + text-decoration: none; // For anchor version + + &:hover { + color: $nav-category-hover; + } + + &.is-active { + color: $nav-active; + } + + // Collapse/expand indicator (for button headers) + &::before { + content: ""; + display: inline-block; + width: 0; + height: 0; + margin-right: 0.5rem; + border-left: 5px solid $nav-border; + border-top: 4px solid transparent; + border-bottom: 4px solid transparent; + transition: transform 0.2s; + flex-shrink: 0; + } + + &.is-open::before { + transform: rotate(90deg); + } + } + + // For anchor headers, keep the ::before arrow (same as button) + // No special handling needed - anchor headers look the same as button headers + a#{&}-header { + // Same styling as button, arrow works via ::before + } + + &-items { + list-style: none; + padding-left: 2.5rem; + margin: 0; + max-height: 0; + overflow: hidden; + transition: max-height 0.3s ease-out; + background: $body-bg; // Match sidebar background + + &.is-open { + max-height: 2000px; // Large enough to show 
all operations + } + } + } + + // Individual API nav items + &-item { + margin: 0.25rem 0; + position: relative; + + a { + display: flex; + align-items: center; + padding: 0.35rem 0; + color: $nav-item; + text-decoration: none; + font-size: 0.95rem; + transition: color 0.2s; + + &:hover { + color: $nav-item-hover; + } + } + + &.is-active a { + color: $nav-active; + font-weight: $medium; + } + + // HTTP method badge (legacy class) + .method-badge { + display: inline-block; + font-size: 0.65rem; + font-weight: $bold; + text-transform: uppercase; + padding: 0.15rem 0.35rem; + margin-right: 0.5rem; + border-radius: 3px; + min-width: 2.5rem; + text-align: center; + + &.get { background-color: $gr-rainforest; color: #fff; } + &.post { background-color: $b-ocean; color: #fff; } + &.put { background-color: $br-galaxy; color: #fff; } + &.patch { background-color: $y-thunder; color: rgba($g5-pepper, 0.75); } + &.delete { background-color: $r-curacao; color: #fff; } + } + + // Tag items that link to tag pages + &.api-nav-tag { + > a { + font-weight: $medium; + } + + // Nested operations list under tag + .api-nav-operations { + list-style: none; + margin: 0.25rem 0 0.5rem; + padding-left: 0.75rem; + + .api-nav-operation { + margin: 0.15rem 0; + + a { + display: flex; + align-items: center; + gap: 0.4rem; + font-size: 0.85rem; + padding: 0.25rem 0; + } + } + } + } + + // Operation items with method badges + &.api-nav-operation, + .api-nav-operation { + .api-method { + display: inline-block; + font-size: 0.55rem; + font-weight: $bold; + text-transform: uppercase; + padding: 0.1rem 0.25rem; + border-radius: 3px; + min-width: 2rem; + text-align: center; + flex-shrink: 0; + + &--get { background-color: $b-pool; color: #fff; } // #00A3FF - bright brand blue + &--post { background-color: $gr-rainforest; color: #fff; } // #34BB55 - bright brand green + &--put { background-color: $y-pineapple; color: #fff; } // #FFB94A - bright yellow (distinct from red) + &--patch { background-color: 
$br-new-purple; color: #fff; } // #9b2aff - distinctive brand purple + &--delete { background-color: $r-curacao; color: #fff; } // #F95F53 - bright brand red + } + + .api-path { + font-family: $code; + font-size: 0.85rem; + word-break: break-all; + color: inherit; + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +/////////////////////////// API Header with Actions //////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// Header row with title and download button +.article--header-row { + display: flex; + justify-content: space-between; + align-items: flex-start; + gap: 1rem; + flex-wrap: wrap; +} + +.article--header-text { + flex: 1 1 100%; // Take full width, allowing download button to wrap + min-width: 0; +} + +// Summary paragraph in header - ensure full width +.article--summary { + max-width: none; + width: 100%; +} + +// Download OpenAPI spec button +.api-spec-actions { + flex-shrink: 0; +} + +.api-spec-download { + display: inline-flex; + align-items: center; + gap: 0.5rem; + padding: 0.5rem 1rem; + background-color: $g20-white; + color: $article-text; + text-decoration: none; + border-radius: $radius; + font-size: 0.875rem; + font-weight: $medium; + transition: background-color 0.2s, color 0.2s; + border: 1px solid $nav-border; + white-space: nowrap; + + &:hover { + background-color: $r-curacao; + color: $g20-white; + border-color: $r-curacao; + } + + svg { + flex-shrink: 0; + } +} + +//////////////////////////////////////////////////////////////////////////////// +////////////////////////////////// API Tabs //////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// API-specific tab wrapper (uses api-tabs-wrapper to avoid conflict with +// tabbed-content.js which handles .tabs-wrapper elements) +.api-tabs-wrapper { + margin: 1.5rem 0 1rem; +} + +// API tab navigation bar +.api-tabs-nav 
{ + display: flex; + flex-wrap: wrap; + gap: 2px; + + a { + flex-grow: 1; + position: relative; + font-size: 1rem; + font-weight: $medium; + padding: 0.65rem 1.25rem; + display: inline-block; + white-space: nowrap; + text-align: center; + color: $article-tab-text !important; + border-radius: $radius; + background-color: $article-tab-bg; + text-decoration: none; + transition: background-color 0.2s, color 0.2s; + z-index: 1; + + &::after { + content: ''; + position: absolute; + display: block; + top: 0; + right: 0; + width: 100%; + height: 100%; + border-radius: $radius; + @include gradient($article-btn-gradient); + opacity: 0; + transition: opacity 0.2s; + z-index: -1; + } + + &:hover { + color: $article-tab-active-text !important; + &::after { + opacity: 1; + } + } + + &.is-active { + color: $article-tab-active-text !important; + &::after { + opacity: 1; + @include gradient($article-btn-gradient); + } + } + } +} + +// Tab panels container +.api-tab-panels { + // Tab content visibility (follows existing pattern) + .tab-content:not(:first-of-type) { + display: none; + } + + // RapiDoc container styling + rapi-doc { + display: block; + width: 100%; + min-height: 400px; + } +} + +//////////////////////////////////////////////////////////////////////////////// +////////////////////////////// RapiDoc Overrides /////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// Hide RapiDoc's internal navigation (we provide our own) +rapi-doc::part(section-navbar) { + display: none !important; +} + +// Hide RapiDoc's internal tag headers/titles (we use custom tabs for navigation) +// label-tag-title is the "PROCESSING ENGINE" header with auth badges shown in tag groups +rapi-doc::part(label-tag-title) { + display: none !important; +} + +// Hide RapiDoc's authentication section (we have separate Auth tab) +rapi-doc::part(section-auth) { + display: none !important; +} + +// Ensure RapiDoc content fills available space 
+rapi-doc::part(section-main-content) { + padding: 0; +} + +// Match RapiDoc's operation section styling to our theme +rapi-doc::part(section-operations) { + padding: 0; +} + +//////////////////////////////////////////////////////////////////////////////// +////////////////////////// Authentication Tab Content ////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +.api-auth-content { + max-width: 800px; +} + +.api-auth-card { + background: $article-bg; + border: 1px solid $nav-border; + border-radius: $radius; + padding: 1.5rem; + margin-bottom: 1.5rem; + + h3 { + margin-top: 0; + margin-bottom: 0.5rem; + } + + h4 { + margin-top: 1rem; + margin-bottom: 0.5rem; + font-size: 0.9rem; + text-transform: uppercase; + letter-spacing: 0.05em; + color: rgba($article-text, 0.6); + } + + pre { + margin: 0.5rem 0; + padding: 1rem; + background: $article-code-bg; + border-radius: $radius; + overflow-x: auto; + } + + code { + font-family: $code; + font-size: 0.875rem; + } +} + +.api-auth-badge .badge { + display: inline-block; + padding: 0.25rem 0.5rem; + font-size: 0.75rem; + font-weight: $bold; + text-transform: uppercase; + border-radius: $radius; + + &.recommended { + background: $gr-rainforest; + color: $g20-white; + } +} + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////// Server Tab Content //////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +.api-server-panel { + max-width: 600px; + + h2 { + margin-top: 0; + } +} + +.server-url-config { + display: flex; + gap: 0.5rem; + align-items: flex-end; + margin: 1rem 0; + flex-wrap: wrap; + + label { + width: 100%; + font-weight: $medium; + margin-bottom: 0.25rem; + } + + input { + flex: 1; + min-width: 200px; + padding: 0.5rem; + border: 1px solid $nav-border; + border-radius: $radius; + font-family: $code; + background: $article-bg; + color: 
$article-text; + } + + button { + padding: 0.5rem 1rem; + background: $r-curacao; + color: $g20-white; + border: none; + border-radius: $radius; + cursor: pointer; + font-weight: $medium; + + &:hover { + background: darken($r-curacao, 10%); + } + } +} + +.server-info { + margin-top: 1.5rem; + + ul { + list-style: disc; + padding-left: 1.5rem; + } + + li { + margin: 0.5rem 0; + } + + code { + background: $article-code-bg; + padding: 0.2rem 0.4rem; + border-radius: 3px; + font-family: $code; + } +} + +//////////////////////////////////////////////////////////////////////////////// +///////////////////////////////// MEDIA QUERIES //////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// Tablet: Hide TOC, keep sidebar +@include media(large) { + .content-wrapper.api-content { + flex-direction: column; + } + + .api-toc { + display: none; + } + + .api-main { + padding-right: 0; + } +} + +// Mobile: Standard Hugo sidebar behavior +@include media(medium) { + .content-wrapper.api-content { + flex-direction: column; + } + + .api-toc { + display: none; + } + + .api-main { + padding-right: 0; + } + + // Collapse API nav in mobile view + .api-nav { + margin-top: 1rem; + padding-top: 0.5rem; + + &-group-items { + max-height: none; // Show all items by default in mobile + } + } +} + +// Large screens: Wider TOC +@include media(xlarge) { + .api-toc { + width: 240px; + } +} + +// Match sidebar responsive widths +@media (min-width: 801px) and (max-width: 1200px) { + .api-toc { + width: 180px; + } +} diff --git a/assets/styles/layouts/_api-overrides.scss b/assets/styles/layouts/_api-overrides.scss index bc220588f8..f108911e24 100644 --- a/assets/styles/layouts/_api-overrides.scss +++ b/assets/styles/layouts/_api-overrides.scss @@ -1,9 +1,17 @@ +//////////////////////////////////////////////////////////////////////////////// +// API Documentation Style Overrides +// +// Provides loading spinner and reusable API-related 
styles. +// Note: Legacy Redoc-specific overrides have been removed in favor of +// Scalar/RapiDoc renderers which use CSS custom properties for theming. +//////////////////////////////////////////////////////////////////////////////// + @import "tools/color-palette"; @import "tools/fonts"; // Fonts $proxima: 'Proxima Nova', sans-serif; -$code: 'IBM Plex Mono', monospace;; +$code: 'IBM Plex Mono', monospace; // Font weights $medium: 500; @@ -22,7 +30,7 @@ $bold: 700; } @keyframes spinner { - to {transform: rotate(360deg);} + to { transform: rotate(360deg); } } .spinner:before { @@ -41,256 +49,15 @@ $bold: 700; animation: spinner .6s linear infinite; } -//////////////////////////////// InfluxDB Header /////////////////////////////// - -#influx-header { - font-family: $proxima; - padding: 10px ; - display: flex; - align-items: center; - justify-content: space-between; - background-color: $g2-kevlar; - a { - text-decoration: none; - &.back { - color: $g20-white; - transition: color .2s; - &:hover { - color: $b-pool; - } - &:before { - content: "\e919"; - font-family: 'icomoon-v2'; - margin-right: .65rem; - } - } - &.btn { - padding: .5rem .75rem .5rem .65rem; - font-size: .85rem; - font-weight: 500; - color: $g15-platinum; - background: $g5-pepper; - border-radius: 4.5px; - transition: all .2s; - &:before { - content: "\e934"; - display: inline-block; - font-size: .95rem; - margin-right: .5rem; - font-family: 'icomoon-v2'; - } - &:hover { - color: $g20-white; - background: $b-pool; - } - } - } -} - -// Header Media Queries - -@media (max-width: 600px) { - #influx-header span.version {display: none;} -} - +//////////////////////////////////////////////////////////////////////////////// +/////////////////////////// HTTP Method Badge Colors /////////////////////////// //////////////////////////////////////////////////////////////////////////////// -.cjtbAK { - h1,h2,h3,h4,h5,h6, - p,li,th,td { - font-family: $proxima !important; - } -} - -#redoc { - h1,h2,h3 { - 
font-weight: $medium !important; - } -} - -// Section title padding -.dluJDj { - padding: 20px 0; -} - -// Page h1 -.dTJWQH { - color: $g7-graphite; - font-size: 2rem; -} - -// Download button -.jIdpVJ { - background: $b-dodger; - color: $g20-white; - border: none; - border-radius: 3px; - font-family: $proxima; - font-size: .85rem; - font-weight: $medium; - transition: background-color .2s; - &:hover { - background-color: $b-pool; - } -} - -// Tag h1s -.WxWXp { - color: $g7-graphite; - font-size: 1.75rem; -} - -// Summaru h2s and table headers -.ioYTqA, .bxcHYI, .hoUoen { - color: $g7-graphite; -} - -// h3s -.espozG { - color: $g8-storm; -} - -// Links -.bnFPhO a { color: $b-dodger; - &:visited {color: $b-dodger;} -} - -.redoc-json { - font-family: $code !important; -} - -// Inline Code -.flfxUM code, -.gDsWLk code, -.kTVySD { - font-family: $code !important; - color: $cp-marguerite; - background: $cp-titan; - border-color: $cp-titan; -} - -// Required tags -.jsTAxL { - color: $r-curacao; -} - -///////////////////////////// RESPONSE COLOR BLOCKS //////////////////////////// - -// Green -.hLVzSF, .fDvFMp { - background-color: rgba($gr-honeydew, .2); - color: $gr-emerald; -} - -// Red -.byLrBg { - background-color: rgba($r-curacao, .1); - color: $r-curacao; -} - - - -/////////////////////////////////// LEFT NAV /////////////////////////////////// - -// Left nav background -.gZdDsM { - background-color: $g19-ghost; -} - -.gpbcFk:hover, .sc-eTuwsz.active { - background-color: $g17-whisper; -} - -// List item text -.SmuWE, .gcUzvG, .bbViyS, .sc-hrWEMg label { - font-family: $proxima !important; -} - -.fyUykq { - font-weight: $medium; -} - -// Request method tags -.cFwMcp { - &.post { background-color: $b-ocean; } - &.get { background-color: $gr-rainforest; } - &.put { background-color: $br-galaxy; } - &.patch { background-color: $y-thunder; color: rgba($g5-pepper, .75);} - &.delete { background-color: $r-curacao; } -} - -// Active nav section -.gcUzvG, .iNzLCk:hover { - 
color: $br-magenta; -} - -/////////////////////////////// RIGHT CODE COLUMN ////////////////////////////// - -// Right column backgrounds -.dtUibw, .fLUKgj { - background-color: $g2-kevlar; - h3,h4,h5,h6 { - font-family: $proxima !important; - font-weight: $medium !important; - } -} - -// Code backgrounds -.irpqyy > .react-tabs__tab-panel { - background-color: $g0-obsidian; -} -.dHLKeu, .fVaxnA { - padding-left: 10px; - background-color: $g0-obsidian; -} - -// Response code tabs -.irpqyy > ul > li { - background-color: $g0-obsidian; - border-radius: 3px; - &.react-tabs__tab--selected{ color: $br-pulsar;} - &.tab-error { color: $r-fire; } - &.tab-success { color: $gr-viridian; } -} - -// Request methods -.bNYCAJ, -.jBjYbV, -.hOczRB, -.fRsrDc, -.hPskZd { - font-family: $proxima; - font-weight: $medium; - letter-spacing: .04em; - border-radius: 3px; -} -.bNYCAJ { background-color: $b-ocean; } /* Post */ -.jBjYbV { background-color: $gr-viridian; } /* Get */ -.hOczRB { background-color: $br-galaxy; } /* Put */ -.fRsrDc { background-color: $y-thunder; color: $g5-pepper; } /* Patch */ -.hPskZd { background-color: $r-curacao; } /* Delete */ - -// Content type block -.gzAoUb { - background-color: $g2-kevlar; - font-family: $proxima; -} -.iENVAs { font-family: $code; } -.dpMbau { font-family: $proxima; } - -// Code controls -.fCJmC { - font-family: $proxima; - span { border-radius: 3px; } -} - -// Code blocks -.kZHJcC { font-family: $code; } -.jCgylq { - .token.string { - color: $gr-honeydew; - & + a { color: $b-pool; } - } - .token.boolean { color: #f955b0; } -} +// Reusable method badge colors (used by _api-layout.scss .method-badge) +// These follow standard REST API color conventions +$method-get: $gr-rainforest; +$method-post: $b-ocean; +$method-put: $br-galaxy; +$method-patch: $y-thunder; +$method-delete: $r-curacao; diff --git a/assets/styles/layouts/_api-security-schemes.scss b/assets/styles/layouts/_api-security-schemes.scss new file mode 100644 index 
0000000000..a80acb7fb8 --- /dev/null +++ b/assets/styles/layouts/_api-security-schemes.scss @@ -0,0 +1,506 @@ +//////////////////////////////////////////////////////////////////////////////// +// API Security Schemes Styling +// +// Styles for security schemes sections displayed on conceptual API pages +// (like Authentication). These sections are rendered from OpenAPI spec +// securitySchemes using Hugo templates, not RapiDoc. +//////////////////////////////////////////////////////////////////////////////// + +.api-security-schemes { + margin-top: 2rem; + padding-top: 2rem; + border-top: 1px solid $g5-pepper; + + h2 { + margin-bottom: 1.5rem; + } + + .security-scheme { + margin-bottom: 2rem; + padding: 1.5rem; + background: $article-bg; + border: 1px solid $g5-pepper; + border-radius: 4px; + + h3 { + margin: 0 0 1rem 0; + font-size: 1.1rem; + color: $article-heading; + } + } + + .scheme-details { + margin-bottom: 1rem; + + dl { + display: grid; + grid-template-columns: auto 1fr; + gap: 0.5rem 1rem; + margin: 0; + } + + dt { + font-weight: 600; + color: $g9-mountain; + } + + dd { + margin: 0; + + code { + background: $article-code-bg; + color: $article-code; + padding: 0.2em 0.5em; + border-radius: 3px; + font-size: 0.9em; + } + } + } + + .scheme-description { + margin-top: 1rem; + padding-top: 1rem; + border-top: 1px solid $g5-pepper; + + p:first-child { + margin-top: 0; + } + + pre { + margin: 1rem 0; + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +// API Auth Popover - Credential input for operation pages +// +// Popover-based UI triggered by "Set credentials" button. +// Positioned above RapiDoc, integrates with "Try it" via JavaScript API. 
+//////////////////////////////////////////////////////////////////////////////// + +.api-auth-row { + display: flex; + align-items: center; + gap: 0.75rem; + margin-bottom: 0.5rem; +} + +.api-auth-trigger-wrapper { + position: relative; + display: inline-block; +} + +.api-auth-schemes { + font-size: 0.85rem; + color: $g9-mountain; + + .api-auth-label { + font-weight: 400; + opacity: 0.8; + } +} + +.api-auth-trigger { + display: inline-flex; + align-items: center; + gap: 0.5rem; + padding: 0.4rem 0.75rem; + font-size: 0.85rem; + font-weight: 500; + color: $article-text; + background: $g20-white; + border: 1px solid $g5-pepper; + border-radius: 4px; + cursor: pointer; + transition: all 0.15s ease; + + &:hover { + background: rgba($b-pool, 0.08); + border-color: $b-pool; + } + + &:focus { + outline: 2px solid $b-pool; + outline-offset: 2px; + } + + &.has-credentials { + border-color: $gr-viridian; + background: rgba($gr-viridian, 0.08); + } + + .auth-icon { + color: $g9-mountain; + } + + &.has-credentials .auth-icon { + color: $gr-viridian; + } +} + +.auth-status-indicator { + width: 8px; + height: 8px; + background: $gr-viridian; + border-radius: 50%; + margin-left: 0.25rem; +} + +.api-auth-popover { + position: absolute; + top: calc(100% + 8px); + left: 0; + z-index: 1000; + min-width: 320px; + max-width: 400px; + background: $g20-white; + border: 1px solid $g5-pepper; + border-radius: 6px; + box-shadow: 0 4px 16px rgba(0, 0, 0, 0.12); + + &[hidden] { + display: none; + } +} + +.api-auth-popover-content { + padding: 1rem; +} + +.popover-header { + display: flex; + align-items: center; + justify-content: space-between; + margin-bottom: 0.75rem; + + h4 { + margin: 0; + font-size: 0.95rem; + font-weight: 600; + color: $article-heading; + } +} + +.popover-close { + display: flex; + align-items: center; + justify-content: center; + width: 28px; + height: 28px; + padding: 0; + background: none; + border: none; + border-radius: 4px; + cursor: pointer; + color: 
$g9-mountain; + transition: all 0.15s; + + &:hover { + background: rgba(0, 0, 0, 0.05); + color: $article-text; + } +} + +.auth-description { + margin: 0 0 1rem 0; + font-size: 0.85rem; + color: $g9-mountain; +} + +.auth-field { + margin-bottom: 0.75rem; + + label { + display: block; + margin-bottom: 0.25rem; + font-weight: 600; + font-size: 0.85rem; + color: $article-text; + } + + .auth-label-text { + margin-right: 0.25rem; + } + + .auth-label-hint { + font-weight: 400; + color: $g9-mountain; + } + + input { + width: 100%; + padding: 0.5rem 0.75rem; + border: 1px solid $g5-pepper; + border-radius: 3px; + font-family: inherit; + font-size: 0.9rem; + background: $g20-white; + color: $article-text; + + &:focus { + outline: none; + border-color: $b-pool; + box-shadow: 0 0 0 2px rgba($b-pool, 0.2); + } + + &::placeholder { + color: $g9-mountain; + } + } +} + +.auth-input-group { + position: relative; + display: flex; + align-items: center; + + input { + padding-right: 2.5rem; + } +} + +.auth-show-toggle { + position: absolute; + right: 0.5rem; + display: flex; + align-items: center; + justify-content: center; + padding: 0.25rem; + background: none; + border: none; + cursor: pointer; + color: $g9-mountain; + opacity: 0.6; + transition: opacity 0.15s; + + &:hover { + opacity: 1; + } + + &.showing { + color: $b-pool; + opacity: 1; + } +} + +.auth-field-group { + margin-top: 1rem; + padding-top: 1rem; + border-top: 1px solid $g5-pepper; + + .auth-group-label { + margin: 0 0 0.75rem 0; + font-weight: 600; + font-size: 0.85rem; + color: $article-text; + } +} + +.auth-actions { + display: flex; + gap: 0.5rem; + margin-top: 1rem; + + // Explicit button styling to avoid link-like appearance + .auth-apply, + .auth-clear { + padding: 0.4rem 0.75rem; + font-size: 0.85rem; + font-weight: 500; + border-radius: 4px; + cursor: pointer; + transition: all 0.15s ease; + text-decoration: none; + } + + .auth-apply { + background: $b-pool; + color: $g20-white; + border: 1px solid $b-pool; + 
+ &:hover { + background: darken($b-pool, 8%); + border-color: darken($b-pool, 8%); + } + } + + .auth-clear { + background: transparent; + color: $article-text; + border: 1px solid $g5-pepper; + + &:hover { + background: rgba(0, 0, 0, 0.05); + border-color: $g9-mountain; + } + } +} + +.auth-feedback { + margin: 0.75rem 0 0 0; + padding: 0.5rem 0.75rem; + font-size: 0.85rem; + border-radius: 3px; + + &.auth-feedback--success { + background: rgba($gr-viridian, 0.1); + color: $gr-viridian; + } + + &.auth-feedback--error { + background: rgba($r-fire, 0.1); + color: $r-fire; + } +} + +// Dark theme overrides +[data-theme="dark"], +html:has(link[title="dark-theme"]:not([disabled])) { + .api-auth-schemes { + color: $g15-platinum; + } + + .api-security-schemes { + border-top-color: $grey25; + + .security-scheme { + background: $grey15; + border-color: $grey25; + } + + .scheme-details { + dt { + color: $g15-platinum; + } + } + + .scheme-description { + border-top-color: $grey25; + } + } + + .api-auth-trigger { + background: $grey15; + border-color: $grey25; + color: $g20-white; + + &:hover { + background: rgba($b-pool, 0.1); + border-color: $b-pool; + } + + &.has-credentials { + border-color: $gr-viridian; + background: rgba($gr-viridian, 0.1); + } + + .auth-icon { + color: $g15-platinum; + } + + &.has-credentials .auth-icon { + color: $gr-emerald; + } + } + + .api-auth-popover { + background: $grey15; + border-color: $grey25; + box-shadow: 0 4px 20px rgba(0, 0, 0, 0.4); + } + + .popover-header h4 { + color: $g20-white; + } + + .popover-close { + color: $g15-platinum; + + &:hover { + background: $grey20; + color: $g20-white; + } + } + + .auth-description { + color: $g15-platinum; + } + + .auth-field { + label { + color: $g20-white; + } + + .auth-label-hint { + color: $g15-platinum; + } + + input { + background: $grey20; + border-color: $grey25; + color: $g20-white; + + &:focus { + border-color: $b-pool; + } + + &::placeholder { + color: $g9-mountain; + } + } + } + + 
.auth-show-toggle { + color: $g15-platinum; + + &.showing { + color: $b-pool; + } + } + + .auth-field-group { + border-top-color: $grey25; + + .auth-group-label { + color: $g20-white; + } + } + + .auth-feedback { + &.auth-feedback--success { + background: rgba($gr-viridian, 0.15); + color: $gr-emerald; + } + + &.auth-feedback--error { + background: rgba($r-fire, 0.15); + color: $r-tungsten; + } + } + + .auth-actions { + .auth-apply { + background: $b-pool; + color: $g20-white; + border-color: $b-pool; + + &:hover { + background: lighten($b-pool, 5%); + border-color: lighten($b-pool, 5%); + } + } + + .auth-clear { + background: transparent; + color: $g15-platinum; + border-color: $grey25; + + &:hover { + background: $grey20; + border-color: $g15-platinum; + color: $g20-white; + } + } + } +} diff --git a/assets/styles/layouts/_sidebar.scss b/assets/styles/layouts/_sidebar.scss index 1c8df3c041..9add872854 100644 --- a/assets/styles/layouts/_sidebar.scss +++ b/assets/styles/layouts/_sidebar.scss @@ -255,6 +255,66 @@ } } } + + // API operation items within Hugo menu + .api-operation { + a { + display: flex; + align-items: center; + gap: 0.4rem; + } + + // Path-based operation display (All endpoints list) + &--path .api-path { + font-family: $proxima; + font-size: 0.9rem; + color: inherit; + word-break: break-all; + } + } + + .api-method { + font-size: 0.6rem; + font-weight: 700; + padding: 0.15rem 0.35rem; + border-radius: 3px; + text-transform: uppercase; + flex-shrink: 0; + line-height: 1; + border: 2px solid; + background-color: transparent; + + // Using lighter InfluxData brand colors - bordered style for readability + &--get { border-color: $b-pool; color: $b-pool; } // #00A3FF - bright brand blue + &--post { border-color: $gr-rainforest; color: $gr-rainforest; } // #34BB55 - bright brand green + &--put { border-color: $y-pineapple; color: $y-pineapple; } // #FFB94A - bright yellow (distinct from red) + &--delete { border-color: $r-curacao; color: $r-curacao; } // 
#F95F53 - bright brand red + &--patch { border-color: $br-new-purple; color: $br-new-purple; } // #9b2aff - distinctive brand purple + } + + // Compatibility version badge (v1 or v2) + .api-compat-badge { + font-size: 0.55rem; + font-weight: 600; + padding: 0.1rem 0.3rem; + border-radius: 3px; + text-transform: uppercase; + flex-shrink: 0; + line-height: 1; + margin-left: auto; + opacity: 0.8; + cursor: help; + + &--v1 { background: #8b5cf6; color: white; } // Purple for v1 + &--v2 { background: #06b6d4; color: white; } // Cyan for v2 + } + + // Non-link group labels (for multi-tag groups) + .nav-group-label { + color: $nav-item; + font-weight: $medium; + display: inline-block; + } } } diff --git a/assets/styles/styles-default.scss b/assets/styles/styles-default.scss index 8852a240c3..ce318f720b 100644 --- a/assets/styles/styles-default.scss +++ b/assets/styles/styles-default.scss @@ -32,7 +32,9 @@ "layouts/v1-overrides", "layouts/notifications", "layouts/code-controls", - "layouts/v3-wayfinding"; + "layouts/v3-wayfinding", + "layouts/api-layout", + "layouts/api-security-schemes"; // Import Components @import "components/influxdb-version-detector", diff --git a/content/influxdb3/core/reference/api/_index.md b/content/influxdb3/core/reference/api/_index.md index 6a2200b1e5..13724ba97f 100644 --- a/content/influxdb3/core/reference/api/_index.md +++ b/content/influxdb3/core/reference/api/_index.md @@ -1,20 +1,10 @@ --- title: InfluxDB HTTP API description: > - The InfluxDB HTTP API for {{% product-name %}} provides a programmatic interface - for interactions with InfluxDB, - including writing, querying, and processing data, and managing an InfluxDB 3 - instance. -menu: - influxdb3_core: - parent: Reference - name: InfluxDB HTTP API -weight: 104 -influxdb3/core/tags: [api] -source: /shared/influxdb3-api-reference/_index.md + The InfluxDB HTTP API for InfluxDB 3 Core provides a programmatic interface + for interactions with InfluxDB. 
+# Redirect to the new location
+redirect: /influxdb3/core/api/
--- - +This page has moved to [InfluxDB HTTP API](/influxdb3/core/api/). diff --git a/content/influxdb3/enterprise/reference/api/_index.md b/content/influxdb3/enterprise/reference/api/_index.md index ea78867f6d..a5a831de48 100644 --- a/content/influxdb3/enterprise/reference/api/_index.md +++ b/content/influxdb3/enterprise/reference/api/_index.md @@ -1,20 +1,10 @@ --- title: InfluxDB HTTP API description: > - The InfluxDB HTTP API for {{% product-name %}} provides a programmatic interface - for interactions with InfluxDB, - including writing, querying, and processing data, and managing an InfluxDB 3 - instance. -menu: - influxdb3_enterprise: - parent: Reference - name: InfluxDB HTTP API -weight: 104 -influxdb3/enterprise/tags: [api] -source: /shared/influxdb3-api-reference/_index.md + The InfluxDB HTTP API for InfluxDB 3 Enterprise provides a programmatic interface + for interactions with InfluxDB. +# Redirect to the new location +redirect: /influxdb3/enterprise/api/ --- - +This page has moved to [InfluxDB HTTP API](/influxdb3/enterprise/api/). diff --git a/cypress/e2e/content/api-reference.cy.js b/cypress/e2e/content/api-reference.cy.js index ceeaffeffc..ca9d4e47c5 100644 --- a/cypress/e2e/content/api-reference.cy.js +++ b/cypress/e2e/content/api-reference.cy.js @@ -1,8 +1,20 @@ /// <reference types="cypress" /> + +/** + * API Reference Documentation E2E Tests + * + * Tests both: + * 1. Legacy API reference pages (link validation, content structure) + * 2. 
New 3-column layout with tabs and TOC (for InfluxDB 3 Core/Enterprise) + * + * Run with: + * node cypress/support/run-e2e-specs.js --spec "cypress/e2e/content/api-reference.cy.js" content/influxdb3/core/reference/api/_index.md + */ + const fakeGoogleTagManager = { trackingOptIn: () => {}, - trackingOptOut: () => {} -} + trackingOptOut: () => {}, +}; describe('API reference content', () => { const subjects = [ @@ -37,42 +49,47 @@ describe('API reference content', () => { '/influxdb3/enterprise/api/', ]; - subjects.forEach((subject) => { describe(subject, () => { beforeEach(() => { - // Intercept and modify the page HTML before it loads - cy.intercept('GET', '**', (req) => { - req.continue((res) => { - if (res.headers['content-type']?.includes('text/html')) { - // Modify the Kapa widget script attributes - // Avoid socket errors from fpjs in tests by disabling fingerprinting - res.body = res.body.replace( - /data-user-analytics-fingerprint-enabled="true"/, - 'data-user-analytics-fingerprint-enabled="false"' - ); - } - }); - }); + // Intercept and modify the page HTML before it loads + cy.intercept('GET', '**', (req) => { + req.continue((res) => { + if (res.headers['content-type']?.includes('text/html')) { + // Modify the Kapa widget script attributes + // Avoid socket errors from fpjs in tests by disabling fingerprinting + res.body = res.body.replace( + /data-user-analytics-fingerprint-enabled="true"/, + 'data-user-analytics-fingerprint-enabled="false"' + ); + } + }); + }); cy.visit(subject); - window.fcdsc = fakeGoogleTagManager; cy.stub(window.fcdsc, 'trackingOptIn').as('trackingOptIn'); cy.stub(window.fcdsc, 'trackingOptOut').as('trackingOptOut'); }); it(`has API info`, function () { - cy.get('script[data-user-analytics-fingerprint-enabled=false]').should('have.length', 1); + cy.get('script[data-user-analytics-fingerprint-enabled=false]').should( + 'have.length', + 1 + ); cy.get('h1').first().should('have.length', 1); - 
cy.get('[data-role$=description]').should('have.length', 1); + // Check for description element (either article--description class or data-role attribute) + cy.get('.article--description, [data-role$=description]').should( + 'have.length.at.least', + 1 + ); }); it('links back to the version home page', function () { - cy.get('a.back').contains('Docs') - .should('have.length', 1) - .click(); + cy.get('a.back').contains('Docs').should('have.length', 1).click(); // Path should be the first two segments and trailing slash in $subject - cy.location('pathname') - .should('eq', subject.replace(/^(\/[^/]+\/[^/]+\/).*/, '$1')); + cy.location('pathname').should( + 'eq', + subject.replace(/^(\/[^/]+\/[^/]+\/).*/, '$1') + ); cy.get('h1').should('have.length', 1); }); it('contains valid internal links', function () { @@ -88,8 +105,7 @@ describe('API reference content', () => { // cy.request doesn't show in your browser's Developer Tools // because the request comes from Node, not from the browser. cy.request($a.attr('href')).its('status').should('eq', 200); - }); - + }); }); }); it('contains valid external links', function () { @@ -109,3 +125,209 @@ describe('API reference content', () => { }); }); }); + +/** + * API Reference Layout Tests + * Tests layout and renderer for InfluxDB 3 Core/Enterprise API documentation + */ +describe('API reference layout', () => { + // API tag pages have RapiDoc renderer + const layoutSubjects = [ + '/influxdb3/core/api/v3/engine/', + '/influxdb3/enterprise/api/v3/engine/', + ]; + + layoutSubjects.forEach((subject) => { + describe(`${subject} layout`, () => { + beforeEach(() => { + cy.intercept('GET', '**', (req) => { + req.continue((res) => { + if (res.headers['content-type']?.includes('text/html')) { + res.body = res.body.replace( + /data-user-analytics-fingerprint-enabled="true"/, + 'data-user-analytics-fingerprint-enabled="false"' + ); + } + }); + }); + cy.visit(subject); + }); + + describe('Layout Structure', () => { + it('displays 
sidebar', () => { + cy.get('.sidebar').should('be.visible'); + }); + + it('displays API content area', () => { + cy.get('.api-content, .content-wrapper, .article--content').should( + 'exist' + ); + }); + }); + + describe('API Renderer', () => { + it('loads API documentation renderer', () => { + cy.get( + '.api-reference-container, rapi-doc, .api-reference-wrapper' + ).should('exist'); + }); + }); + }); + }); +}); + +/** + * RapiDoc Mini Component Tests + * Tests the api-operation shortcode and RapiDoc Mini component behavior + */ +describe('RapiDoc Mini component', () => { + // Operation pages use RapiDoc Mini for single operation rendering + const operationPages = [ + '/influxdb3/core/api/write/post/', + '/influxdb3/core/api/api/v3/write_lp/post/', + ]; + + operationPages.forEach((page) => { + describe(`Operation page ${page}`, () => { + beforeEach(() => { + cy.intercept('GET', '**', (req) => { + req.continue((res) => { + if (res.headers['content-type']?.includes('text/html')) { + res.body = res.body.replace( + /data-user-analytics-fingerprint-enabled="true"/, + 'data-user-analytics-fingerprint-enabled="false"' + ); + } + }); + }); + cy.visit(page); + }); + + describe('Component initialization', () => { + it('renders rapidoc-mini component container', () => { + cy.get('[data-component="rapidoc-mini"]').should('exist'); + }); + + it('has data-spec-url attribute', () => { + cy.get('[data-component="rapidoc-mini"]') + .should('have.attr', 'data-spec-url') + .and('match', /\.ya?ml$/); + }); + + it('has data-match-paths attribute', () => { + cy.get('[data-component="rapidoc-mini"]') + .should('have.attr', 'data-match-paths') + .and('match', /^(get|post|put|patch|delete)\s+\//i); + }); + + it('includes machine-readable spec links', () => { + cy.get('link[rel="alternate"][type="application/x-yaml"]').should( + 'exist' + ); + cy.get('link[rel="alternate"][type="application/json"]').should( + 'exist' + ); + }); + }); + + describe('RapiDoc element creation', () => { + 
it('creates rapi-doc-mini custom element', () => { + // Wait for component to initialize and create the element + cy.get('rapi-doc-mini', { timeout: 10000 }).should('exist'); + }); + + it('rapi-doc-mini has spec-url attribute', () => { + cy.get('rapi-doc-mini', { timeout: 10000 }) + .should('have.attr', 'spec-url') + .and('match', /\.ya?ml$/); + }); + + it('rapi-doc-mini has theme attributes', () => { + cy.get('rapi-doc-mini', { timeout: 10000 }).should( + 'have.attr', + 'theme' + ); + }); + }); + }); + }); + + describe('api-operation shortcode on example page', () => { + beforeEach(() => { + cy.intercept('GET', '**', (req) => { + req.continue((res) => { + if (res.headers['content-type']?.includes('text/html')) { + res.body = res.body.replace( + /data-user-analytics-fingerprint-enabled="true"/, + 'data-user-analytics-fingerprint-enabled="false"' + ); + } + }); + }); + cy.visit('/example/'); + }); + + describe('Multiple instances', () => { + it('renders multiple rapidoc-mini containers', () => { + cy.get('[data-component="rapidoc-mini"]').should( + 'have.length.at.least', + 2 + ); + }); + + it('each instance has unique match-paths', () => { + cy.get('[data-component="rapidoc-mini"]').then(($containers) => { + const matchPaths = []; + $containers.each((_, el) => { + const path = el.getAttribute('data-match-paths'); + expect(matchPaths).to.not.include(path); + matchPaths.push(path); + }); + }); + }); + + it('each instance creates its own rapi-doc-mini element', () => { + cy.get('rapi-doc-mini', { timeout: 10000 }).should( + 'have.length.at.least', + 2 + ); + }); + }); + }); + + describe('Theme synchronization', () => { + beforeEach(() => { + cy.intercept('GET', '**', (req) => { + req.continue((res) => { + if (res.headers['content-type']?.includes('text/html')) { + res.body = res.body.replace( + /data-user-analytics-fingerprint-enabled="true"/, + 'data-user-analytics-fingerprint-enabled="false"' + ); + } + }); + }); + cy.visit('/influxdb3/core/api/write/post/'); + }); 
+ + it('applies light theme by default', () => { + cy.get('rapi-doc-mini', { timeout: 10000 }) + .should('have.attr', 'theme') + .and('match', /light|dark/); + }); + + it('rapi-doc-mini has background color attribute', () => { + cy.get('rapi-doc-mini', { timeout: 10000 }).should( + 'have.attr', + 'bg-color' + ); + }); + + it('rapi-doc-mini has text color attribute', () => { + cy.get('rapi-doc-mini', { timeout: 10000 }).should( + 'have.attr', + 'text-color' + ); + }); + }); +}); diff --git a/data/article_data/influxdb/cloud-dedicated/articles.json b/data/article_data/influxdb/cloud-dedicated/articles.json new file mode 100644 index 0000000000..be83022204 --- /dev/null +++ b/data/article_data/influxdb/cloud-dedicated/articles.json @@ -0,0 +1,30 @@ +{ + "articles": [ + { + "path": "api/accounts/{accountId}/clusters", + "fields": { + "name": "/accounts/{accountId}/clusters", + "describes": [ + "/accounts/{accountId}/clusters/{clusterId}/databases", + "/accounts/{accountId}/clusters/{clusterId}/databases/{databaseName}", + "/accounts/{accountId}/clusters/{clusterId}/databases/{databaseName}/tables", + "/accounts/{accountId}/clusters/{clusterId}/tokens", + "/accounts/{accountId}/clusters/{clusterId}/tokens/{tokenId}" + ], + "menuName": "/accounts/{accountId}/clusters", + "title": "/accounts/{accountId}/clusters", + "tags": [ + "accounts-{accountId}", + "clusters" + ], + "apiTags": [ + "Databases", + "Tables", + "Database tokens" + ], + "source": "static/openapi/influxdb-cloud-dedicated/paths/influxdb-cloud-dedicated-accounts-{accountId}-clusters.yaml", + "staticFilePath": "/openapi/influxdb-cloud-dedicated/paths/influxdb-cloud-dedicated-accounts-{accountId}-clusters.yaml" + } + } + ] +} \ No newline at end of file diff --git a/data/article_data/influxdb/cloud-dedicated/articles.yml b/data/article_data/influxdb/cloud-dedicated/articles.yml new file mode 100644 index 0000000000..1be528fd5b --- /dev/null +++ b/data/article_data/influxdb/cloud-dedicated/articles.yml @@ -0,0 
+1,24 @@ +articles: + - path: api/accounts/{accountId}/clusters + fields: + name: /accounts/{accountId}/clusters + describes: + - /accounts/{accountId}/clusters/{clusterId}/databases + - /accounts/{accountId}/clusters/{clusterId}/databases/{databaseName} + - >- + /accounts/{accountId}/clusters/{clusterId}/databases/{databaseName}/tables + - /accounts/{accountId}/clusters/{clusterId}/tokens + - /accounts/{accountId}/clusters/{clusterId}/tokens/{tokenId} + menuName: /accounts/{accountId}/clusters + title: /accounts/{accountId}/clusters + tags: + - accounts-{accountId} + - clusters + apiTags: + - Databases + - Tables + - Database tokens + source: >- + static/openapi/influxdb-cloud-dedicated/paths/influxdb-cloud-dedicated-accounts-{accountId}-clusters.yaml + staticFilePath: >- + /openapi/influxdb-cloud-dedicated/paths/influxdb-cloud-dedicated-accounts-{accountId}-clusters.yaml diff --git a/data/article_data/influxdb/cloud-v2/articles.yml b/data/article_data/influxdb/cloud-v2/articles.yml new file mode 100644 index 0000000000..0f188ea905 --- /dev/null +++ b/data/article_data/influxdb/cloud-v2/articles.yml @@ -0,0 +1,499 @@ +articles: + - path: api/v2/authorizations + fields: + name: /api/v2/authorizations + describes: + - /api/v2/authorizations + - /api/v2/authorizations/{authID} + title: |- + /api/v2/authorizations + InfluxDB Cloud API Service + tags: + - api-v2 + - authorizations + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-authorizations.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-authorizations.yaml + - path: api/v2/buckets + fields: + name: /api/v2/buckets + describes: + - /api/v2/buckets + - /api/v2/buckets/{bucketID} + - /api/v2/buckets/{bucketID}/labels + - /api/v2/buckets/{bucketID}/labels/{labelID} + - /api/v2/buckets/{bucketID}/members + - /api/v2/buckets/{bucketID}/members/{userID} + - /api/v2/buckets/{bucketID}/owners + - /api/v2/buckets/{bucketID}/owners/{userID} + - /api/v2/buckets/{bucketID}/schema/measurements + - 
/api/v2/buckets/{bucketID}/schema/measurements/{measurementID} + title: |- + /api/v2/buckets + InfluxDB Cloud API Service + tags: + - api-v2 + - buckets + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-buckets.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-buckets.yaml + - path: api/v2/checks + fields: + name: /api/v2/checks + describes: + - /api/v2/checks + - /api/v2/checks/{checkID} + - /api/v2/checks/{checkID}/labels + - /api/v2/checks/{checkID}/labels/{labelID} + - /api/v2/checks/{checkID}/query + title: |- + /api/v2/checks + InfluxDB Cloud API Service + tags: + - api-v2 + - checks + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-checks.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-checks.yaml + - path: api/v2/dashboards + fields: + name: /api/v2/dashboards + describes: + - /api/v2/dashboards + - /api/v2/dashboards/{dashboardID} + - /api/v2/dashboards/{dashboardID}/cells + - /api/v2/dashboards/{dashboardID}/cells/{cellID} + - /api/v2/dashboards/{dashboardID}/cells/{cellID}/view + - /api/v2/dashboards/{dashboardID}/labels + - /api/v2/dashboards/{dashboardID}/labels/{labelID} + - /api/v2/dashboards/{dashboardID}/members + - /api/v2/dashboards/{dashboardID}/members/{userID} + - /api/v2/dashboards/{dashboardID}/owners + - /api/v2/dashboards/{dashboardID}/owners/{userID} + title: |- + /api/v2/dashboards + InfluxDB Cloud API Service + tags: + - api-v2 + - dashboards + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-dashboards.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-dashboards.yaml + - path: api/v2/dbrps + fields: + name: /api/v2/dbrps + describes: + - /api/v2/dbrps + - /api/v2/dbrps/{dbrpID} + title: |- + /api/v2/dbrps + InfluxDB Cloud API Service + tags: + - api-v2 + - dbrps + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-dbrps.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-dbrps.yaml + - path: api/v2/delete + fields: + name: 
/api/v2/delete + describes: + - /api/v2/delete + title: |- + /api/v2/delete + InfluxDB Cloud API Service + tags: + - api-v2 + - delete + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-delete.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-delete.yaml + - path: api/v2/flags + fields: + name: /api/v2/flags + describes: + - /api/v2/flags + title: |- + /api/v2/flags + InfluxDB Cloud API Service + tags: + - api-v2 + - flags + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-flags.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-flags.yaml + - path: api/v2/labels + fields: + name: /api/v2/labels + describes: + - /api/v2/labels + - /api/v2/labels/{labelID} + title: |- + /api/v2/labels + InfluxDB Cloud API Service + tags: + - api-v2 + - labels + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-labels.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-labels.yaml + - path: api/v2/maps + fields: + name: /api/v2/maps + describes: + - /api/v2/maps/mapToken + title: |- + /api/v2/maps + InfluxDB Cloud API Service + tags: + - api-v2 + - maps + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-maps.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-maps.yaml + - path: api/v2/me + fields: + name: /api/v2/me + describes: + - /api/v2/me + - /api/v2/me/password + title: |- + /api/v2/me + InfluxDB Cloud API Service + tags: + - api-v2 + - me + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-me.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-me.yaml + - path: api/v2/notificationEndpoints + fields: + name: /api/v2/notificationEndpoints + describes: + - /api/v2/notificationEndpoints + - /api/v2/notificationEndpoints/{endpointID} + - /api/v2/notificationEndpoints/{endpointID}/labels + - /api/v2/notificationEndpoints/{endpointID}/labels/{labelID} + title: |- + /api/v2/notificationEndpoints + InfluxDB Cloud API Service + tags: + - api-v2 + - 
notificationEndpoints + source: >- + static/openapi/influxdb-cloud-v2/paths/ref-api-v2-notificationEndpoints.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-notificationEndpoints.yaml + - path: api/v2/notificationRules + fields: + name: /api/v2/notificationRules + describes: + - /api/v2/notificationRules + - /api/v2/notificationRules/{ruleID} + - /api/v2/notificationRules/{ruleID}/labels + - /api/v2/notificationRules/{ruleID}/labels/{labelID} + - /api/v2/notificationRules/{ruleID}/query + title: |- + /api/v2/notificationRules + InfluxDB Cloud API Service + tags: + - api-v2 + - notificationRules + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-notificationRules.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-notificationRules.yaml + - path: api/v2/orgs + fields: + name: /api/v2/orgs + describes: + - /api/v2/orgs + - /api/v2/orgs/{orgID} + - /api/v2/orgs/{orgID}/limits + - /api/v2/orgs/{orgID}/members + - /api/v2/orgs/{orgID}/members/{userID} + - /api/v2/orgs/{orgID}/owners + - /api/v2/orgs/{orgID}/owners/{userID} + - /api/v2/orgs/{orgID}/secrets + - /api/v2/orgs/{orgID}/secrets/delete + - /api/v2/orgs/{orgID}/secrets/{secretID} + - /api/v2/orgs/{orgID}/usage + title: |- + /api/v2/orgs + InfluxDB Cloud API Service + tags: + - api-v2 + - orgs + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-orgs.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-orgs.yaml + - path: api/v2/query + fields: + name: /api/v2/query + describes: + - /api/v2/query + - /api/v2/query/analyze + - /api/v2/query/ast + - /api/v2/query/suggestions + - /api/v2/query/suggestions/{name} + title: |- + /api/v2/query + InfluxDB Cloud API Service + tags: + - api-v2 + - query + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-query.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-query.yaml + - path: api/v2/resources + fields: + name: /api/v2/resources + describes: + - /api/v2/resources + title: |- + 
/api/v2/resources + InfluxDB Cloud API Service + tags: + - api-v2 + - resources + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-resources.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-resources.yaml + - path: api/v2/scripts + fields: + name: /api/v2/scripts + describes: + - /api/v2/scripts + - /api/v2/scripts/{scriptID} + - /api/v2/scripts/{scriptID}/invoke + - /api/v2/scripts/{scriptID}/params + title: |- + /api/v2/scripts + InfluxDB Cloud API Service + tags: + - api-v2 + - scripts + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-scripts.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-scripts.yaml + - path: api/v2/setup + fields: + name: /api/v2/setup + describes: + - /api/v2/setup + - /api/v2/setup/user + title: |- + /api/v2/setup + InfluxDB Cloud API Service + tags: + - api-v2 + - setup + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-setup.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-setup.yaml + - path: api/v2/signin + fields: + name: /api/v2/signin + describes: + - /api/v2/signin + title: |- + /api/v2/signin + InfluxDB Cloud API Service + tags: + - api-v2 + - signin + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-signin.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-signin.yaml + - path: api/v2/signout + fields: + name: /api/v2/signout + describes: + - /api/v2/signout + title: |- + /api/v2/signout + InfluxDB Cloud API Service + tags: + - api-v2 + - signout + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-signout.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-signout.yaml + - path: api/v2/stacks + fields: + name: /api/v2/stacks + describes: + - /api/v2/stacks + - /api/v2/stacks/{stack_id} + - /api/v2/stacks/{stack_id}/uninstall + title: |- + /api/v2/stacks + InfluxDB Cloud API Service + tags: + - api-v2 + - stacks + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-stacks.yaml + staticFilePath: 
/openapi/influxdb-cloud-v2/paths/ref-api-v2-stacks.yaml + - path: api/v2/tasks + fields: + name: /api/v2/tasks + describes: + - /api/v2/tasks + - /api/v2/tasks/{taskID} + - /api/v2/tasks/{taskID}/labels + - /api/v2/tasks/{taskID}/labels/{labelID} + - /api/v2/tasks/{taskID}/logs + - /api/v2/tasks/{taskID}/members + - /api/v2/tasks/{taskID}/members/{userID} + - /api/v2/tasks/{taskID}/owners + - /api/v2/tasks/{taskID}/owners/{userID} + - /api/v2/tasks/{taskID}/runs + - /api/v2/tasks/{taskID}/runs/{runID} + - /api/v2/tasks/{taskID}/runs/{runID}/logs + - /api/v2/tasks/{taskID}/runs/{runID}/retry + title: |- + /api/v2/tasks + InfluxDB Cloud API Service + tags: + - api-v2 + - tasks + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-tasks.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-tasks.yaml + - path: api/v2/telegraf + fields: + name: /api/v2/telegraf + describes: + - /api/v2/telegraf/plugins + title: |- + /api/v2/telegraf + InfluxDB Cloud API Service + tags: + - api-v2 + - telegraf + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-telegraf.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-telegraf.yaml + - path: api/v2/telegrafs + fields: + name: /api/v2/telegrafs + describes: + - /api/v2/telegrafs + - /api/v2/telegrafs/{telegrafID} + - /api/v2/telegrafs/{telegrafID}/labels + - /api/v2/telegrafs/{telegrafID}/labels/{labelID} + - /api/v2/telegrafs/{telegrafID}/members + - /api/v2/telegrafs/{telegrafID}/members/{userID} + - /api/v2/telegrafs/{telegrafID}/owners + - /api/v2/telegrafs/{telegrafID}/owners/{userID} + title: |- + /api/v2/telegrafs + InfluxDB Cloud API Service + tags: + - api-v2 + - telegrafs + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-telegrafs.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-telegrafs.yaml + - path: api/v2/templates + fields: + name: /api/v2/templates + describes: + - /api/v2/templates/apply + - /api/v2/templates/export + title: |- + /api/v2/templates 
+ InfluxDB Cloud API Service + tags: + - api-v2 + - templates + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-templates.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-templates.yaml + - path: api/v2/users + fields: + name: /api/v2/users + describes: + - /api/v2/users + - /api/v2/users/{userID} + - /api/v2/users/{userID}/password + title: |- + /api/v2/users + InfluxDB Cloud API Service + tags: + - api-v2 + - users + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-users.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-users.yaml + - path: api/v2/variables + fields: + name: /api/v2/variables + describes: + - /api/v2/variables + - /api/v2/variables/{variableID} + - /api/v2/variables/{variableID}/labels + - /api/v2/variables/{variableID}/labels/{labelID} + title: |- + /api/v2/variables + InfluxDB Cloud API Service + tags: + - api-v2 + - variables + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-variables.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-variables.yaml + - path: api/v2/write + fields: + name: /api/v2/write + describes: + - /api/v2/write + title: |- + /api/v2/write + InfluxDB Cloud API Service + tags: + - api-v2 + - write + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2-write.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2-write.yaml + - path: api/v2 + fields: + name: /api/v2 + describes: + - /api/v2 + title: |- + /api/v2 + InfluxDB Cloud API Service + tags: + - api + - v2 + source: static/openapi/influxdb-cloud-v2/paths/ref-api-v2.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-api-v2.yaml + - path: legacy/authorizations + fields: + name: /legacy/authorizations + describes: + - /legacy/authorizations + - /legacy/authorizations/{authID} + - /legacy/authorizations/{authID}/password + title: |- + /legacy/authorizations + InfluxDB Cloud API Service + tags: + - legacy + - authorizations + source: 
static/openapi/influxdb-cloud-v2/paths/ref-legacy-authorizations.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-legacy-authorizations.yaml + - path: ping + fields: + name: /ping + describes: + - /ping + title: |- + /ping + InfluxDB Cloud API Service + tags: + - '' + - ping + source: static/openapi/influxdb-cloud-v2/paths/ref-ping.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-ping.yaml + - path: query + fields: + name: /query + describes: + - /query + title: |- + /query + InfluxDB Cloud API Service + tags: + - '' + - query + source: static/openapi/influxdb-cloud-v2/paths/ref-query.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-query.yaml + - path: write + fields: + name: /write + describes: + - /write + title: |- + /write + InfluxDB Cloud API Service + tags: + - '' + - write + source: static/openapi/influxdb-cloud-v2/paths/ref-write.yaml + staticFilePath: /openapi/influxdb-cloud-v2/paths/ref-write.yaml diff --git a/data/article_data/influxdb/clustered/articles.json b/data/article_data/influxdb/clustered/articles.json new file mode 100644 index 0000000000..43dccbcd95 --- /dev/null +++ b/data/article_data/influxdb/clustered/articles.json @@ -0,0 +1,66 @@ +{ + "articles": [ + { + "path": "api/databases/{databaseName}/tables", + "fields": { + "name": "/databases/{databaseName}/tables", + "describes": [ + "/databases/{databaseName}/tables" + ], + "menuName": "/databases/{databaseName}/tables", + "title": "/databases/{databaseName}/tables", + "tags": [ + "databases-{databaseName}", + "tables" + ], + "apiTags": [ + "Tables" + ], + "source": "static/openapi/influxdb-clustered/paths/influxdb-clustered-databases-{databaseName}-tables.yaml", + "staticFilePath": "/openapi/influxdb-clustered/paths/influxdb-clustered-databases-{databaseName}-tables.yaml" + } + }, + { + "path": "api/databases", + "fields": { + "name": "/databases", + "describes": [ + "/databases", + "/databases/{databaseName}" + ], + "menuName": "/databases", + "title": 
"/databases", + "tags": [ + "", + "databases" + ], + "apiTags": [ + "Databases" + ], + "source": "static/openapi/influxdb-clustered/paths/influxdb-clustered-databases.yaml", + "staticFilePath": "/openapi/influxdb-clustered/paths/influxdb-clustered-databases.yaml" + } + }, + { + "path": "api/tokens", + "fields": { + "name": "/tokens", + "describes": [ + "/tokens", + "/tokens/{tokenId}" + ], + "menuName": "/tokens", + "title": "/tokens", + "tags": [ + "", + "tokens" + ], + "apiTags": [ + "Database tokens" + ], + "source": "static/openapi/influxdb-clustered/paths/influxdb-clustered-tokens.yaml", + "staticFilePath": "/openapi/influxdb-clustered/paths/influxdb-clustered-tokens.yaml" + } + } + ] +} \ No newline at end of file diff --git a/data/article_data/influxdb/clustered/articles.yml b/data/article_data/influxdb/clustered/articles.yml new file mode 100644 index 0000000000..0c0dddc892 --- /dev/null +++ b/data/article_data/influxdb/clustered/articles.yml @@ -0,0 +1,48 @@ +articles: + - path: api/databases/{databaseName}/tables + fields: + name: /databases/{databaseName}/tables + describes: + - /databases/{databaseName}/tables + menuName: /databases/{databaseName}/tables + title: /databases/{databaseName}/tables + tags: + - databases-{databaseName} + - tables + apiTags: + - Tables + source: >- + static/openapi/influxdb-clustered/paths/influxdb-clustered-databases-{databaseName}-tables.yaml + staticFilePath: >- + /openapi/influxdb-clustered/paths/influxdb-clustered-databases-{databaseName}-tables.yaml + - path: api/databases + fields: + name: /databases + describes: + - /databases + - /databases/{databaseName} + menuName: /databases + title: /databases + tags: + - '' + - databases + apiTags: + - Databases + source: >- + static/openapi/influxdb-clustered/paths/influxdb-clustered-databases.yaml + staticFilePath: /openapi/influxdb-clustered/paths/influxdb-clustered-databases.yaml + - path: api/tokens + fields: + name: /tokens + describes: + - /tokens + - /tokens/{tokenId} + 
menuName: /tokens + title: /tokens + tags: + - '' + - tokens + apiTags: + - Database tokens + source: static/openapi/influxdb-clustered/paths/influxdb-clustered-tokens.yaml + staticFilePath: /openapi/influxdb-clustered/paths/influxdb-clustered-tokens.yaml diff --git a/data/article_data/influxdb/influxdb3_core/articles.json b/data/article_data/influxdb/influxdb3_core/articles.json new file mode 100644 index 0000000000..c010a18cf5 --- /dev/null +++ b/data/article_data/influxdb/influxdb3_core/articles.json @@ -0,0 +1,652 @@ +{ + "articles": [ + { + "path": "api/auth-token", + "fields": { + "name": "Auth token", + "describes": [ + "/api/v3/configure/token/admin", + "/api/v3/configure/token/admin/regenerate", + "/api/v3/configure/token", + "/api/v3/configure/token/named_admin" + ], + "title": "Auth token", + "description": "Manage tokens for authentication and authorization", + "tag": "Auth token", + "isConceptual": false, + "menuGroup": "Other", + "operations": [ + { + "operationId": "PostCreateAdminToken", + "method": "POST", + "path": "/api/v3/configure/token/admin", + "summary": "Create admin token", + "tags": [ + "Auth token" + ] + }, + { + "operationId": "PostRegenerateAdminToken", + "method": "POST", + "path": "/api/v3/configure/token/admin/regenerate", + "summary": "Regenerate admin token", + "tags": [ + "Auth token" + ] + }, + { + "operationId": "DeleteToken", + "method": "DELETE", + "path": "/api/v3/configure/token", + "summary": "Delete token", + "tags": [ + "Auth token" + ] + }, + { + "operationId": "PostCreateNamedAdminToken", + "method": "POST", + "path": "/api/v3/configure/token/named_admin", + "summary": "Create named admin token", + "tags": [ + "Auth token" + ] + } + ], + "tagDescription": "Manage tokens for authentication and authorization", + "source": "static/openapi/influxdb3-core/tags/tags/influxdb3-core-auth-token.yaml", + "staticFilePath": "/openapi/influxdb3-core/tags/tags/influxdb3-core-auth-token.yaml" + } + }, + { + "path": 
"api/authentication", + "fields": { + "name": "Authentication", + "describes": [ + "/api/v3/configure/token/admin/regenerate", + "/api/v3/configure/token", + "/api/v3/configure/token/named_admin" + ], + "title": "Authentication", + "description": "Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API:\n\n| Authentication scheme | Works with |\n|:----------------------|:-----------|\n| Bearer authentication | All endpoints |\n| Token authentication | v1 and v2 compatibility endpoints (`/write`, `/query`, `/api/v2/write`) |\n| Basic authentication | v1 compatibility endpoints (`/write`, `/query`) |\n| Querystring authentication | v1 compatibility endpoints (`/write`, `/query`) |\n\nSee the **Security Schemes** section below for details on each authentication method.\n", + "tag": "Authentication", + "isConceptual": true, + "menuGroup": "Concepts", + "operations": [ + { + "operationId": "PostRegenerateAdminToken", + "method": "POST", + "path": "/api/v3/configure/token/admin/regenerate", + "summary": "Regenerate admin token", + "tags": [ + "Authentication" + ] + }, + { + "operationId": "DeleteToken", + "method": "DELETE", + "path": "/api/v3/configure/token", + "summary": "Delete token", + "tags": [ + "Authentication" + ] + }, + { + "operationId": "PostCreateNamedAdminToken", + "method": "POST", + "path": "/api/v3/configure/token/named_admin", + "summary": "Create named admin token", + "tags": [ + "Authentication" + ] + } + ], + "tagDescription": "Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API:\n\n| Authentication scheme | Works with |\n|:----------------------|:-----------|\n| Bearer authentication | All endpoints |\n| Token authentication | v1 and v2 compatibility endpoints (`/write`, `/query`, `/api/v2/write`) |\n| Basic authentication | v1 compatibility endpoints (`/write`, `/query`) |\n| Querystring authentication | v1 compatibility endpoints (`/write`, `/query`) 
|\n\nSee the **Security Schemes** section below for details on each authentication method.\n", + "showSecuritySchemes": true, + "source": "static/openapi/influxdb3-core/tags/tags/influxdb3-core-authentication.yaml", + "staticFilePath": "/openapi/influxdb3-core/tags/tags/influxdb3-core-authentication.yaml" + } + }, + { + "path": "api/cache-data", + "fields": { + "name": "Cache data", + "describes": [ + "/api/v3/configure/distinct_cache", + "/api/v3/configure/last_cache" + ], + "title": "Cache data", + "description": "Manage the in-memory cache.\n\n#### Distinct Value Cache\n\nThe Distinct Value Cache (DVC) lets you cache distinct\nvalues of one or more columns in a table, improving the performance of\nqueries that return distinct tag and field values. \n\nThe DVC is an in-memory cache that stores distinct values for specific columns\nin a table. When you create an DVC, you can specify what columns' distinct\nvalues to cache, the maximum number of distinct value combinations to cache, and\nthe maximum age of cached values. A DVC is associated with a table, which can\nhave multiple DVCs.\n\n#### Last value cache\n\nThe Last Value Cache (LVC) lets you cache the most recent\nvalues for specific fields in a table, improving the performance of queries that\nreturn the most recent value of a field for specific series or the last N values\nof a field.\n\nThe LVC is an in-memory cache that stores the last N number of values for\nspecific fields of series in a table. 
When you create an LVC, you can specify\nwhat fields to cache, what tags to use to identify each series, and the\nnumber of values to cache for each unique series.\nAn LVC is associated with a table, which can have multiple LVCs.\n\n#### Related guides\n\n- [Manage the Distinct Value Cache](/influxdb3/core/admin/distinct-value-cache/)\n- [Manage the Last Value Cache](/influxdb3/core/admin/last-value-cache/)\n", + "tag": "Cache data", + "isConceptual": false, + "menuGroup": "Data Operations", + "operations": [ + { + "operationId": "PostConfigureDistinctCache", + "method": "POST", + "path": "/api/v3/configure/distinct_cache", + "summary": "Create distinct cache", + "tags": [ + "Cache data" + ] + }, + { + "operationId": "DeleteConfigureDistinctCache", + "method": "DELETE", + "path": "/api/v3/configure/distinct_cache", + "summary": "Delete distinct cache", + "tags": [ + "Cache data" + ] + }, + { + "operationId": "PostConfigureLastCache", + "method": "POST", + "path": "/api/v3/configure/last_cache", + "summary": "Create last cache", + "tags": [ + "Cache data" + ] + }, + { + "operationId": "DeleteConfigureLastCache", + "method": "DELETE", + "path": "/api/v3/configure/last_cache", + "summary": "Delete last cache", + "tags": [ + "Cache data" + ] + } + ], + "tagDescription": "Manage the in-memory cache.\n\n#### Distinct Value Cache\n\nThe Distinct Value Cache (DVC) lets you cache distinct\nvalues of one or more columns in a table, improving the performance of\nqueries that return distinct tag and field values. \n\nThe DVC is an in-memory cache that stores distinct values for specific columns\nin a table. When you create an DVC, you can specify what columns' distinct\nvalues to cache, the maximum number of distinct value combinations to cache, and\nthe maximum age of cached values. 
A DVC is associated with a table, which can\nhave multiple DVCs.\n\n#### Last value cache\n\nThe Last Value Cache (LVC) lets you cache the most recent\nvalues for specific fields in a table, improving the performance of queries that\nreturn the most recent value of a field for specific series or the last N values\nof a field.\n\nThe LVC is an in-memory cache that stores the last N number of values for\nspecific fields of series in a table. When you create an LVC, you can specify\nwhat fields to cache, what tags to use to identify each series, and the\nnumber of values to cache for each unique series.\nAn LVC is associated with a table, which can have multiple LVCs.\n\n#### Related guides\n\n- [Manage the Distinct Value Cache](/influxdb3/core/admin/distinct-value-cache/)\n- [Manage the Last Value Cache](/influxdb3/core/admin/last-value-cache/)\n", + "source": "static/openapi/influxdb3-core/tags/tags/influxdb3-core-cache-data.yaml", + "staticFilePath": "/openapi/influxdb3-core/tags/tags/influxdb3-core-cache-data.yaml" + } + }, + { + "path": "api/database", + "fields": { + "name": "Database", + "describes": [ + "/api/v3/configure/database", + "/api/v3/configure/database/retention_period" + ], + "title": "Database", + "description": "Manage databases", + "tag": "Database", + "isConceptual": false, + "menuGroup": "Administration", + "operations": [ + { + "operationId": "GetConfigureDatabase", + "method": "GET", + "path": "/api/v3/configure/database", + "summary": "List databases", + "tags": [ + "Database" + ] + }, + { + "operationId": "PostConfigureDatabase", + "method": "POST", + "path": "/api/v3/configure/database", + "summary": "Create a database", + "tags": [ + "Database" + ] + }, + { + "operationId": "DeleteConfigureDatabase", + "method": "DELETE", + "path": "/api/v3/configure/database", + "summary": "Delete a database", + "tags": [ + "Database" + ] + }, + { + "operationId": "DeleteDatabaseRetentionPeriod", + "method": "DELETE", + "path": 
"/api/v3/configure/database/retention_period", + "summary": "Remove database retention period", + "tags": [ + "Database" + ] + } + ], + "tagDescription": "Manage databases", + "source": "static/openapi/influxdb3-core/tags/tags/influxdb3-core-database.yaml", + "staticFilePath": "/openapi/influxdb3-core/tags/tags/influxdb3-core-database.yaml" + } + }, + { + "path": "api/headers-and-parameters", + "fields": { + "name": "Headers and parameters", + "describes": [], + "title": "Headers and parameters", + "description": "Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use.\n\n### Common parameters\n\nThe following table shows common parameters used by many InfluxDB API endpoints.\nMany endpoints may require other parameters in the query string or in the\nrequest body that perform functions specific to those endpoints.\n\n| Query parameter | Value type | Description |\n|:------------------------ |:--------------------- |:-------------------------------------------|\n| `db` | string | The database name |\n\nInfluxDB HTTP API endpoints use standard HTTP request and response headers.\nThe following table shows common headers used by many InfluxDB API endpoints.\nSome endpoints may use other headers that perform functions more specific to those endpoints--for example,\nthe write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the request body.\n\n| Header | Value type | Description |\n|:------------------------ |:--------------------- |:-------------------------------------------|\n| `Accept` | string | The content type that the client can understand. |\n| `Authorization` | string | The [authorization scheme and credential](/influxdb3/core/api/authentication/). |\n| `Content-Length` | integer | The size of the entity-body, in bytes. |\n| `Content-Type` | string | The format of the data in the request body. 
|\n", + "tag": "Headers and parameters", + "isConceptual": true, + "menuGroup": "Concepts", + "operations": [], + "tagDescription": "Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use.\n\n### Common parameters\n\nThe following table shows common parameters used by many InfluxDB API endpoints.\nMany endpoints may require other parameters in the query string or in the\nrequest body that perform functions specific to those endpoints.\n\n| Query parameter | Value type | Description |\n|:------------------------ |:--------------------- |:-------------------------------------------|\n| `db` | string | The database name |\n\nInfluxDB HTTP API endpoints use standard HTTP request and response headers.\nThe following table shows common headers used by many InfluxDB API endpoints.\nSome endpoints may use other headers that perform functions more specific to those endpoints--for example,\nthe write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the request body.\n\n| Header | Value type | Description |\n|:------------------------ |:--------------------- |:-------------------------------------------|\n| `Accept` | string | The content type that the client can understand. |\n| `Authorization` | string | The [authorization scheme and credential](/influxdb3/core/api/authentication/). |\n| `Content-Length` | integer | The size of the entity-body, in bytes. |\n| `Content-Type` | string | The format of the data in the request body. 
|\n", + "source": "static/openapi/influxdb3-core/tags/tags/influxdb3-core-headers-and-parameters.yaml", + "staticFilePath": "/openapi/influxdb3-core/tags/tags/influxdb3-core-headers-and-parameters.yaml" + } + }, + { + "path": "api/migrate-from-influxdb-v1-or-v2", + "fields": { + "name": "Migrate from InfluxDB v1 or v2", + "describes": [], + "title": "Migrate from InfluxDB v1 or v2", + "description": "Migrate your existing InfluxDB v1 or v2 workloads to InfluxDB 3.\n\nInfluxDB 3 provides compatibility endpoints that work with InfluxDB 1.x and 2.x client libraries and tools.\nOperations marked with v1 or v2 badges are compatible with the respective InfluxDB version.\n\n### Migration guides\n\n- [Migrate from InfluxDB v1](/influxdb3/core/guides/migrate/influxdb-1x/) - For users migrating from InfluxDB 1.x\n- [Migrate from InfluxDB v2](/influxdb3/core/guides/migrate/influxdb-2x/) - For users migrating from InfluxDB 2.x or Cloud\n- [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) - v1 and v2 write endpoints\n- [Use the v1 HTTP query API](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) - InfluxQL queries via HTTP\n", + "tag": "Migrate from InfluxDB v1 or v2", + "isConceptual": true, + "menuGroup": "Other", + "operations": [], + "tagDescription": "Migrate your existing InfluxDB v1 or v2 workloads to InfluxDB 3.\n\nInfluxDB 3 provides compatibility endpoints that work with InfluxDB 1.x and 2.x client libraries and tools.\nOperations marked with v1 or v2 badges are compatible with the respective InfluxDB version.\n\n### Migration guides\n\n- [Migrate from InfluxDB v1](/influxdb3/core/guides/migrate/influxdb-1x/) - For users migrating from InfluxDB 1.x\n- [Migrate from InfluxDB v2](/influxdb3/core/guides/migrate/influxdb-2x/) - For users migrating from InfluxDB 2.x or Cloud\n- [Use compatibility APIs to write data](/influxdb3/core/write-data/http-api/compatibility-apis/) - v1 and v2 write endpoints\n- [Use the v1 
HTTP query API](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) - InfluxQL queries via HTTP\n", + "source": "static/openapi/influxdb3-core/tags/tags/influxdb3-core-migrate-from-influxdb-v1-or-v2.yaml", + "staticFilePath": "/openapi/influxdb3-core/tags/tags/influxdb3-core-migrate-from-influxdb-v1-or-v2.yaml" + } + }, + { + "path": "api/processing-engine", + "fields": { + "name": "Processing engine", + "describes": [ + "/api/v3/configure/processing_engine_trigger", + "/api/v3/configure/processing_engine_trigger/disable", + "/api/v3/configure/processing_engine_trigger/enable", + "/api/v3/configure/plugin_environment/install_packages", + "/api/v3/configure/plugin_environment/install_requirements", + "/api/v3/plugin_test/wal", + "/api/v3/plugin_test/schedule", + "/api/v3/engine/{request_path}", + "/api/v3/plugins/files", + "/api/v3/plugins/directory" + ], + "title": "Processing engine", + "description": "Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins.\n\nInfluxDB 3 provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database.\nUse Processing engine plugins and triggers to run code and perform tasks for different database events.\n\nTo get started with the processing engine, see the [Processing engine and Python plugins](/influxdb3/core/processing-engine/) guide.\n", + "tag": "Processing engine", + "isConceptual": false, + "menuGroup": "Processing Engine", + "operations": [ + { + "operationId": "PostConfigureProcessingEngineTrigger", + "method": "POST", + "path": "/api/v3/configure/processing_engine_trigger", + "summary": "Create processing engine trigger", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "DeleteConfigureProcessingEngineTrigger", + "method": "DELETE", + "path": "/api/v3/configure/processing_engine_trigger", + "summary": "Delete processing engine trigger", + "tags": [ + "Processing 
engine" + ] + }, + { + "operationId": "PostDisableProcessingEngineTrigger", + "method": "POST", + "path": "/api/v3/configure/processing_engine_trigger/disable", + "summary": "Disable processing engine trigger", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PostEnableProcessingEngineTrigger", + "method": "POST", + "path": "/api/v3/configure/processing_engine_trigger/enable", + "summary": "Enable processing engine trigger", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PostInstallPluginPackages", + "method": "POST", + "path": "/api/v3/configure/plugin_environment/install_packages", + "summary": "Install plugin packages", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PostInstallPluginRequirements", + "method": "POST", + "path": "/api/v3/configure/plugin_environment/install_requirements", + "summary": "Install plugin requirements", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PostTestWALPlugin", + "method": "POST", + "path": "/api/v3/plugin_test/wal", + "summary": "Test WAL plugin", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PostTestSchedulingPlugin", + "method": "POST", + "path": "/api/v3/plugin_test/schedule", + "summary": "Test scheduling plugin", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "GetProcessingEnginePluginRequest", + "method": "GET", + "path": "/api/v3/engine/{request_path}", + "summary": "On Request processing engine plugin request", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PostProcessingEnginePluginRequest", + "method": "POST", + "path": "/api/v3/engine/{request_path}", + "summary": "On Request processing engine plugin request", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PutPluginFile", + "method": "PUT", + "path": "/api/v3/plugins/files", + "summary": "Update plugin file", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PutPluginDirectory", + "method": "PUT", + "path": 
"/api/v3/plugins/directory", + "summary": "Update plugin directory", + "tags": [ + "Processing engine" + ] + } + ], + "tagDescription": "Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins.\n\nInfluxDB 3 provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database.\nUse Processing engine plugins and triggers to run code and perform tasks for different database events.\n\nTo get started with the processing engine, see the [Processing engine and Python plugins](/influxdb3/core/processing-engine/) guide.\n", + "source": "static/openapi/influxdb3-core/tags/tags/influxdb3-core-processing-engine.yaml", + "staticFilePath": "/openapi/influxdb3-core/tags/tags/influxdb3-core-processing-engine.yaml" + } + }, + { + "path": "api/query-data", + "fields": { + "name": "Query data", + "describes": [ + "/api/v3/query_sql", + "/api/v3/query_influxql", + "/query" + ], + "title": "Query data", + "description": "Query data using SQL or InfluxQL", + "tag": "Query data", + "isConceptual": false, + "menuGroup": "Data Operations", + "operations": [ + { + "operationId": "GetExecuteQuerySQL", + "method": "GET", + "path": "/api/v3/query_sql", + "summary": "Execute SQL query", + "tags": [ + "Query data" + ] + }, + { + "operationId": "PostExecuteQuerySQL", + "method": "POST", + "path": "/api/v3/query_sql", + "summary": "Execute SQL query", + "tags": [ + "Query data" + ] + }, + { + "operationId": "GetExecuteInfluxQLQuery", + "method": "GET", + "path": "/api/v3/query_influxql", + "summary": "Execute InfluxQL query", + "tags": [ + "Query data" + ] + }, + { + "operationId": "PostExecuteQueryInfluxQL", + "method": "POST", + "path": "/api/v3/query_influxql", + "summary": "Execute InfluxQL query", + "tags": [ + "Query data" + ] + }, + { + "operationId": "GetV1ExecuteQuery", + "method": "GET", + "path": "/query", + "summary": "Execute InfluxQL query 
(v1-compatible)", + "tags": [ + "Query data" + ], + "compatVersion": "v1", + "externalDocs": { + "description": "Use the InfluxDB v1 HTTP query API and InfluxQL to query data", + "url": "/influxdb/version/query-data/execute-queries/influxdb-v1-api/" + } + }, + { + "operationId": "PostExecuteV1Query", + "method": "POST", + "path": "/query", + "summary": "Execute InfluxQL query (v1-compatible)", + "tags": [ + "Query data" + ], + "compatVersion": "v1", + "externalDocs": { + "description": "Use the InfluxDB v1 HTTP query API and InfluxQL to query data", + "url": "/influxdb/version/query-data/execute-queries/influxdb-v1-api/" + } + } + ], + "tagDescription": "Query data using SQL or InfluxQL", + "related": [ + "/influxdb/version/query-data/execute-queries/influxdb-v1-api/" + ], + "source": "static/openapi/influxdb3-core/tags/tags/influxdb3-core-query-data.yaml", + "staticFilePath": "/openapi/influxdb3-core/tags/tags/influxdb3-core-query-data.yaml" + } + }, + { + "path": "api/quick-start", + "fields": { + "name": "Quick start", + "describes": [], + "title": "Quick start", + "description": "1. [Create an admin token](#section/Authentication) to authorize API requests.\n\n ```bash\n curl -X POST \"http://localhost:8181/api/v3/configure/token/admin\"\n ```\n2. [Check the status](#section/Server-information) of the InfluxDB server.\n\n ```bash\n curl \"http://localhost:8181/health\" \\\n --header \"Authorization: Bearer ADMIN_TOKEN\"\n ```\n\n3. [Write data](#operation/PostWriteLP) to InfluxDB.\n\n ```bash\n curl \"http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto\"\n --header \"Authorization: Bearer ADMIN_TOKEN\" \\\n --data-raw \"home,room=Kitchen temp=72.0\n home,room=Living\\ room temp=71.5\"\n ```\n\n If all data is written, the response is `204 No Content`.\n\n4. 
[Query data](#operation/GetExecuteQuerySQL) from InfluxDB.\n\n ```bash\n curl -G \"http://localhost:8181/api/v3/query_sql\" \\\n --header \"Authorization: Bearer ADMIN_TOKEN\" \\\n --data-urlencode \"db=sensors\" \\\n --data-urlencode \"q=SELECT * FROM home WHERE room='Living room'\" \\\n --data-urlencode \"format=jsonl\"\n ```\n \n Output:\n\n ```jsonl\n {\"room\":\"Living room\",\"temp\":71.5,\"time\":\"2025-02-25T20:19:34.984098\"}\n ```\n \nFor more information about using InfluxDB 3, see the [Get started](/influxdb3/core/get-started/) guide.\n", + "tag": "Quick start", + "isConceptual": true, + "menuGroup": "Concepts", + "operations": [], + "tagDescription": "1. [Create an admin token](#section/Authentication) to authorize API requests.\n\n ```bash\n curl -X POST \"http://localhost:8181/api/v3/configure/token/admin\"\n ```\n2. [Check the status](#section/Server-information) of the InfluxDB server.\n\n ```bash\n curl \"http://localhost:8181/health\" \\\n --header \"Authorization: Bearer ADMIN_TOKEN\"\n ```\n\n3. [Write data](#operation/PostWriteLP) to InfluxDB.\n\n ```bash\n curl \"http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto\"\n --header \"Authorization: Bearer ADMIN_TOKEN\" \\\n --data-raw \"home,room=Kitchen temp=72.0\n home,room=Living\\ room temp=71.5\"\n ```\n\n If all data is written, the response is `204 No Content`.\n\n4. 
[Query data](#operation/GetExecuteQuerySQL) from InfluxDB.\n\n ```bash\n curl -G \"http://localhost:8181/api/v3/query_sql\" \\\n --header \"Authorization: Bearer ADMIN_TOKEN\" \\\n --data-urlencode \"db=sensors\" \\\n --data-urlencode \"q=SELECT * FROM home WHERE room='Living room'\" \\\n --data-urlencode \"format=jsonl\"\n ```\n \n Output:\n\n ```jsonl\n {\"room\":\"Living room\",\"temp\":71.5,\"time\":\"2025-02-25T20:19:34.984098\"}\n ```\n \nFor more information about using InfluxDB 3, see the [Get started](/influxdb3/core/get-started/) guide.\n", + "source": "static/openapi/influxdb3-core/tags/tags/influxdb3-core-quick-start.yaml", + "staticFilePath": "/openapi/influxdb3-core/tags/tags/influxdb3-core-quick-start.yaml" + } + }, + { + "path": "api/server-information", + "fields": { + "name": "Server information", + "describes": [ + "/health", + "/api/v1/health", + "/ping", + "/metrics" + ], + "title": "Server information", + "description": "Retrieve server metrics, status, and version information", + "tag": "Server information", + "isConceptual": false, + "menuGroup": "Server", + "operations": [ + { + "operationId": "GetHealth", + "method": "GET", + "path": "/health", + "summary": "Health check", + "tags": [ + "Server information" + ] + }, + { + "operationId": "GetHealthV1", + "method": "GET", + "path": "/api/v1/health", + "summary": "Health check (v1-compatible)", + "tags": [ + "Server information" + ], + "compatVersion": "v1" + }, + { + "operationId": "GetPing", + "method": "GET", + "path": "/ping", + "summary": "Ping the server", + "tags": [ + "Server information" + ] + }, + { + "operationId": "GetMetrics", + "method": "GET", + "path": "/metrics", + "summary": "Metrics", + "tags": [ + "Server information" + ] + } + ], + "tagDescription": "Retrieve server metrics, status, and version information", + "source": "static/openapi/influxdb3-core/tags/tags/influxdb3-core-server-information.yaml", + "staticFilePath": 
"/openapi/influxdb3-core/tags/tags/influxdb3-core-server-information.yaml" + } + }, + { + "path": "api/table", + "fields": { + "name": "Table", + "describes": [ + "/api/v3/configure/table" + ], + "title": "Table", + "description": "Manage table schemas and data", + "tag": "Table", + "isConceptual": false, + "menuGroup": "Administration", + "operations": [ + { + "operationId": "PostConfigureTable", + "method": "POST", + "path": "/api/v3/configure/table", + "summary": "Create a table", + "tags": [ + "Table" + ] + }, + { + "operationId": "DeleteConfigureTable", + "method": "DELETE", + "path": "/api/v3/configure/table", + "summary": "Delete a table", + "tags": [ + "Table" + ] + } + ], + "tagDescription": "Manage table schemas and data", + "source": "static/openapi/influxdb3-core/tags/tags/influxdb3-core-table.yaml", + "staticFilePath": "/openapi/influxdb3-core/tags/tags/influxdb3-core-table.yaml" + } + }, + { + "path": "api/write-data", + "fields": { + "name": "Write data", + "describes": [ + "/write", + "/api/v2/write", + "/api/v3/write_lp" + ], + "title": "Write data", + "description": "Write data to InfluxDB 3 using line protocol format.\n\n#### Timestamp precision across write APIs\n\nInfluxDB 3 provides multiple write endpoints for compatibility with different InfluxDB versions.\nThe following table compares timestamp precision support across v1, v2, and v3 write APIs:\n\n| Precision | v1 (`/write`) | v2 (`/api/v2/write`) | v3 (`/api/v3/write_lp`) |\n|-----------|---------------|----------------------|-------------------------|\n| **Auto detection** | ❌ No | ❌ No | ✅ `auto` (default) |\n| **Seconds** | ✅ `s` | ✅ `s` | ✅ `second` |\n| **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` |\n| **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` |\n| **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` |\n| **Minutes** | ✅ `m` | ❌ No | ❌ No |\n| **Hours** | ✅ `h` | ❌ No | ❌ No |\n| **Default** | Nanosecond | Nanosecond | **Auto** (guessed) |\n\nAll timestamps are 
stored internally as nanoseconds.\n", + "tag": "Write data", + "isConceptual": false, + "menuGroup": "Data Operations", + "operations": [ + { + "operationId": "PostV1Write", + "method": "POST", + "path": "/write", + "summary": "Write line protocol (v1-compatible)", + "tags": [ + "Write data" + ], + "compatVersion": "v1", + "externalDocs": { + "description": "Use compatibility APIs to write data", + "url": "/influxdb/version/write-data/http-api/compatibility-apis/" + } + }, + { + "operationId": "PostV2Write", + "method": "POST", + "path": "/api/v2/write", + "summary": "Write line protocol (v2-compatible)", + "tags": [ + "Write data" + ], + "compatVersion": "v2", + "externalDocs": { + "description": "Use compatibility APIs to write data", + "url": "/influxdb/version/write-data/http-api/compatibility-apis/" + } + }, + { + "operationId": "PostWriteLP", + "method": "POST", + "path": "/api/v3/write_lp", + "summary": "Write line protocol", + "tags": [ + "Write data" + ] + } + ], + "tagDescription": "Write data to InfluxDB 3 using line protocol format.\n\n#### Timestamp precision across write APIs\n\nInfluxDB 3 provides multiple write endpoints for compatibility with different InfluxDB versions.\nThe following table compares timestamp precision support across v1, v2, and v3 write APIs:\n\n| Precision | v1 (`/write`) | v2 (`/api/v2/write`) | v3 (`/api/v3/write_lp`) |\n|-----------|---------------|----------------------|-------------------------|\n| **Auto detection** | ❌ No | ❌ No | ✅ `auto` (default) |\n| **Seconds** | ✅ `s` | ✅ `s` | ✅ `second` |\n| **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` |\n| **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` |\n| **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` |\n| **Minutes** | ✅ `m` | ❌ No | ❌ No |\n| **Hours** | ✅ `h` | ❌ No | ❌ No |\n| **Default** | Nanosecond | Nanosecond | **Auto** (guessed) |\n\nAll timestamps are stored internally as nanoseconds.\n", + "related": [ + 
"/influxdb/version/write-data/http-api/compatibility-apis/" + ], + "source": "static/openapi/influxdb3-core/tags/tags/influxdb3-core-write-data.yaml", + "staticFilePath": "/openapi/influxdb3-core/tags/tags/influxdb3-core-write-data.yaml" + } + } + ] +} \ No newline at end of file diff --git a/data/article_data/influxdb/influxdb3_core/articles.yml b/data/article_data/influxdb/influxdb3_core/articles.yml new file mode 100644 index 0000000000..221e298d4d --- /dev/null +++ b/data/article_data/influxdb/influxdb3_core/articles.yml @@ -0,0 +1,994 @@ +articles: + - path: api/auth-token + fields: + name: Auth token + describes: + - /api/v3/configure/token/admin + - /api/v3/configure/token/admin/regenerate + - /api/v3/configure/token + - /api/v3/configure/token/named_admin + title: Auth token + description: Manage tokens for authentication and authorization + tag: Auth token + isConceptual: false + menuGroup: Other + operations: + - operationId: PostCreateAdminToken + method: POST + path: /api/v3/configure/token/admin + summary: Create admin token + tags: + - Auth token + - operationId: PostRegenerateAdminToken + method: POST + path: /api/v3/configure/token/admin/regenerate + summary: Regenerate admin token + tags: + - Auth token + - operationId: DeleteToken + method: DELETE + path: /api/v3/configure/token + summary: Delete token + tags: + - Auth token + - operationId: PostCreateNamedAdminToken + method: POST + path: /api/v3/configure/token/named_admin + summary: Create named admin token + tags: + - Auth token + tagDescription: Manage tokens for authentication and authorization + source: static/openapi/influxdb3-core/tags/tags/influxdb3-core-auth-token.yaml + staticFilePath: /openapi/influxdb3-core/tags/tags/influxdb3-core-auth-token.yaml + - path: api/authentication + fields: + name: Authentication + describes: + - /api/v3/configure/token/admin/regenerate + - /api/v3/configure/token + - /api/v3/configure/token/named_admin + title: Authentication + description: > + Depending 
on your workflow, use one of the following schemes to + authenticate to the InfluxDB 3 API: + + + | Authentication scheme | Works with | + + |:----------------------|:-----------| + + | Bearer authentication | All endpoints | + + | Token authentication | v1 and v2 compatibility endpoints (`/write`, + `/query`, `/api/v2/write`) | + + | Basic authentication | v1 compatibility endpoints (`/write`, + `/query`) | + + | Querystring authentication | v1 compatibility endpoints (`/write`, + `/query`) | + + + See the **Security Schemes** section below for details on each + authentication method. + tag: Authentication + isConceptual: true + menuGroup: Concepts + operations: + - operationId: PostRegenerateAdminToken + method: POST + path: /api/v3/configure/token/admin/regenerate + summary: Regenerate admin token + tags: + - Authentication + - operationId: DeleteToken + method: DELETE + path: /api/v3/configure/token + summary: Delete token + tags: + - Authentication + - operationId: PostCreateNamedAdminToken + method: POST + path: /api/v3/configure/token/named_admin + summary: Create named admin token + tags: + - Authentication + tagDescription: > + Depending on your workflow, use one of the following schemes to + authenticate to the InfluxDB 3 API: + + + | Authentication scheme | Works with | + + |:----------------------|:-----------| + + | Bearer authentication | All endpoints | + + | Token authentication | v1 and v2 compatibility endpoints (`/write`, + `/query`, `/api/v2/write`) | + + | Basic authentication | v1 compatibility endpoints (`/write`, + `/query`) | + + | Querystring authentication | v1 compatibility endpoints (`/write`, + `/query`) | + + + See the **Security Schemes** section below for details on each + authentication method. 
+ showSecuritySchemes: true + source: >- + static/openapi/influxdb3-core/tags/tags/influxdb3-core-authentication.yaml + staticFilePath: /openapi/influxdb3-core/tags/tags/influxdb3-core-authentication.yaml + - path: api/cache-data + fields: + name: Cache data + describes: + - /api/v3/configure/distinct_cache + - /api/v3/configure/last_cache + title: Cache data + description: > + Manage the in-memory cache. + + + #### Distinct Value Cache + + + The Distinct Value Cache (DVC) lets you cache distinct + + values of one or more columns in a table, improving the performance of + + queries that return distinct tag and field values. + + + The DVC is an in-memory cache that stores distinct values for specific + columns + + in a table. When you create an DVC, you can specify what columns' + distinct + + values to cache, the maximum number of distinct value combinations to + cache, and + + the maximum age of cached values. A DVC is associated with a table, + which can + + have multiple DVCs. + + + #### Last value cache + + + The Last Value Cache (LVC) lets you cache the most recent + + values for specific fields in a table, improving the performance of + queries that + + return the most recent value of a field for specific series or the last + N values + + of a field. + + + The LVC is an in-memory cache that stores the last N number of values + for + + specific fields of series in a table. When you create an LVC, you can + specify + + what fields to cache, what tags to use to identify each series, and the + + number of values to cache for each unique series. + + An LVC is associated with a table, which can have multiple LVCs. 
+ + + #### Related guides + + + - [Manage the Distinct Value + Cache](/influxdb3/core/admin/distinct-value-cache/) + + - [Manage the Last Value Cache](/influxdb3/core/admin/last-value-cache/) + tag: Cache data + isConceptual: false + menuGroup: Data Operations + operations: + - operationId: PostConfigureDistinctCache + method: POST + path: /api/v3/configure/distinct_cache + summary: Create distinct cache + tags: + - Cache data + - operationId: DeleteConfigureDistinctCache + method: DELETE + path: /api/v3/configure/distinct_cache + summary: Delete distinct cache + tags: + - Cache data + - operationId: PostConfigureLastCache + method: POST + path: /api/v3/configure/last_cache + summary: Create last cache + tags: + - Cache data + - operationId: DeleteConfigureLastCache + method: DELETE + path: /api/v3/configure/last_cache + summary: Delete last cache + tags: + - Cache data + tagDescription: > + Manage the in-memory cache. + + + #### Distinct Value Cache + + + The Distinct Value Cache (DVC) lets you cache distinct + + values of one or more columns in a table, improving the performance of + + queries that return distinct tag and field values. + + + The DVC is an in-memory cache that stores distinct values for specific + columns + + in a table. When you create an DVC, you can specify what columns' + distinct + + values to cache, the maximum number of distinct value combinations to + cache, and + + the maximum age of cached values. A DVC is associated with a table, + which can + + have multiple DVCs. + + + #### Last value cache + + + The Last Value Cache (LVC) lets you cache the most recent + + values for specific fields in a table, improving the performance of + queries that + + return the most recent value of a field for specific series or the last + N values + + of a field. + + + The LVC is an in-memory cache that stores the last N number of values + for + + specific fields of series in a table. 
When you create an LVC, you can + specify + + what fields to cache, what tags to use to identify each series, and the + + number of values to cache for each unique series. + + An LVC is associated with a table, which can have multiple LVCs. + + + #### Related guides + + + - [Manage the Distinct Value + Cache](/influxdb3/core/admin/distinct-value-cache/) + + - [Manage the Last Value Cache](/influxdb3/core/admin/last-value-cache/) + source: static/openapi/influxdb3-core/tags/tags/influxdb3-core-cache-data.yaml + staticFilePath: /openapi/influxdb3-core/tags/tags/influxdb3-core-cache-data.yaml + - path: api/database + fields: + name: Database + describes: + - /api/v3/configure/database + - /api/v3/configure/database/retention_period + title: Database + description: Manage databases + tag: Database + isConceptual: false + menuGroup: Administration + operations: + - operationId: GetConfigureDatabase + method: GET + path: /api/v3/configure/database + summary: List databases + tags: + - Database + - operationId: PostConfigureDatabase + method: POST + path: /api/v3/configure/database + summary: Create a database + tags: + - Database + - operationId: DeleteConfigureDatabase + method: DELETE + path: /api/v3/configure/database + summary: Delete a database + tags: + - Database + - operationId: DeleteDatabaseRetentionPeriod + method: DELETE + path: /api/v3/configure/database/retention_period + summary: Remove database retention period + tags: + - Database + tagDescription: Manage databases + source: static/openapi/influxdb3-core/tags/tags/influxdb3-core-database.yaml + staticFilePath: /openapi/influxdb3-core/tags/tags/influxdb3-core-database.yaml + - path: api/headers-and-parameters + fields: + name: Headers and parameters + describes: [] + title: Headers and parameters + description: > + Most InfluxDB API endpoints require parameters in the request--for + example, specifying the database to use. 
+ + + ### Common parameters + + + The following table shows common parameters used by many InfluxDB API + endpoints. + + Many endpoints may require other parameters in the query string or in + the + + request body that perform functions specific to those endpoints. + + + | Query parameter | Value type | + Description | + + |:------------------------ |:--------------------- + |:-------------------------------------------| + + | `db` | string | The database name | + + + InfluxDB HTTP API endpoints use standard HTTP request and response + headers. + + The following table shows common headers used by many InfluxDB API + endpoints. + + Some endpoints may use other headers that perform functions more + specific to those endpoints--for example, + + the write endpoints accept the `Content-Encoding` header to indicate + that line protocol is compressed in the request body. + + + | Header | Value type | + Description | + + |:------------------------ |:--------------------- + |:-------------------------------------------| + + | `Accept` | string | The content type + that the client can understand. | + + | `Authorization` | string | The [authorization + scheme and credential](/influxdb3/core/api/authentication/). | + + | `Content-Length` | integer | The size of the + entity-body, in bytes. | + + | `Content-Type` | string | The format of the + data in the request body. | + tag: Headers and parameters + isConceptual: true + menuGroup: Concepts + operations: [] + tagDescription: > + Most InfluxDB API endpoints require parameters in the request--for + example, specifying the database to use. + + + ### Common parameters + + + The following table shows common parameters used by many InfluxDB API + endpoints. + + Many endpoints may require other parameters in the query string or in + the + + request body that perform functions specific to those endpoints. 
+ + + | Query parameter | Value type | + Description | + + |:------------------------ |:--------------------- + |:-------------------------------------------| + + | `db` | string | The database name | + + + InfluxDB HTTP API endpoints use standard HTTP request and response + headers. + + The following table shows common headers used by many InfluxDB API + endpoints. + + Some endpoints may use other headers that perform functions more + specific to those endpoints--for example, + + the write endpoints accept the `Content-Encoding` header to indicate + that line protocol is compressed in the request body. + + + | Header | Value type | + Description | + + |:------------------------ |:--------------------- + |:-------------------------------------------| + + | `Accept` | string | The content type + that the client can understand. | + + | `Authorization` | string | The [authorization + scheme and credential](/influxdb3/core/api/authentication/). | + + | `Content-Length` | integer | The size of the + entity-body, in bytes. | + + | `Content-Type` | string | The format of the + data in the request body. | + source: >- + static/openapi/influxdb3-core/tags/tags/influxdb3-core-headers-and-parameters.yaml + staticFilePath: >- + /openapi/influxdb3-core/tags/tags/influxdb3-core-headers-and-parameters.yaml + - path: api/migrate-from-influxdb-v1-or-v2 + fields: + name: Migrate from InfluxDB v1 or v2 + describes: [] + title: Migrate from InfluxDB v1 or v2 + description: > + Migrate your existing InfluxDB v1 or v2 workloads to InfluxDB 3. + + + InfluxDB 3 provides compatibility endpoints that work with InfluxDB 1.x + and 2.x client libraries and tools. + + Operations marked with v1 or v2 badges are compatible with the + respective InfluxDB version. 
+ + + ### Migration guides + + + - [Migrate from InfluxDB + v1](/influxdb3/core/guides/migrate/influxdb-1x/) - For users migrating + from InfluxDB 1.x + + - [Migrate from InfluxDB + v2](/influxdb3/core/guides/migrate/influxdb-2x/) - For users migrating + from InfluxDB 2.x or Cloud + + - [Use compatibility APIs to write + data](/influxdb3/core/write-data/http-api/compatibility-apis/) - v1 and + v2 write endpoints + + - [Use the v1 HTTP query + API](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) - + InfluxQL queries via HTTP + tag: Migrate from InfluxDB v1 or v2 + isConceptual: true + menuGroup: Other + operations: [] + tagDescription: > + Migrate your existing InfluxDB v1 or v2 workloads to InfluxDB 3. + + + InfluxDB 3 provides compatibility endpoints that work with InfluxDB 1.x + and 2.x client libraries and tools. + + Operations marked with v1 or v2 badges are compatible with the + respective InfluxDB version. + + + ### Migration guides + + + - [Migrate from InfluxDB + v1](/influxdb3/core/guides/migrate/influxdb-1x/) - For users migrating + from InfluxDB 1.x + + - [Migrate from InfluxDB + v2](/influxdb3/core/guides/migrate/influxdb-2x/) - For users migrating + from InfluxDB 2.x or Cloud + + - [Use compatibility APIs to write + data](/influxdb3/core/write-data/http-api/compatibility-apis/) - v1 and + v2 write endpoints + + - [Use the v1 HTTP query + API](/influxdb3/core/query-data/execute-queries/influxdb-v1-api/) - + InfluxQL queries via HTTP + source: >- + static/openapi/influxdb3-core/tags/tags/influxdb3-core-migrate-from-influxdb-v1-or-v2.yaml + staticFilePath: >- + /openapi/influxdb3-core/tags/tags/influxdb3-core-migrate-from-influxdb-v1-or-v2.yaml + - path: api/processing-engine + fields: + name: Processing engine + describes: + - /api/v3/configure/processing_engine_trigger + - /api/v3/configure/processing_engine_trigger/disable + - /api/v3/configure/processing_engine_trigger/enable + - /api/v3/configure/plugin_environment/install_packages + - 
/api/v3/configure/plugin_environment/install_requirements + - /api/v3/plugin_test/wal + - /api/v3/plugin_test/schedule + - /api/v3/engine/{request_path} + - /api/v3/plugins/files + - /api/v3/plugins/directory + title: Processing engine + description: > + Manage Processing engine triggers, test plugins, and send requests to + trigger On Request plugins. + + + InfluxDB 3 provides the InfluxDB 3 processing engine, an embedded Python + VM that can dynamically load and trigger Python plugins in response to + events in your database. + + Use Processing engine plugins and triggers to run code and perform tasks + for different database events. + + + To get started with the processing engine, see the [Processing engine + and Python plugins](/influxdb3/core/processing-engine/) guide. + tag: Processing engine + isConceptual: false + menuGroup: Processing Engine + operations: + - operationId: PostConfigureProcessingEngineTrigger + method: POST + path: /api/v3/configure/processing_engine_trigger + summary: Create processing engine trigger + tags: + - Processing engine + - operationId: DeleteConfigureProcessingEngineTrigger + method: DELETE + path: /api/v3/configure/processing_engine_trigger + summary: Delete processing engine trigger + tags: + - Processing engine + - operationId: PostDisableProcessingEngineTrigger + method: POST + path: /api/v3/configure/processing_engine_trigger/disable + summary: Disable processing engine trigger + tags: + - Processing engine + - operationId: PostEnableProcessingEngineTrigger + method: POST + path: /api/v3/configure/processing_engine_trigger/enable + summary: Enable processing engine trigger + tags: + - Processing engine + - operationId: PostInstallPluginPackages + method: POST + path: /api/v3/configure/plugin_environment/install_packages + summary: Install plugin packages + tags: + - Processing engine + - operationId: PostInstallPluginRequirements + method: POST + path: /api/v3/configure/plugin_environment/install_requirements + summary: 
Install plugin requirements + tags: + - Processing engine + - operationId: PostTestWALPlugin + method: POST + path: /api/v3/plugin_test/wal + summary: Test WAL plugin + tags: + - Processing engine + - operationId: PostTestSchedulingPlugin + method: POST + path: /api/v3/plugin_test/schedule + summary: Test scheduling plugin + tags: + - Processing engine + - operationId: GetProcessingEnginePluginRequest + method: GET + path: /api/v3/engine/{request_path} + summary: On Request processing engine plugin request + tags: + - Processing engine + - operationId: PostProcessingEnginePluginRequest + method: POST + path: /api/v3/engine/{request_path} + summary: On Request processing engine plugin request + tags: + - Processing engine + - operationId: PutPluginFile + method: PUT + path: /api/v3/plugins/files + summary: Update plugin file + tags: + - Processing engine + - operationId: PutPluginDirectory + method: PUT + path: /api/v3/plugins/directory + summary: Update plugin directory + tags: + - Processing engine + tagDescription: > + Manage Processing engine triggers, test plugins, and send requests to + trigger On Request plugins. + + + InfluxDB 3 provides the InfluxDB 3 processing engine, an embedded Python + VM that can dynamically load and trigger Python plugins in response to + events in your database. + + Use Processing engine plugins and triggers to run code and perform tasks + for different database events. + + + To get started with the processing engine, see the [Processing engine + and Python plugins](/influxdb3/core/processing-engine/) guide. 
+ source: >- + static/openapi/influxdb3-core/tags/tags/influxdb3-core-processing-engine.yaml + staticFilePath: /openapi/influxdb3-core/tags/tags/influxdb3-core-processing-engine.yaml + - path: api/query-data + fields: + name: Query data + describes: + - /api/v3/query_sql + - /api/v3/query_influxql + - /query + title: Query data + description: Query data using SQL or InfluxQL + tag: Query data + isConceptual: false + menuGroup: Data Operations + operations: + - operationId: GetExecuteQuerySQL + method: GET + path: /api/v3/query_sql + summary: Execute SQL query + tags: + - Query data + - operationId: PostExecuteQuerySQL + method: POST + path: /api/v3/query_sql + summary: Execute SQL query + tags: + - Query data + - operationId: GetExecuteInfluxQLQuery + method: GET + path: /api/v3/query_influxql + summary: Execute InfluxQL query + tags: + - Query data + - operationId: PostExecuteQueryInfluxQL + method: POST + path: /api/v3/query_influxql + summary: Execute InfluxQL query + tags: + - Query data + - operationId: GetV1ExecuteQuery + method: GET + path: /query + summary: Execute InfluxQL query (v1-compatible) + tags: + - Query data + compatVersion: v1 + externalDocs: + description: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + url: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ + - operationId: PostExecuteV1Query + method: POST + path: /query + summary: Execute InfluxQL query (v1-compatible) + tags: + - Query data + compatVersion: v1 + externalDocs: + description: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + url: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ + tagDescription: Query data using SQL or InfluxQL + related: + - /influxdb/version/query-data/execute-queries/influxdb-v1-api/ + source: static/openapi/influxdb3-core/tags/tags/influxdb3-core-query-data.yaml + staticFilePath: /openapi/influxdb3-core/tags/tags/influxdb3-core-query-data.yaml + - path: api/quick-start + fields: + name: Quick start + 
describes: [] + title: Quick start + description: > + 1. [Create an admin token](#section/Authentication) to authorize API + requests. + + ```bash + curl -X POST "http://localhost:8181/api/v3/configure/token/admin" + ``` + 2. [Check the status](#section/Server-information) of the InfluxDB + server. + + ```bash + curl "http://localhost:8181/health" \ + --header "Authorization: Bearer ADMIN_TOKEN" + ``` + + 3. [Write data](#operation/PostWriteLP) to InfluxDB. + + ```bash + curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" + --header "Authorization: Bearer ADMIN_TOKEN" \ + --data-raw "home,room=Kitchen temp=72.0 + home,room=Living\ room temp=71.5" + ``` + + If all data is written, the response is `204 No Content`. + + 4. [Query data](#operation/GetExecuteQuerySQL) from InfluxDB. + + ```bash + curl -G "http://localhost:8181/api/v3/query_sql" \ + --header "Authorization: Bearer ADMIN_TOKEN" \ + --data-urlencode "db=sensors" \ + --data-urlencode "q=SELECT * FROM home WHERE room='Living room'" \ + --data-urlencode "format=jsonl" + ``` + + Output: + + ```jsonl + {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"} + ``` + + For more information about using InfluxDB 3, see the [Get + started](/influxdb3/core/get-started/) guide. + tag: Quick start + isConceptual: true + menuGroup: Concepts + operations: [] + tagDescription: > + 1. [Create an admin token](#section/Authentication) to authorize API + requests. + + ```bash + curl -X POST "http://localhost:8181/api/v3/configure/token/admin" + ``` + 2. [Check the status](#section/Server-information) of the InfluxDB + server. + + ```bash + curl "http://localhost:8181/health" \ + --header "Authorization: Bearer ADMIN_TOKEN" + ``` + + 3. [Write data](#operation/PostWriteLP) to InfluxDB. 
+ + ```bash + curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" + --header "Authorization: Bearer ADMIN_TOKEN" \ + --data-raw "home,room=Kitchen temp=72.0 + home,room=Living\ room temp=71.5" + ``` + + If all data is written, the response is `204 No Content`. + + 4. [Query data](#operation/GetExecuteQuerySQL) from InfluxDB. + + ```bash + curl -G "http://localhost:8181/api/v3/query_sql" \ + --header "Authorization: Bearer ADMIN_TOKEN" \ + --data-urlencode "db=sensors" \ + --data-urlencode "q=SELECT * FROM home WHERE room='Living room'" \ + --data-urlencode "format=jsonl" + ``` + + Output: + + ```jsonl + {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"} + ``` + + For more information about using InfluxDB 3, see the [Get + started](/influxdb3/core/get-started/) guide. + source: static/openapi/influxdb3-core/tags/tags/influxdb3-core-quick-start.yaml + staticFilePath: /openapi/influxdb3-core/tags/tags/influxdb3-core-quick-start.yaml + - path: api/server-information + fields: + name: Server information + describes: + - /health + - /api/v1/health + - /ping + - /metrics + title: Server information + description: Retrieve server metrics, status, and version information + tag: Server information + isConceptual: false + menuGroup: Server + operations: + - operationId: GetHealth + method: GET + path: /health + summary: Health check + tags: + - Server information + - operationId: GetHealthV1 + method: GET + path: /api/v1/health + summary: Health check (v1-compatible) + tags: + - Server information + compatVersion: v1 + - operationId: GetPing + method: GET + path: /ping + summary: Ping the server + tags: + - Server information + - operationId: GetMetrics + method: GET + path: /metrics + summary: Metrics + tags: + - Server information + tagDescription: Retrieve server metrics, status, and version information + source: >- + static/openapi/influxdb3-core/tags/tags/influxdb3-core-server-information.yaml + staticFilePath: 
/openapi/influxdb3-core/tags/tags/influxdb3-core-server-information.yaml + - path: api/table + fields: + name: Table + describes: + - /api/v3/configure/table + title: Table + description: Manage table schemas and data + tag: Table + isConceptual: false + menuGroup: Administration + operations: + - operationId: PostConfigureTable + method: POST + path: /api/v3/configure/table + summary: Create a table + tags: + - Table + - operationId: DeleteConfigureTable + method: DELETE + path: /api/v3/configure/table + summary: Delete a table + tags: + - Table + tagDescription: Manage table schemas and data + source: static/openapi/influxdb3-core/tags/tags/influxdb3-core-table.yaml + staticFilePath: /openapi/influxdb3-core/tags/tags/influxdb3-core-table.yaml + - path: api/write-data + fields: + name: Write data + describes: + - /write + - /api/v2/write + - /api/v3/write_lp + title: Write data + description: > + Write data to InfluxDB 3 using line protocol format. + + + #### Timestamp precision across write APIs + + + InfluxDB 3 provides multiple write endpoints for compatibility with + different InfluxDB versions. + + The following table compares timestamp precision support across v1, v2, + and v3 write APIs: + + + | Precision | v1 (`/write`) | v2 (`/api/v2/write`) | v3 + (`/api/v3/write_lp`) | + + |-----------|---------------|----------------------|-------------------------| + + | **Auto detection** | ❌ No | ❌ No | ✅ `auto` (default) | + + | **Seconds** | ✅ `s` | ✅ `s` | ✅ `second` | + + | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | + + | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | + + | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | + + | **Minutes** | ✅ `m` | ❌ No | ❌ No | + + | **Hours** | ✅ `h` | ❌ No | ❌ No | + + | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | + + + All timestamps are stored internally as nanoseconds. 
+ tag: Write data + isConceptual: false + menuGroup: Data Operations + operations: + - operationId: PostV1Write + method: POST + path: /write + summary: Write line protocol (v1-compatible) + tags: + - Write data + compatVersion: v1 + externalDocs: + description: Use compatibility APIs to write data + url: /influxdb/version/write-data/http-api/compatibility-apis/ + - operationId: PostV2Write + method: POST + path: /api/v2/write + summary: Write line protocol (v2-compatible) + tags: + - Write data + compatVersion: v2 + externalDocs: + description: Use compatibility APIs to write data + url: /influxdb/version/write-data/http-api/compatibility-apis/ + - operationId: PostWriteLP + method: POST + path: /api/v3/write_lp + summary: Write line protocol + tags: + - Write data + tagDescription: > + Write data to InfluxDB 3 using line protocol format. + + + #### Timestamp precision across write APIs + + + InfluxDB 3 provides multiple write endpoints for compatibility with + different InfluxDB versions. + + The following table compares timestamp precision support across v1, v2, + and v3 write APIs: + + + | Precision | v1 (`/write`) | v2 (`/api/v2/write`) | v3 + (`/api/v3/write_lp`) | + + |-----------|---------------|----------------------|-------------------------| + + | **Auto detection** | ❌ No | ❌ No | ✅ `auto` (default) | + + | **Seconds** | ✅ `s` | ✅ `s` | ✅ `second` | + + | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | + + | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | + + | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | + + | **Minutes** | ✅ `m` | ❌ No | ❌ No | + + | **Hours** | ✅ `h` | ❌ No | ❌ No | + + | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | + + + All timestamps are stored internally as nanoseconds. 
+ related: + - /influxdb/version/write-data/http-api/compatibility-apis/ + source: static/openapi/influxdb3-core/tags/tags/influxdb3-core-write-data.yaml + staticFilePath: /openapi/influxdb3-core/tags/tags/influxdb3-core-write-data.yaml diff --git a/data/article_data/influxdb/influxdb3_enterprise/articles.json b/data/article_data/influxdb/influxdb3_enterprise/articles.json new file mode 100644 index 0000000000..438e358f0c --- /dev/null +++ b/data/article_data/influxdb/influxdb3_enterprise/articles.json @@ -0,0 +1,701 @@ +{ + "articles": [ + { + "path": "api/auth-token", + "fields": { + "name": "Auth token", + "describes": [ + "/api/v3/configure/token/admin", + "/api/v3/configure/token/admin/regenerate", + "/api/v3/configure/token", + "/api/v3/configure/token/named_admin", + "/api/v3/configure/enterprise/token" + ], + "title": "Auth token", + "description": "Manage tokens for authentication and authorization", + "tag": "Auth token", + "isConceptual": false, + "menuGroup": "Other", + "operations": [ + { + "operationId": "PostCreateAdminToken", + "method": "POST", + "path": "/api/v3/configure/token/admin", + "summary": "Create admin token", + "tags": [ + "Auth token" + ] + }, + { + "operationId": "PostRegenerateAdminToken", + "method": "POST", + "path": "/api/v3/configure/token/admin/regenerate", + "summary": "Regenerate admin token", + "tags": [ + "Auth token" + ] + }, + { + "operationId": "DeleteToken", + "method": "DELETE", + "path": "/api/v3/configure/token", + "summary": "Delete token", + "tags": [ + "Auth token" + ] + }, + { + "operationId": "PostCreateNamedAdminToken", + "method": "POST", + "path": "/api/v3/configure/token/named_admin", + "summary": "Create named admin token", + "tags": [ + "Auth token" + ] + }, + { + "operationId": "PostCreateResourceToken", + "method": "POST", + "path": "/api/v3/configure/enterprise/token", + "summary": "Create a resource token", + "tags": [ + "Auth token" + ] + } + ], + "tagDescription": "Manage tokens for authentication and 
authorization", + "source": "static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-auth-token.yaml", + "staticFilePath": "/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-auth-token.yaml" + } + }, + { + "path": "api/authentication", + "fields": { + "name": "Authentication", + "describes": [ + "/api/v3/configure/token/admin/regenerate", + "/api/v3/configure/token", + "/api/v3/configure/token/named_admin", + "/api/v3/configure/enterprise/token" + ], + "title": "Authentication", + "description": "Depending on your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API:\n\n| Authentication scheme | Works with |\n|:----------------------|:-----------|\n| Bearer authentication | All endpoints |\n| Token authentication | v1 and v2 compatibility endpoints (`/write`, `/query`, `/api/v2/write`) |\n| Basic authentication | v1 compatibility endpoints (`/write`, `/query`) |\n| Querystring authentication | v1 compatibility endpoints (`/write`, `/query`) |\n\nSee the **Security Schemes** section below for details on each authentication method.\n", + "tag": "Authentication", + "isConceptual": true, + "menuGroup": "Concepts", + "operations": [ + { + "operationId": "PostRegenerateAdminToken", + "method": "POST", + "path": "/api/v3/configure/token/admin/regenerate", + "summary": "Regenerate admin token", + "tags": [ + "Authentication" + ] + }, + { + "operationId": "DeleteToken", + "method": "DELETE", + "path": "/api/v3/configure/token", + "summary": "Delete token", + "tags": [ + "Authentication" + ] + }, + { + "operationId": "PostCreateNamedAdminToken", + "method": "POST", + "path": "/api/v3/configure/token/named_admin", + "summary": "Create named admin token", + "tags": [ + "Authentication" + ] + }, + { + "operationId": "PostCreateResourceToken", + "method": "POST", + "path": "/api/v3/configure/enterprise/token", + "summary": "Create a resource token", + "tags": [ + "Authentication" + ] + } + ], + "tagDescription": "Depending on 
your workflow, use one of the following schemes to authenticate to the InfluxDB 3 API:\n\n| Authentication scheme | Works with |\n|:----------------------|:-----------|\n| Bearer authentication | All endpoints |\n| Token authentication | v1 and v2 compatibility endpoints (`/write`, `/query`, `/api/v2/write`) |\n| Basic authentication | v1 compatibility endpoints (`/write`, `/query`) |\n| Querystring authentication | v1 compatibility endpoints (`/write`, `/query`) |\n\nSee the **Security Schemes** section below for details on each authentication method.\n", + "showSecuritySchemes": true, + "source": "static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-authentication.yaml", + "staticFilePath": "/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-authentication.yaml" + } + }, + { + "path": "api/cache-data", + "fields": { + "name": "Cache data", + "describes": [ + "/api/v3/configure/distinct_cache", + "/api/v3/configure/last_cache" + ], + "title": "Cache data", + "description": "Manage the in-memory cache.\n\n#### Distinct Value Cache\n\nThe Distinct Value Cache (DVC) lets you cache distinct\nvalues of one or more columns in a table, improving the performance of\nqueries that return distinct tag and field values. \n\nThe DVC is an in-memory cache that stores distinct values for specific columns\nin a table. When you create an DVC, you can specify what columns' distinct\nvalues to cache, the maximum number of distinct value combinations to cache, and\nthe maximum age of cached values. A DVC is associated with a table, which can\nhave multiple DVCs.\n\n#### Last value cache\n\nThe Last Value Cache (LVC) lets you cache the most recent\nvalues for specific fields in a table, improving the performance of queries that\nreturn the most recent value of a field for specific series or the last N values\nof a field.\n\nThe LVC is an in-memory cache that stores the last N number of values for\nspecific fields of series in a table. 
When you create an LVC, you can specify\nwhat fields to cache, what tags to use to identify each series, and the\nnumber of values to cache for each unique series.\nAn LVC is associated with a table, which can have multiple LVCs.\n\n#### Related guides\n\n- [Manage the Distinct Value Cache](/influxdb3/enterprise/admin/distinct-value-cache/)\n- [Manage the Last Value Cache](/influxdb3/enterprise/admin/last-value-cache/)\n", + "tag": "Cache data", + "isConceptual": false, + "menuGroup": "Data Operations", + "operations": [ + { + "operationId": "PostConfigureDistinctCache", + "method": "POST", + "path": "/api/v3/configure/distinct_cache", + "summary": "Create distinct cache", + "tags": [ + "Cache data" + ] + }, + { + "operationId": "DeleteConfigureDistinctCache", + "method": "DELETE", + "path": "/api/v3/configure/distinct_cache", + "summary": "Delete distinct cache", + "tags": [ + "Cache data" + ] + }, + { + "operationId": "PostConfigureLastCache", + "method": "POST", + "path": "/api/v3/configure/last_cache", + "summary": "Create last cache", + "tags": [ + "Cache data" + ] + }, + { + "operationId": "DeleteConfigureLastCache", + "method": "DELETE", + "path": "/api/v3/configure/last_cache", + "summary": "Delete last cache", + "tags": [ + "Cache data" + ] + } + ], + "tagDescription": "Manage the in-memory cache.\n\n#### Distinct Value Cache\n\nThe Distinct Value Cache (DVC) lets you cache distinct\nvalues of one or more columns in a table, improving the performance of\nqueries that return distinct tag and field values. \n\nThe DVC is an in-memory cache that stores distinct values for specific columns\nin a table. When you create an DVC, you can specify what columns' distinct\nvalues to cache, the maximum number of distinct value combinations to cache, and\nthe maximum age of cached values. 
A DVC is associated with a table, which can\nhave multiple DVCs.\n\n#### Last value cache\n\nThe Last Value Cache (LVC) lets you cache the most recent\nvalues for specific fields in a table, improving the performance of queries that\nreturn the most recent value of a field for specific series or the last N values\nof a field.\n\nThe LVC is an in-memory cache that stores the last N number of values for\nspecific fields of series in a table. When you create an LVC, you can specify\nwhat fields to cache, what tags to use to identify each series, and the\nnumber of values to cache for each unique series.\nAn LVC is associated with a table, which can have multiple LVCs.\n\n#### Related guides\n\n- [Manage the Distinct Value Cache](/influxdb3/enterprise/admin/distinct-value-cache/)\n- [Manage the Last Value Cache](/influxdb3/enterprise/admin/last-value-cache/)\n", + "source": "static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-cache-data.yaml", + "staticFilePath": "/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-cache-data.yaml" + } + }, + { + "path": "api/database", + "fields": { + "name": "Database", + "describes": [ + "/api/v3/configure/database", + "/api/v3/configure/database/retention_period", + "/api/v3/configure/database/{db}" + ], + "title": "Database", + "description": "Manage databases", + "tag": "Database", + "isConceptual": false, + "menuGroup": "Administration", + "operations": [ + { + "operationId": "GetConfigureDatabase", + "method": "GET", + "path": "/api/v3/configure/database", + "summary": "List databases", + "tags": [ + "Database" + ] + }, + { + "operationId": "PostConfigureDatabase", + "method": "POST", + "path": "/api/v3/configure/database", + "summary": "Create a database", + "tags": [ + "Database" + ] + }, + { + "operationId": "DeleteConfigureDatabase", + "method": "DELETE", + "path": "/api/v3/configure/database", + "summary": "Delete a database", + "tags": [ + "Database" + ] + }, + { + "operationId": 
"DeleteDatabaseRetentionPeriod", + "method": "DELETE", + "path": "/api/v3/configure/database/retention_period", + "summary": "Remove database retention period", + "tags": [ + "Database" + ] + }, + { + "operationId": "PatchConfigureDatabase", + "method": "PATCH", + "path": "/api/v3/configure/database/{db}", + "summary": "Update a database", + "tags": [ + "Database" + ] + } + ], + "tagDescription": "Manage databases", + "source": "static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-database.yaml", + "staticFilePath": "/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-database.yaml" + } + }, + { + "path": "api/headers-and-parameters", + "fields": { + "name": "Headers and parameters", + "describes": [], + "title": "Headers and parameters", + "description": "Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use.\n\n### Common parameters\n\nThe following table shows common parameters used by many InfluxDB API endpoints.\nMany endpoints may require other parameters in the query string or in the\nrequest body that perform functions specific to those endpoints.\n\n| Query parameter | Value type | Description |\n|:------------------------ |:--------------------- |:-------------------------------------------|\n| `db` | string | The database name |\n\nInfluxDB HTTP API endpoints use standard HTTP request and response headers.\nThe following table shows common headers used by many InfluxDB API endpoints.\nSome endpoints may use other headers that perform functions more specific to those endpoints--for example,\nthe write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the request body.\n\n| Header | Value type | Description |\n|:------------------------ |:--------------------- |:-------------------------------------------|\n| `Accept` | string | The content type that the client can understand. 
|\n| `Authorization` | string | The [authorization scheme and credential](/influxdb3/enterprise/api/authentication/). |\n| `Content-Length` | integer | The size of the entity-body, in bytes. |\n| `Content-Type` | string | The format of the data in the request body. |\n", + "tag": "Headers and parameters", + "isConceptual": true, + "menuGroup": "Concepts", + "operations": [], + "tagDescription": "Most InfluxDB API endpoints require parameters in the request--for example, specifying the database to use.\n\n### Common parameters\n\nThe following table shows common parameters used by many InfluxDB API endpoints.\nMany endpoints may require other parameters in the query string or in the\nrequest body that perform functions specific to those endpoints.\n\n| Query parameter | Value type | Description |\n|:------------------------ |:--------------------- |:-------------------------------------------|\n| `db` | string | The database name |\n\nInfluxDB HTTP API endpoints use standard HTTP request and response headers.\nThe following table shows common headers used by many InfluxDB API endpoints.\nSome endpoints may use other headers that perform functions more specific to those endpoints--for example,\nthe write endpoints accept the `Content-Encoding` header to indicate that line protocol is compressed in the request body.\n\n| Header | Value type | Description |\n|:------------------------ |:--------------------- |:-------------------------------------------|\n| `Accept` | string | The content type that the client can understand. |\n| `Authorization` | string | The [authorization scheme and credential](/influxdb3/enterprise/api/authentication/). |\n| `Content-Length` | integer | The size of the entity-body, in bytes. |\n| `Content-Type` | string | The format of the data in the request body. 
|\n", + "source": "static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-headers-and-parameters.yaml", + "staticFilePath": "/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-headers-and-parameters.yaml" + } + }, + { + "path": "api/migrate-from-influxdb-v1-or-v2", + "fields": { + "name": "Migrate from InfluxDB v1 or v2", + "describes": [], + "title": "Migrate from InfluxDB v1 or v2", + "description": "Migrate your existing InfluxDB v1 or v2 workloads to InfluxDB 3.\n\nInfluxDB 3 provides compatibility endpoints that work with InfluxDB 1.x and 2.x client libraries and tools.\nOperations marked with v1 or v2 badges are compatible with the respective InfluxDB version.\n\n### Migration guides\n\n- [Migrate from InfluxDB v1](/influxdb3/enterprise/guides/migrate/influxdb-1x/) - For users migrating from InfluxDB 1.x\n- [Migrate from InfluxDB v2](/influxdb3/enterprise/guides/migrate/influxdb-2x/) - For users migrating from InfluxDB 2.x or Cloud\n- [Use compatibility APIs to write data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) - v1 and v2 write endpoints\n- [Use the v1 HTTP query API](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) - InfluxQL queries via HTTP\n", + "tag": "Migrate from InfluxDB v1 or v2", + "isConceptual": true, + "menuGroup": "Other", + "operations": [], + "tagDescription": "Migrate your existing InfluxDB v1 or v2 workloads to InfluxDB 3.\n\nInfluxDB 3 provides compatibility endpoints that work with InfluxDB 1.x and 2.x client libraries and tools.\nOperations marked with v1 or v2 badges are compatible with the respective InfluxDB version.\n\n### Migration guides\n\n- [Migrate from InfluxDB v1](/influxdb3/enterprise/guides/migrate/influxdb-1x/) - For users migrating from InfluxDB 1.x\n- [Migrate from InfluxDB v2](/influxdb3/enterprise/guides/migrate/influxdb-2x/) - For users migrating from InfluxDB 2.x or Cloud\n- [Use compatibility APIs to write 
data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) - v1 and v2 write endpoints\n- [Use the v1 HTTP query API](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) - InfluxQL queries via HTTP\n", + "source": "static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-migrate-from-influxdb-v1-or-v2.yaml", + "staticFilePath": "/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-migrate-from-influxdb-v1-or-v2.yaml" + } + }, + { + "path": "api/processing-engine", + "fields": { + "name": "Processing engine", + "describes": [ + "/api/v3/configure/processing_engine_trigger", + "/api/v3/configure/processing_engine_trigger/disable", + "/api/v3/configure/processing_engine_trigger/enable", + "/api/v3/configure/plugin_environment/install_packages", + "/api/v3/configure/plugin_environment/install_requirements", + "/api/v3/plugin_test/wal", + "/api/v3/plugin_test/schedule", + "/api/v3/engine/{request_path}", + "/api/v3/plugins/files", + "/api/v3/plugins/directory" + ], + "title": "Processing engine", + "description": "Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins.\n\nInfluxDB 3 provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database.\nUse Processing engine plugins and triggers to run code and perform tasks for different database events.\n\nTo get started with the processing engine, see the [Processing engine and Python plugins](/influxdb3/enterprise/processing-engine/) guide.\n", + "tag": "Processing engine", + "isConceptual": false, + "menuGroup": "Processing Engine", + "operations": [ + { + "operationId": "PostConfigureProcessingEngineTrigger", + "method": "POST", + "path": "/api/v3/configure/processing_engine_trigger", + "summary": "Create processing engine trigger", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "DeleteConfigureProcessingEngineTrigger", + 
"method": "DELETE", + "path": "/api/v3/configure/processing_engine_trigger", + "summary": "Delete processing engine trigger", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PostDisableProcessingEngineTrigger", + "method": "POST", + "path": "/api/v3/configure/processing_engine_trigger/disable", + "summary": "Disable processing engine trigger", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PostEnableProcessingEngineTrigger", + "method": "POST", + "path": "/api/v3/configure/processing_engine_trigger/enable", + "summary": "Enable processing engine trigger", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PostInstallPluginPackages", + "method": "POST", + "path": "/api/v3/configure/plugin_environment/install_packages", + "summary": "Install plugin packages", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PostInstallPluginRequirements", + "method": "POST", + "path": "/api/v3/configure/plugin_environment/install_requirements", + "summary": "Install plugin requirements", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PostTestWALPlugin", + "method": "POST", + "path": "/api/v3/plugin_test/wal", + "summary": "Test WAL plugin", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PostTestSchedulingPlugin", + "method": "POST", + "path": "/api/v3/plugin_test/schedule", + "summary": "Test scheduling plugin", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "GetProcessingEnginePluginRequest", + "method": "GET", + "path": "/api/v3/engine/{request_path}", + "summary": "On Request processing engine plugin request", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PostProcessingEnginePluginRequest", + "method": "POST", + "path": "/api/v3/engine/{request_path}", + "summary": "On Request processing engine plugin request", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PutPluginFile", + "method": "PUT", + "path": "/api/v3/plugins/files", + 
"summary": "Update plugin file", + "tags": [ + "Processing engine" + ] + }, + { + "operationId": "PutPluginDirectory", + "method": "PUT", + "path": "/api/v3/plugins/directory", + "summary": "Update plugin directory", + "tags": [ + "Processing engine" + ] + } + ], + "tagDescription": "Manage Processing engine triggers, test plugins, and send requests to trigger On Request plugins.\n\nInfluxDB 3 provides the InfluxDB 3 processing engine, an embedded Python VM that can dynamically load and trigger Python plugins in response to events in your database.\nUse Processing engine plugins and triggers to run code and perform tasks for different database events.\n\nTo get started with the processing engine, see the [Processing engine and Python plugins](/influxdb3/enterprise/processing-engine/) guide.\n", + "source": "static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-processing-engine.yaml", + "staticFilePath": "/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-processing-engine.yaml" + } + }, + { + "path": "api/query-data", + "fields": { + "name": "Query data", + "describes": [ + "/api/v3/query_sql", + "/api/v3/query_influxql", + "/query" + ], + "title": "Query data", + "description": "Query data using SQL or InfluxQL", + "tag": "Query data", + "isConceptual": false, + "menuGroup": "Data Operations", + "operations": [ + { + "operationId": "GetExecuteQuerySQL", + "method": "GET", + "path": "/api/v3/query_sql", + "summary": "Execute SQL query", + "tags": [ + "Query data" + ] + }, + { + "operationId": "PostExecuteQuerySQL", + "method": "POST", + "path": "/api/v3/query_sql", + "summary": "Execute SQL query", + "tags": [ + "Query data" + ] + }, + { + "operationId": "GetExecuteInfluxQLQuery", + "method": "GET", + "path": "/api/v3/query_influxql", + "summary": "Execute InfluxQL query", + "tags": [ + "Query data" + ] + }, + { + "operationId": "PostExecuteQueryInfluxQL", + "method": "POST", + "path": "/api/v3/query_influxql", + "summary": "Execute InfluxQL 
query", + "tags": [ + "Query data" + ] + }, + { + "operationId": "GetV1ExecuteQuery", + "method": "GET", + "path": "/query", + "summary": "Execute InfluxQL query (v1-compatible)", + "tags": [ + "Query data" + ], + "compatVersion": "v1", + "externalDocs": { + "description": "Use the InfluxDB v1 HTTP query API and InfluxQL to query data", + "url": "/influxdb/version/query-data/execute-queries/influxdb-v1-api/" + } + }, + { + "operationId": "PostExecuteV1Query", + "method": "POST", + "path": "/query", + "summary": "Execute InfluxQL query (v1-compatible)", + "tags": [ + "Query data" + ], + "compatVersion": "v1", + "externalDocs": { + "description": "Use the InfluxDB v1 HTTP query API and InfluxQL to query data", + "url": "/influxdb/version/query-data/execute-queries/influxdb-v1-api/" + } + } + ], + "tagDescription": "Query data using SQL or InfluxQL", + "related": [ + "/influxdb/version/query-data/execute-queries/influxdb-v1-api/" + ], + "source": "static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-query-data.yaml", + "staticFilePath": "/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-query-data.yaml" + } + }, + { + "path": "api/quick-start", + "fields": { + "name": "Quick start", + "describes": [], + "title": "Quick start", + "description": "1. [Create an admin token](#section/Authentication) to authorize API requests.\n\n ```bash\n curl -X POST \"http://localhost:8181/api/v3/configure/token/admin\"\n ```\n2. [Check the status](#section/Server-information) of the InfluxDB server.\n\n ```bash\n curl \"http://localhost:8181/health\" \\\n --header \"Authorization: Bearer ADMIN_TOKEN\"\n ```\n\n3. [Write data](#operation/PostWriteLP) to InfluxDB.\n\n ```bash\n curl \"http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto\"\n --header \"Authorization: Bearer ADMIN_TOKEN\" \\\n --data-raw \"home,room=Kitchen temp=72.0\n home,room=Living\\ room temp=71.5\"\n ```\n\n If all data is written, the response is `204 No Content`.\n\n4. 
[Query data](#operation/GetExecuteQuerySQL) from InfluxDB.\n\n ```bash\n curl -G \"http://localhost:8181/api/v3/query_sql\" \\\n --header \"Authorization: Bearer ADMIN_TOKEN\" \\\n --data-urlencode \"db=sensors\" \\\n --data-urlencode \"q=SELECT * FROM home WHERE room='Living room'\" \\\n --data-urlencode \"format=jsonl\"\n ```\n \n Output:\n\n ```jsonl\n {\"room\":\"Living room\",\"temp\":71.5,\"time\":\"2025-02-25T20:19:34.984098\"}\n ```\n \nFor more information about using InfluxDB 3, see the [Get started](/influxdb3/enterprise/get-started/) guide.\n", + "tag": "Quick start", + "isConceptual": true, + "menuGroup": "Concepts", + "operations": [], + "tagDescription": "1. [Create an admin token](#section/Authentication) to authorize API requests.\n\n ```bash\n curl -X POST \"http://localhost:8181/api/v3/configure/token/admin\"\n ```\n2. [Check the status](#section/Server-information) of the InfluxDB server.\n\n ```bash\n curl \"http://localhost:8181/health\" \\\n --header \"Authorization: Bearer ADMIN_TOKEN\"\n ```\n\n3. [Write data](#operation/PostWriteLP) to InfluxDB.\n\n ```bash\n curl \"http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto\"\n --header \"Authorization: Bearer ADMIN_TOKEN\" \\\n --data-raw \"home,room=Kitchen temp=72.0\n home,room=Living\\ room temp=71.5\"\n ```\n\n If all data is written, the response is `204 No Content`.\n\n4. 
[Query data](#operation/GetExecuteQuerySQL) from InfluxDB.\n\n ```bash\n curl -G \"http://localhost:8181/api/v3/query_sql\" \\\n --header \"Authorization: Bearer ADMIN_TOKEN\" \\\n --data-urlencode \"db=sensors\" \\\n --data-urlencode \"q=SELECT * FROM home WHERE room='Living room'\" \\\n --data-urlencode \"format=jsonl\"\n ```\n \n Output:\n\n ```jsonl\n {\"room\":\"Living room\",\"temp\":71.5,\"time\":\"2025-02-25T20:19:34.984098\"}\n ```\n \nFor more information about using InfluxDB 3, see the [Get started](/influxdb3/enterprise/get-started/) guide.\n", + "source": "static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-quick-start.yaml", + "staticFilePath": "/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-quick-start.yaml" + } + }, + { + "path": "api/server-information", + "fields": { + "name": "Server information", + "describes": [ + "/health", + "/api/v1/health", + "/ping", + "/metrics", + "/api/v3/show/license" + ], + "title": "Server information", + "description": "Retrieve server metrics, status, and version information", + "tag": "Server information", + "isConceptual": false, + "menuGroup": "Server", + "operations": [ + { + "operationId": "GetHealth", + "method": "GET", + "path": "/health", + "summary": "Health check", + "tags": [ + "Server information" + ] + }, + { + "operationId": "GetHealthV1", + "method": "GET", + "path": "/api/v1/health", + "summary": "Health check (v1-compatible)", + "tags": [ + "Server information" + ], + "compatVersion": "v1" + }, + { + "operationId": "GetPing", + "method": "GET", + "path": "/ping", + "summary": "Ping the server", + "tags": [ + "Server information" + ] + }, + { + "operationId": "GetMetrics", + "method": "GET", + "path": "/metrics", + "summary": "Metrics", + "tags": [ + "Server information" + ] + }, + { + "operationId": "GetShowLicense", + "method": "GET", + "path": "/api/v3/show/license", + "summary": "Show license information", + "tags": [ + "Server information" + ] + } + ], + 
"tagDescription": "Retrieve server metrics, status, and version information", + "source": "static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-server-information.yaml", + "staticFilePath": "/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-server-information.yaml" + } + }, + { + "path": "api/table", + "fields": { + "name": "Table", + "describes": [ + "/api/v3/configure/table" + ], + "title": "Table", + "description": "Manage table schemas and data", + "tag": "Table", + "isConceptual": false, + "menuGroup": "Administration", + "operations": [ + { + "operationId": "PostConfigureTable", + "method": "POST", + "path": "/api/v3/configure/table", + "summary": "Create a table", + "tags": [ + "Table" + ] + }, + { + "operationId": "PatchConfigureTable", + "method": "PATCH", + "path": "/api/v3/configure/table", + "summary": "Update a table", + "tags": [ + "Table" + ] + }, + { + "operationId": "DeleteConfigureTable", + "method": "DELETE", + "path": "/api/v3/configure/table", + "summary": "Delete a table", + "tags": [ + "Table" + ] + } + ], + "tagDescription": "Manage table schemas and data", + "source": "static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-table.yaml", + "staticFilePath": "/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-table.yaml" + } + }, + { + "path": "api/write-data", + "fields": { + "name": "Write data", + "describes": [ + "/write", + "/api/v2/write", + "/api/v3/write_lp" + ], + "title": "Write data", + "description": "Write data to InfluxDB 3 using line protocol format.\n\n#### Timestamp precision across write APIs\n\nInfluxDB 3 provides multiple write endpoints for compatibility with different InfluxDB versions.\nThe following table compares timestamp precision support across v1, v2, and v3 write APIs:\n\n| Precision | v1 (`/write`) | v2 (`/api/v2/write`) | v3 (`/api/v3/write_lp`) |\n|-----------|---------------|----------------------|-------------------------|\n| **Auto detection** | ❌ No | ❌ No | ✅ 
`auto` (default) |\n| **Seconds** | ✅ `s` | ✅ `s` | ✅ `second` |\n| **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` |\n| **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` |\n| **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` |\n| **Minutes** | ✅ `m` | ❌ No | ❌ No |\n| **Hours** | ✅ `h` | ❌ No | ❌ No |\n| **Default** | Nanosecond | Nanosecond | **Auto** (guessed) |\n\nAll timestamps are stored internally as nanoseconds.\n", + "tag": "Write data", + "isConceptual": false, + "menuGroup": "Data Operations", + "operations": [ + { + "operationId": "PostV1Write", + "method": "POST", + "path": "/write", + "summary": "Write line protocol (v1-compatible)", + "tags": [ + "Write data" + ], + "compatVersion": "v1", + "externalDocs": { + "description": "Use compatibility APIs to write data", + "url": "/influxdb/version/write-data/http-api/compatibility-apis/" + } + }, + { + "operationId": "PostV2Write", + "method": "POST", + "path": "/api/v2/write", + "summary": "Write line protocol (v2-compatible)", + "tags": [ + "Write data" + ], + "compatVersion": "v2", + "externalDocs": { + "description": "Use compatibility APIs to write data", + "url": "/influxdb/version/write-data/http-api/compatibility-apis/" + } + }, + { + "operationId": "PostWriteLP", + "method": "POST", + "path": "/api/v3/write_lp", + "summary": "Write line protocol", + "tags": [ + "Write data" + ] + } + ], + "tagDescription": "Write data to InfluxDB 3 using line protocol format.\n\n#### Timestamp precision across write APIs\n\nInfluxDB 3 provides multiple write endpoints for compatibility with different InfluxDB versions.\nThe following table compares timestamp precision support across v1, v2, and v3 write APIs:\n\n| Precision | v1 (`/write`) | v2 (`/api/v2/write`) | v3 (`/api/v3/write_lp`) |\n|-----------|---------------|----------------------|-------------------------|\n| **Auto detection** | ❌ No | ❌ No | ✅ `auto` (default) |\n| **Seconds** | ✅ `s` | ✅ `s` | ✅ `second` |\n| **Milliseconds** | ✅ `ms` 
| ✅ `ms` | ✅ `millisecond` |\n| **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` |\n| **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` |\n| **Minutes** | ✅ `m` | ❌ No | ❌ No |\n| **Hours** | ✅ `h` | ❌ No | ❌ No |\n| **Default** | Nanosecond | Nanosecond | **Auto** (guessed) |\n\nAll timestamps are stored internally as nanoseconds.\n", + "related": [ + "/influxdb/version/write-data/http-api/compatibility-apis/" + ], + "source": "static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-write-data.yaml", + "staticFilePath": "/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-write-data.yaml" + } + } + ] +} \ No newline at end of file diff --git a/data/article_data/influxdb/influxdb3_enterprise/articles.yml b/data/article_data/influxdb/influxdb3_enterprise/articles.yml new file mode 100644 index 0000000000..d364089748 --- /dev/null +++ b/data/article_data/influxdb/influxdb3_enterprise/articles.yml @@ -0,0 +1,1046 @@ +articles: + - path: api/auth-token + fields: + name: Auth token + describes: + - /api/v3/configure/token/admin + - /api/v3/configure/token/admin/regenerate + - /api/v3/configure/token + - /api/v3/configure/token/named_admin + - /api/v3/configure/enterprise/token + title: Auth token + description: Manage tokens for authentication and authorization + tag: Auth token + isConceptual: false + menuGroup: Other + operations: + - operationId: PostCreateAdminToken + method: POST + path: /api/v3/configure/token/admin + summary: Create admin token + tags: + - Auth token + - operationId: PostRegenerateAdminToken + method: POST + path: /api/v3/configure/token/admin/regenerate + summary: Regenerate admin token + tags: + - Auth token + - operationId: DeleteToken + method: DELETE + path: /api/v3/configure/token + summary: Delete token + tags: + - Auth token + - operationId: PostCreateNamedAdminToken + method: POST + path: /api/v3/configure/token/named_admin + summary: Create named admin token + tags: + - Auth token + - operationId: 
PostCreateResourceToken + method: POST + path: /api/v3/configure/enterprise/token + summary: Create a resource token + tags: + - Auth token + tagDescription: Manage tokens for authentication and authorization + source: >- + static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-auth-token.yaml + staticFilePath: >- + /openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-auth-token.yaml + - path: api/authentication + fields: + name: Authentication + describes: + - /api/v3/configure/token/admin/regenerate + - /api/v3/configure/token + - /api/v3/configure/token/named_admin + - /api/v3/configure/enterprise/token + title: Authentication + description: > + Depending on your workflow, use one of the following schemes to + authenticate to the InfluxDB 3 API: + + + | Authentication scheme | Works with | + + |:----------------------|:-----------| + + | Bearer authentication | All endpoints | + + | Token authentication | v1 and v2 compatibility endpoints (`/write`, + `/query`, `/api/v2/write`) | + + | Basic authentication | v1 compatibility endpoints (`/write`, + `/query`) | + + | Querystring authentication | v1 compatibility endpoints (`/write`, + `/query`) | + + + See the **Security Schemes** section below for details on each + authentication method. 
+ tag: Authentication + isConceptual: true + menuGroup: Concepts + operations: + - operationId: PostRegenerateAdminToken + method: POST + path: /api/v3/configure/token/admin/regenerate + summary: Regenerate admin token + tags: + - Authentication + - operationId: DeleteToken + method: DELETE + path: /api/v3/configure/token + summary: Delete token + tags: + - Authentication + - operationId: PostCreateNamedAdminToken + method: POST + path: /api/v3/configure/token/named_admin + summary: Create named admin token + tags: + - Authentication + - operationId: PostCreateResourceToken + method: POST + path: /api/v3/configure/enterprise/token + summary: Create a resource token + tags: + - Authentication + tagDescription: > + Depending on your workflow, use one of the following schemes to + authenticate to the InfluxDB 3 API: + + + | Authentication scheme | Works with | + + |:----------------------|:-----------| + + | Bearer authentication | All endpoints | + + | Token authentication | v1 and v2 compatibility endpoints (`/write`, + `/query`, `/api/v2/write`) | + + | Basic authentication | v1 compatibility endpoints (`/write`, + `/query`) | + + | Querystring authentication | v1 compatibility endpoints (`/write`, + `/query`) | + + + See the **Security Schemes** section below for details on each + authentication method. + showSecuritySchemes: true + source: >- + static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-authentication.yaml + staticFilePath: >- + /openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-authentication.yaml + - path: api/cache-data + fields: + name: Cache data + describes: + - /api/v3/configure/distinct_cache + - /api/v3/configure/last_cache + title: Cache data + description: > + Manage the in-memory cache. + + + #### Distinct Value Cache + + + The Distinct Value Cache (DVC) lets you cache distinct + + values of one or more columns in a table, improving the performance of + + queries that return distinct tag and field values. 
+ + + The DVC is an in-memory cache that stores distinct values for specific + columns + + in a table. When you create an DVC, you can specify what columns' + distinct + + values to cache, the maximum number of distinct value combinations to + cache, and + + the maximum age of cached values. A DVC is associated with a table, + which can + + have multiple DVCs. + + + #### Last value cache + + + The Last Value Cache (LVC) lets you cache the most recent + + values for specific fields in a table, improving the performance of + queries that + + return the most recent value of a field for specific series or the last + N values + + of a field. + + + The LVC is an in-memory cache that stores the last N number of values + for + + specific fields of series in a table. When you create an LVC, you can + specify + + what fields to cache, what tags to use to identify each series, and the + + number of values to cache for each unique series. + + An LVC is associated with a table, which can have multiple LVCs. + + + #### Related guides + + + - [Manage the Distinct Value + Cache](/influxdb3/enterprise/admin/distinct-value-cache/) + + - [Manage the Last Value + Cache](/influxdb3/enterprise/admin/last-value-cache/) + tag: Cache data + isConceptual: false + menuGroup: Data Operations + operations: + - operationId: PostConfigureDistinctCache + method: POST + path: /api/v3/configure/distinct_cache + summary: Create distinct cache + tags: + - Cache data + - operationId: DeleteConfigureDistinctCache + method: DELETE + path: /api/v3/configure/distinct_cache + summary: Delete distinct cache + tags: + - Cache data + - operationId: PostConfigureLastCache + method: POST + path: /api/v3/configure/last_cache + summary: Create last cache + tags: + - Cache data + - operationId: DeleteConfigureLastCache + method: DELETE + path: /api/v3/configure/last_cache + summary: Delete last cache + tags: + - Cache data + tagDescription: > + Manage the in-memory cache. 
+ + + #### Distinct Value Cache + + + The Distinct Value Cache (DVC) lets you cache distinct + + values of one or more columns in a table, improving the performance of + + queries that return distinct tag and field values. + + + The DVC is an in-memory cache that stores distinct values for specific + columns + + in a table. When you create an DVC, you can specify what columns' + distinct + + values to cache, the maximum number of distinct value combinations to + cache, and + + the maximum age of cached values. A DVC is associated with a table, + which can + + have multiple DVCs. + + + #### Last value cache + + + The Last Value Cache (LVC) lets you cache the most recent + + values for specific fields in a table, improving the performance of + queries that + + return the most recent value of a field for specific series or the last + N values + + of a field. + + + The LVC is an in-memory cache that stores the last N number of values + for + + specific fields of series in a table. When you create an LVC, you can + specify + + what fields to cache, what tags to use to identify each series, and the + + number of values to cache for each unique series. + + An LVC is associated with a table, which can have multiple LVCs. 
+ + + #### Related guides + + + - [Manage the Distinct Value + Cache](/influxdb3/enterprise/admin/distinct-value-cache/) + + - [Manage the Last Value + Cache](/influxdb3/enterprise/admin/last-value-cache/) + source: >- + static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-cache-data.yaml + staticFilePath: >- + /openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-cache-data.yaml + - path: api/database + fields: + name: Database + describes: + - /api/v3/configure/database + - /api/v3/configure/database/retention_period + - /api/v3/configure/database/{db} + title: Database + description: Manage databases + tag: Database + isConceptual: false + menuGroup: Administration + operations: + - operationId: GetConfigureDatabase + method: GET + path: /api/v3/configure/database + summary: List databases + tags: + - Database + - operationId: PostConfigureDatabase + method: POST + path: /api/v3/configure/database + summary: Create a database + tags: + - Database + - operationId: DeleteConfigureDatabase + method: DELETE + path: /api/v3/configure/database + summary: Delete a database + tags: + - Database + - operationId: DeleteDatabaseRetentionPeriod + method: DELETE + path: /api/v3/configure/database/retention_period + summary: Remove database retention period + tags: + - Database + - operationId: PatchConfigureDatabase + method: PATCH + path: /api/v3/configure/database/{db} + summary: Update a database + tags: + - Database + tagDescription: Manage databases + source: >- + static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-database.yaml + staticFilePath: >- + /openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-database.yaml + - path: api/headers-and-parameters + fields: + name: Headers and parameters + describes: [] + title: Headers and parameters + description: > + Most InfluxDB API endpoints require parameters in the request--for + example, specifying the database to use. 
+ + + ### Common parameters + + + The following table shows common parameters used by many InfluxDB API + endpoints. + + Many endpoints may require other parameters in the query string or in + the + + request body that perform functions specific to those endpoints. + + + | Query parameter | Value type | + Description | + + |:------------------------ |:--------------------- + |:-------------------------------------------| + + | `db` | string | The database name | + + + InfluxDB HTTP API endpoints use standard HTTP request and response + headers. + + The following table shows common headers used by many InfluxDB API + endpoints. + + Some endpoints may use other headers that perform functions more + specific to those endpoints--for example, + + the write endpoints accept the `Content-Encoding` header to indicate + that line protocol is compressed in the request body. + + + | Header | Value type | + Description | + + |:------------------------ |:--------------------- + |:-------------------------------------------| + + | `Accept` | string | The content type + that the client can understand. | + + | `Authorization` | string | The [authorization + scheme and credential](/influxdb3/enterprise/api/authentication/). | + + | `Content-Length` | integer | The size of the + entity-body, in bytes. | + + | `Content-Type` | string | The format of the + data in the request body. | + tag: Headers and parameters + isConceptual: true + menuGroup: Concepts + operations: [] + tagDescription: > + Most InfluxDB API endpoints require parameters in the request--for + example, specifying the database to use. + + + ### Common parameters + + + The following table shows common parameters used by many InfluxDB API + endpoints. + + Many endpoints may require other parameters in the query string or in + the + + request body that perform functions specific to those endpoints. 
+ + + | Query parameter | Value type | + Description | + + |:------------------------ |:--------------------- + |:-------------------------------------------| + + | `db` | string | The database name | + + + InfluxDB HTTP API endpoints use standard HTTP request and response + headers. + + The following table shows common headers used by many InfluxDB API + endpoints. + + Some endpoints may use other headers that perform functions more + specific to those endpoints--for example, + + the write endpoints accept the `Content-Encoding` header to indicate + that line protocol is compressed in the request body. + + + | Header | Value type | + Description | + + |:------------------------ |:--------------------- + |:-------------------------------------------| + + | `Accept` | string | The content type + that the client can understand. | + + | `Authorization` | string | The [authorization + scheme and credential](/influxdb3/enterprise/api/authentication/). | + + | `Content-Length` | integer | The size of the + entity-body, in bytes. | + + | `Content-Type` | string | The format of the + data in the request body. | + source: >- + static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-headers-and-parameters.yaml + staticFilePath: >- + /openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-headers-and-parameters.yaml + - path: api/migrate-from-influxdb-v1-or-v2 + fields: + name: Migrate from InfluxDB v1 or v2 + describes: [] + title: Migrate from InfluxDB v1 or v2 + description: > + Migrate your existing InfluxDB v1 or v2 workloads to InfluxDB 3. + + + InfluxDB 3 provides compatibility endpoints that work with InfluxDB 1.x + and 2.x client libraries and tools. + + Operations marked with v1 or v2 badges are compatible with the + respective InfluxDB version. 
+ + + ### Migration guides + + + - [Migrate from InfluxDB + v1](/influxdb3/enterprise/guides/migrate/influxdb-1x/) - For users + migrating from InfluxDB 1.x + + - [Migrate from InfluxDB + v2](/influxdb3/enterprise/guides/migrate/influxdb-2x/) - For users + migrating from InfluxDB 2.x or Cloud + + - [Use compatibility APIs to write + data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) - + v1 and v2 write endpoints + + - [Use the v1 HTTP query + API](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) + - InfluxQL queries via HTTP + tag: Migrate from InfluxDB v1 or v2 + isConceptual: true + menuGroup: Other + operations: [] + tagDescription: > + Migrate your existing InfluxDB v1 or v2 workloads to InfluxDB 3. + + + InfluxDB 3 provides compatibility endpoints that work with InfluxDB 1.x + and 2.x client libraries and tools. + + Operations marked with v1 or v2 badges are compatible with the + respective InfluxDB version. + + + ### Migration guides + + + - [Migrate from InfluxDB + v1](/influxdb3/enterprise/guides/migrate/influxdb-1x/) - For users + migrating from InfluxDB 1.x + + - [Migrate from InfluxDB + v2](/influxdb3/enterprise/guides/migrate/influxdb-2x/) - For users + migrating from InfluxDB 2.x or Cloud + + - [Use compatibility APIs to write + data](/influxdb3/enterprise/write-data/http-api/compatibility-apis/) - + v1 and v2 write endpoints + + - [Use the v1 HTTP query + API](/influxdb3/enterprise/query-data/execute-queries/influxdb-v1-api/) + - InfluxQL queries via HTTP + source: >- + static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-migrate-from-influxdb-v1-or-v2.yaml + staticFilePath: >- + /openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-migrate-from-influxdb-v1-or-v2.yaml + - path: api/processing-engine + fields: + name: Processing engine + describes: + - /api/v3/configure/processing_engine_trigger + - /api/v3/configure/processing_engine_trigger/disable + - 
/api/v3/configure/processing_engine_trigger/enable + - /api/v3/configure/plugin_environment/install_packages + - /api/v3/configure/plugin_environment/install_requirements + - /api/v3/plugin_test/wal + - /api/v3/plugin_test/schedule + - /api/v3/engine/{request_path} + - /api/v3/plugins/files + - /api/v3/plugins/directory + title: Processing engine + description: > + Manage Processing engine triggers, test plugins, and send requests to + trigger On Request plugins. + + + InfluxDB 3 provides the InfluxDB 3 processing engine, an embedded Python + VM that can dynamically load and trigger Python plugins in response to + events in your database. + + Use Processing engine plugins and triggers to run code and perform tasks + for different database events. + + + To get started with the processing engine, see the [Processing engine + and Python plugins](/influxdb3/enterprise/processing-engine/) guide. + tag: Processing engine + isConceptual: false + menuGroup: Processing Engine + operations: + - operationId: PostConfigureProcessingEngineTrigger + method: POST + path: /api/v3/configure/processing_engine_trigger + summary: Create processing engine trigger + tags: + - Processing engine + - operationId: DeleteConfigureProcessingEngineTrigger + method: DELETE + path: /api/v3/configure/processing_engine_trigger + summary: Delete processing engine trigger + tags: + - Processing engine + - operationId: PostDisableProcessingEngineTrigger + method: POST + path: /api/v3/configure/processing_engine_trigger/disable + summary: Disable processing engine trigger + tags: + - Processing engine + - operationId: PostEnableProcessingEngineTrigger + method: POST + path: /api/v3/configure/processing_engine_trigger/enable + summary: Enable processing engine trigger + tags: + - Processing engine + - operationId: PostInstallPluginPackages + method: POST + path: /api/v3/configure/plugin_environment/install_packages + summary: Install plugin packages + tags: + - Processing engine + - operationId: 
PostInstallPluginRequirements + method: POST + path: /api/v3/configure/plugin_environment/install_requirements + summary: Install plugin requirements + tags: + - Processing engine + - operationId: PostTestWALPlugin + method: POST + path: /api/v3/plugin_test/wal + summary: Test WAL plugin + tags: + - Processing engine + - operationId: PostTestSchedulingPlugin + method: POST + path: /api/v3/plugin_test/schedule + summary: Test scheduling plugin + tags: + - Processing engine + - operationId: GetProcessingEnginePluginRequest + method: GET + path: /api/v3/engine/{request_path} + summary: On Request processing engine plugin request + tags: + - Processing engine + - operationId: PostProcessingEnginePluginRequest + method: POST + path: /api/v3/engine/{request_path} + summary: On Request processing engine plugin request + tags: + - Processing engine + - operationId: PutPluginFile + method: PUT + path: /api/v3/plugins/files + summary: Update plugin file + tags: + - Processing engine + - operationId: PutPluginDirectory + method: PUT + path: /api/v3/plugins/directory + summary: Update plugin directory + tags: + - Processing engine + tagDescription: > + Manage Processing engine triggers, test plugins, and send requests to + trigger On Request plugins. + + + InfluxDB 3 provides the InfluxDB 3 processing engine, an embedded Python + VM that can dynamically load and trigger Python plugins in response to + events in your database. + + Use Processing engine plugins and triggers to run code and perform tasks + for different database events. + + + To get started with the processing engine, see the [Processing engine + and Python plugins](/influxdb3/enterprise/processing-engine/) guide. 
+ source: >- + static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-processing-engine.yaml + staticFilePath: >- + /openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-processing-engine.yaml + - path: api/query-data + fields: + name: Query data + describes: + - /api/v3/query_sql + - /api/v3/query_influxql + - /query + title: Query data + description: Query data using SQL or InfluxQL + tag: Query data + isConceptual: false + menuGroup: Data Operations + operations: + - operationId: GetExecuteQuerySQL + method: GET + path: /api/v3/query_sql + summary: Execute SQL query + tags: + - Query data + - operationId: PostExecuteQuerySQL + method: POST + path: /api/v3/query_sql + summary: Execute SQL query + tags: + - Query data + - operationId: GetExecuteInfluxQLQuery + method: GET + path: /api/v3/query_influxql + summary: Execute InfluxQL query + tags: + - Query data + - operationId: PostExecuteQueryInfluxQL + method: POST + path: /api/v3/query_influxql + summary: Execute InfluxQL query + tags: + - Query data + - operationId: GetV1ExecuteQuery + method: GET + path: /query + summary: Execute InfluxQL query (v1-compatible) + tags: + - Query data + compatVersion: v1 + externalDocs: + description: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + url: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ + - operationId: PostExecuteV1Query + method: POST + path: /query + summary: Execute InfluxQL query (v1-compatible) + tags: + - Query data + compatVersion: v1 + externalDocs: + description: Use the InfluxDB v1 HTTP query API and InfluxQL to query data + url: /influxdb/version/query-data/execute-queries/influxdb-v1-api/ + tagDescription: Query data using SQL or InfluxQL + related: + - /influxdb/version/query-data/execute-queries/influxdb-v1-api/ + source: >- + static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-query-data.yaml + staticFilePath: >- + 
/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-query-data.yaml
+  - path: api/quick-start
+    fields:
+      name: Quick start
+      describes: []
+      title: Quick start
+      description: >
+        1. [Create an admin token](#section/Authentication) to authorize API
+        requests.
+
+        ```bash
+        curl -X POST "http://localhost:8181/api/v3/configure/token/admin"
+        ```
+        2. [Check the status](#section/Server-information) of the InfluxDB
+        server.
+
+        ```bash
+        curl "http://localhost:8181/health" \
+          --header "Authorization: Bearer ADMIN_TOKEN"
+        ```
+
+        3. [Write data](#operation/PostWriteLP) to InfluxDB.
+
+        ```bash
+        curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" \
+          --header "Authorization: Bearer ADMIN_TOKEN" \
+          --data-raw "home,room=Kitchen temp=72.0
+        home,room=Living\ room temp=71.5"
+        ```
+
+        If all data is written, the response is `204 No Content`.
+
+        4. [Query data](#operation/GetExecuteQuerySQL) from InfluxDB.
+
+        ```bash
+        curl -G "http://localhost:8181/api/v3/query_sql" \
+          --header "Authorization: Bearer ADMIN_TOKEN" \
+          --data-urlencode "db=sensors" \
+          --data-urlencode "q=SELECT * FROM home WHERE room='Living room'" \
+          --data-urlencode "format=jsonl"
+        ```
+
+        Output:
+
+        ```jsonl
+        {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"}
+        ```
+
+        For more information about using InfluxDB 3, see the [Get
+        started](/influxdb3/enterprise/get-started/) guide.
+      tag: Quick start
+      isConceptual: true
+      menuGroup: Concepts
+      operations: []
+      tagDescription: >
+        1. [Create an admin token](#section/Authentication) to authorize API
+        requests.
+
+        ```bash
+        curl -X POST "http://localhost:8181/api/v3/configure/token/admin"
+        ```
+        2. [Check the status](#section/Server-information) of the InfluxDB
+        server.
+
+        ```bash
+        curl "http://localhost:8181/health" \
+          --header "Authorization: Bearer ADMIN_TOKEN"
+        ```
+
+        3. [Write data](#operation/PostWriteLP) to InfluxDB.
+
+        ```bash
+        curl "http://localhost:8181/api/v3/write_lp?db=sensors&precision=auto" \
+          --header "Authorization: Bearer ADMIN_TOKEN" \
+          --data-raw "home,room=Kitchen temp=72.0
+        home,room=Living\ room temp=71.5"
+        ```
+
+        If all data is written, the response is `204 No Content`.
+
+        4. [Query data](#operation/GetExecuteQuerySQL) from InfluxDB.
+
+        ```bash
+        curl -G "http://localhost:8181/api/v3/query_sql" \
+          --header "Authorization: Bearer ADMIN_TOKEN" \
+          --data-urlencode "db=sensors" \
+          --data-urlencode "q=SELECT * FROM home WHERE room='Living room'" \
+          --data-urlencode "format=jsonl"
+        ```
+
+        Output:
+
+        ```jsonl
+        {"room":"Living room","temp":71.5,"time":"2025-02-25T20:19:34.984098"}
+        ```
+
+        For more information about using InfluxDB 3, see the [Get
+        started](/influxdb3/enterprise/get-started/) guide.
+      source: >-
+        static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-quick-start.yaml
+      staticFilePath: >-
+        /openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-quick-start.yaml
+  - path: api/server-information
+    fields:
+      name: Server information
+      describes:
+        - /health
+        - /api/v1/health
+        - /ping
+        - /metrics
+        - /api/v3/show/license
+      title: Server information
+      description: Retrieve server metrics, status, and version information
+      tag: Server information
+      isConceptual: false
+      menuGroup: Server
+      operations:
+        - operationId: GetHealth
+          method: GET
+          path: /health
+          summary: Health check
+          tags:
+            - Server information
+        - operationId: GetHealthV1
+          method: GET
+          path: /api/v1/health
+          summary: Health check (v1-compatible)
+          tags:
+            - Server information
+          compatVersion: v1
+        - operationId: GetPing
+          method: GET
+          path: /ping
+          summary: Ping the server
+          tags:
+            - Server information
+        - operationId: GetMetrics
+          method: GET
+          path: /metrics
+          summary: Metrics
+          tags:
+            - Server information
+        - operationId: GetShowLicense
+          method: GET
+          path: /api/v3/show/license
+          summary: Show license information
+          tags:
+            - Server information
+      
tagDescription: Retrieve server metrics, status, and version information + source: >- + static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-server-information.yaml + staticFilePath: >- + /openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-server-information.yaml + - path: api/table + fields: + name: Table + describes: + - /api/v3/configure/table + title: Table + description: Manage table schemas and data + tag: Table + isConceptual: false + menuGroup: Administration + operations: + - operationId: PostConfigureTable + method: POST + path: /api/v3/configure/table + summary: Create a table + tags: + - Table + - operationId: PatchConfigureTable + method: PATCH + path: /api/v3/configure/table + summary: Update a table + tags: + - Table + - operationId: DeleteConfigureTable + method: DELETE + path: /api/v3/configure/table + summary: Delete a table + tags: + - Table + tagDescription: Manage table schemas and data + source: >- + static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-table.yaml + staticFilePath: /openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-table.yaml + - path: api/write-data + fields: + name: Write data + describes: + - /write + - /api/v2/write + - /api/v3/write_lp + title: Write data + description: > + Write data to InfluxDB 3 using line protocol format. + + + #### Timestamp precision across write APIs + + + InfluxDB 3 provides multiple write endpoints for compatibility with + different InfluxDB versions. 
+ + The following table compares timestamp precision support across v1, v2, + and v3 write APIs: + + + | Precision | v1 (`/write`) | v2 (`/api/v2/write`) | v3 + (`/api/v3/write_lp`) | + + |-----------|---------------|----------------------|-------------------------| + + | **Auto detection** | ❌ No | ❌ No | ✅ `auto` (default) | + + | **Seconds** | ✅ `s` | ✅ `s` | ✅ `second` | + + | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | + + | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | + + | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | + + | **Minutes** | ✅ `m` | ❌ No | ❌ No | + + | **Hours** | ✅ `h` | ❌ No | ❌ No | + + | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | + + + All timestamps are stored internally as nanoseconds. + tag: Write data + isConceptual: false + menuGroup: Data Operations + operations: + - operationId: PostV1Write + method: POST + path: /write + summary: Write line protocol (v1-compatible) + tags: + - Write data + compatVersion: v1 + externalDocs: + description: Use compatibility APIs to write data + url: /influxdb/version/write-data/http-api/compatibility-apis/ + - operationId: PostV2Write + method: POST + path: /api/v2/write + summary: Write line protocol (v2-compatible) + tags: + - Write data + compatVersion: v2 + externalDocs: + description: Use compatibility APIs to write data + url: /influxdb/version/write-data/http-api/compatibility-apis/ + - operationId: PostWriteLP + method: POST + path: /api/v3/write_lp + summary: Write line protocol + tags: + - Write data + tagDescription: > + Write data to InfluxDB 3 using line protocol format. + + + #### Timestamp precision across write APIs + + + InfluxDB 3 provides multiple write endpoints for compatibility with + different InfluxDB versions. 
+ + The following table compares timestamp precision support across v1, v2, + and v3 write APIs: + + + | Precision | v1 (`/write`) | v2 (`/api/v2/write`) | v3 + (`/api/v3/write_lp`) | + + |-----------|---------------|----------------------|-------------------------| + + | **Auto detection** | ❌ No | ❌ No | ✅ `auto` (default) | + + | **Seconds** | ✅ `s` | ✅ `s` | ✅ `second` | + + | **Milliseconds** | ✅ `ms` | ✅ `ms` | ✅ `millisecond` | + + | **Microseconds** | ✅ `u` or `µ` | ✅ `us` | ✅ `microsecond` | + + | **Nanoseconds** | ✅ `ns` | ✅ `ns` | ✅ `nanosecond` | + + | **Minutes** | ✅ `m` | ❌ No | ❌ No | + + | **Hours** | ✅ `h` | ❌ No | ❌ No | + + | **Default** | Nanosecond | Nanosecond | **Auto** (guessed) | + + + All timestamps are stored internally as nanoseconds. + related: + - /influxdb/version/write-data/http-api/compatibility-apis/ + source: >- + static/openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-write-data.yaml + staticFilePath: >- + /openapi/influxdb3-enterprise/tags/tags/influxdb3-enterprise-write-data.yaml diff --git a/data/article_data/influxdb/oss-v2/articles.yml b/data/article_data/influxdb/oss-v2/articles.yml new file mode 100644 index 0000000000..aa37b5a614 --- /dev/null +++ b/data/article_data/influxdb/oss-v2/articles.yml @@ -0,0 +1,757 @@ +articles: + - path: api/v2/authorizations + fields: + name: /api/v2/authorizations + describes: + - /api/v2/authorizations + - /api/v2/authorizations/{authID} + title: |- + /api/v2/authorizations + InfluxDB OSS API Service + tags: + - api-v2 + - authorizations + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-authorizations.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-authorizations.yaml + - path: api/v2/backup + fields: + name: /api/v2/backup + describes: + - /api/v2/backup/kv + - /api/v2/backup/metadata + - /api/v2/backup/shards/{shardID} + title: |- + /api/v2/backup + InfluxDB OSS API Service + tags: + - api-v2 + - backup + source: 
static/openapi/influxdb-oss-v2/paths/ref-api-v2-backup.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-backup.yaml + - path: api/v2/buckets + fields: + name: /api/v2/buckets + describes: + - /api/v2/buckets + - /api/v2/buckets/{bucketID} + - /api/v2/buckets/{bucketID}/labels + - /api/v2/buckets/{bucketID}/labels/{labelID} + - /api/v2/buckets/{bucketID}/members + - /api/v2/buckets/{bucketID}/members/{userID} + - /api/v2/buckets/{bucketID}/owners + - /api/v2/buckets/{bucketID}/owners/{userID} + title: |- + /api/v2/buckets + InfluxDB OSS API Service + tags: + - api-v2 + - buckets + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-buckets.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-buckets.yaml + - path: api/v2/checks + fields: + name: /api/v2/checks + describes: + - /api/v2/checks + - /api/v2/checks/{checkID} + - /api/v2/checks/{checkID}/labels + - /api/v2/checks/{checkID}/labels/{labelID} + - /api/v2/checks/{checkID}/query + title: |- + /api/v2/checks + InfluxDB OSS API Service + tags: + - api-v2 + - checks + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-checks.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-checks.yaml + - path: api/v2/config + fields: + name: /api/v2/config + describes: + - /api/v2/config + title: |- + /api/v2/config + InfluxDB OSS API Service + tags: + - api-v2 + - config + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-config.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-config.yaml + - path: api/v2/dashboards + fields: + name: /api/v2/dashboards + describes: + - /api/v2/dashboards + - /api/v2/dashboards/{dashboardID} + - /api/v2/dashboards/{dashboardID}/cells + - /api/v2/dashboards/{dashboardID}/cells/{cellID} + - /api/v2/dashboards/{dashboardID}/cells/{cellID}/view + - /api/v2/dashboards/{dashboardID}/labels + - /api/v2/dashboards/{dashboardID}/labels/{labelID} + - /api/v2/dashboards/{dashboardID}/members + - 
/api/v2/dashboards/{dashboardID}/members/{userID} + - /api/v2/dashboards/{dashboardID}/owners + - /api/v2/dashboards/{dashboardID}/owners/{userID} + title: |- + /api/v2/dashboards + InfluxDB OSS API Service + tags: + - api-v2 + - dashboards + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-dashboards.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-dashboards.yaml + - path: api/v2/dbrps + fields: + name: /api/v2/dbrps + describes: + - /api/v2/dbrps + - /api/v2/dbrps/{dbrpID} + title: |- + /api/v2/dbrps + InfluxDB OSS API Service + tags: + - api-v2 + - dbrps + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-dbrps.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-dbrps.yaml + - path: api/v2/delete + fields: + name: /api/v2/delete + describes: + - /api/v2/delete + title: |- + /api/v2/delete + InfluxDB OSS API Service + tags: + - api-v2 + - delete + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-delete.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-delete.yaml + - path: api/v2/flags + fields: + name: /api/v2/flags + describes: + - /api/v2/flags + title: |- + /api/v2/flags + InfluxDB OSS API Service + tags: + - api-v2 + - flags + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-flags.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-flags.yaml + - path: api/v2/labels + fields: + name: /api/v2/labels + describes: + - /api/v2/labels + - /api/v2/labels/{labelID} + title: |- + /api/v2/labels + InfluxDB OSS API Service + tags: + - api-v2 + - labels + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-labels.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-labels.yaml + - path: api/v2/maps + fields: + name: /api/v2/maps + describes: + - /api/v2/maps/mapToken + title: |- + /api/v2/maps + InfluxDB OSS API Service + tags: + - api-v2 + - maps + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-maps.yaml + staticFilePath: 
/openapi/influxdb-oss-v2/paths/ref-api-v2-maps.yaml + - path: api/v2/me + fields: + name: /api/v2/me + describes: + - /api/v2/me + - /api/v2/me/password + title: |- + /api/v2/me + InfluxDB OSS API Service + tags: + - api-v2 + - me + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-me.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-me.yaml + - path: api/v2/notificationEndpoints + fields: + name: /api/v2/notificationEndpoints + describes: + - /api/v2/notificationEndpoints + - /api/v2/notificationEndpoints/{endpointID} + - /api/v2/notificationEndpoints/{endpointID}/labels + - /api/v2/notificationEndpoints/{endpointID}/labels/{labelID} + title: |- + /api/v2/notificationEndpoints + InfluxDB OSS API Service + tags: + - api-v2 + - notificationEndpoints + source: >- + static/openapi/influxdb-oss-v2/paths/ref-api-v2-notificationEndpoints.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-notificationEndpoints.yaml + - path: api/v2/notificationRules + fields: + name: /api/v2/notificationRules + describes: + - /api/v2/notificationRules + - /api/v2/notificationRules/{ruleID} + - /api/v2/notificationRules/{ruleID}/labels + - /api/v2/notificationRules/{ruleID}/labels/{labelID} + - /api/v2/notificationRules/{ruleID}/query + title: |- + /api/v2/notificationRules + InfluxDB OSS API Service + tags: + - api-v2 + - notificationRules + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-notificationRules.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-notificationRules.yaml + - path: api/v2/orgs + fields: + name: /api/v2/orgs + describes: + - /api/v2/orgs + - /api/v2/orgs/{orgID} + - /api/v2/orgs/{orgID}/members + - /api/v2/orgs/{orgID}/members/{userID} + - /api/v2/orgs/{orgID}/owners + - /api/v2/orgs/{orgID}/owners/{userID} + - /api/v2/orgs/{orgID}/secrets + - /api/v2/orgs/{orgID}/secrets/delete + - /api/v2/orgs/{orgID}/secrets/{secretID} + title: |- + /api/v2/orgs + InfluxDB OSS API Service + tags: + - api-v2 + - orgs + 
source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-orgs.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-orgs.yaml + - path: api/v2/query + fields: + name: /api/v2/query + describes: + - /api/v2/query + - /api/v2/query/analyze + - /api/v2/query/ast + - /api/v2/query/suggestions + - /api/v2/query/suggestions/{name} + title: |- + /api/v2/query + InfluxDB OSS API Service + tags: + - api-v2 + - query + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-query.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-query.yaml + - path: api/v2/remotes + fields: + name: /api/v2/remotes + describes: + - /api/v2/remotes + - /api/v2/remotes/{remoteID} + title: |- + /api/v2/remotes + InfluxDB OSS API Service + tags: + - api-v2 + - remotes + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-remotes.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-remotes.yaml + - path: api/v2/replications + fields: + name: /api/v2/replications + describes: + - /api/v2/replications + - /api/v2/replications/{replicationID} + - /api/v2/replications/{replicationID}/validate + title: |- + /api/v2/replications + InfluxDB OSS API Service + tags: + - api-v2 + - replications + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-replications.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-replications.yaml + - path: api/v2/resources + fields: + name: /api/v2/resources + describes: + - /api/v2/resources + title: |- + /api/v2/resources + InfluxDB OSS API Service + tags: + - api-v2 + - resources + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-resources.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-resources.yaml + - path: api/v2/restore + fields: + name: /api/v2/restore + describes: + - /api/v2/restore/bucket/{bucketID} + - /api/v2/restore/bucketMetadata + - /api/v2/restore/kv + - /api/v2/restore/shards/{shardID} + - /api/v2/restore/sql + title: |- + /api/v2/restore + InfluxDB OSS API Service + tags: + 
- api-v2 + - restore + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-restore.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-restore.yaml + - path: api/v2/scrapers + fields: + name: /api/v2/scrapers + describes: + - /api/v2/scrapers + - /api/v2/scrapers/{scraperTargetID} + - /api/v2/scrapers/{scraperTargetID}/labels + - /api/v2/scrapers/{scraperTargetID}/labels/{labelID} + - /api/v2/scrapers/{scraperTargetID}/members + - /api/v2/scrapers/{scraperTargetID}/members/{userID} + - /api/v2/scrapers/{scraperTargetID}/owners + - /api/v2/scrapers/{scraperTargetID}/owners/{userID} + title: |- + /api/v2/scrapers + InfluxDB OSS API Service + tags: + - api-v2 + - scrapers + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-scrapers.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-scrapers.yaml + - path: api/v2/setup + fields: + name: /api/v2/setup + describes: + - /api/v2/setup + title: |- + /api/v2/setup + InfluxDB OSS API Service + tags: + - api-v2 + - setup + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-setup.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-setup.yaml + - path: api/v2/signin + fields: + name: /api/v2/signin + describes: + - /api/v2/signin + title: |- + /api/v2/signin + InfluxDB OSS API Service + tags: + - api-v2 + - signin + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-signin.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-signin.yaml + - path: api/v2/signout + fields: + name: /api/v2/signout + describes: + - /api/v2/signout + title: |- + /api/v2/signout + InfluxDB OSS API Service + tags: + - api-v2 + - signout + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-signout.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-signout.yaml + - path: api/v2/sources + fields: + name: /api/v2/sources + describes: + - /api/v2/sources + - /api/v2/sources/{sourceID} + - /api/v2/sources/{sourceID}/buckets + - /api/v2/sources/{sourceID}/health + 
title: |- + /api/v2/sources + InfluxDB OSS API Service + tags: + - api-v2 + - sources + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-sources.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-sources.yaml + - path: api/v2/stacks + fields: + name: /api/v2/stacks + describes: + - /api/v2/stacks + - /api/v2/stacks/{stack_id} + - /api/v2/stacks/{stack_id}/uninstall + title: |- + /api/v2/stacks + InfluxDB OSS API Service + tags: + - api-v2 + - stacks + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-stacks.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-stacks.yaml + - path: api/v2/tasks + fields: + name: /api/v2/tasks + describes: + - /api/v2/tasks + - /api/v2/tasks/{taskID} + - /api/v2/tasks/{taskID}/labels + - /api/v2/tasks/{taskID}/labels/{labelID} + - /api/v2/tasks/{taskID}/logs + - /api/v2/tasks/{taskID}/members + - /api/v2/tasks/{taskID}/members/{userID} + - /api/v2/tasks/{taskID}/owners + - /api/v2/tasks/{taskID}/owners/{userID} + - /api/v2/tasks/{taskID}/runs + - /api/v2/tasks/{taskID}/runs/{runID} + - /api/v2/tasks/{taskID}/runs/{runID}/logs + - /api/v2/tasks/{taskID}/runs/{runID}/retry + title: |- + /api/v2/tasks + InfluxDB OSS API Service + tags: + - api-v2 + - tasks + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-tasks.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-tasks.yaml + - path: api/v2/telegraf + fields: + name: /api/v2/telegraf + describes: + - /api/v2/telegraf/plugins + title: |- + /api/v2/telegraf + InfluxDB OSS API Service + tags: + - api-v2 + - telegraf + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-telegraf.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-telegraf.yaml + - path: api/v2/telegrafs + fields: + name: /api/v2/telegrafs + describes: + - /api/v2/telegrafs + - /api/v2/telegrafs/{telegrafID} + - /api/v2/telegrafs/{telegrafID}/labels + - /api/v2/telegrafs/{telegrafID}/labels/{labelID} + - /api/v2/telegrafs/{telegrafID}/members + - 
/api/v2/telegrafs/{telegrafID}/members/{userID} + - /api/v2/telegrafs/{telegrafID}/owners + - /api/v2/telegrafs/{telegrafID}/owners/{userID} + title: |- + /api/v2/telegrafs + InfluxDB OSS API Service + tags: + - api-v2 + - telegrafs + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-telegrafs.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-telegrafs.yaml + - path: api/v2/templates + fields: + name: /api/v2/templates + describes: + - /api/v2/templates/apply + - /api/v2/templates/export + title: |- + /api/v2/templates + InfluxDB OSS API Service + tags: + - api-v2 + - templates + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-templates.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-templates.yaml + - path: api/v2/users + fields: + name: /api/v2/users + describes: + - /api/v2/users + - /api/v2/users/{userID} + - /api/v2/users/{userID}/password + title: |- + /api/v2/users + InfluxDB OSS API Service + tags: + - api-v2 + - users + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-users.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-users.yaml + - path: api/v2/variables + fields: + name: /api/v2/variables + describes: + - /api/v2/variables + - /api/v2/variables/{variableID} + - /api/v2/variables/{variableID}/labels + - /api/v2/variables/{variableID}/labels/{labelID} + title: |- + /api/v2/variables + InfluxDB OSS API Service + tags: + - api-v2 + - variables + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-variables.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-variables.yaml + - path: api/v2/write + fields: + name: /api/v2/write + describes: + - /api/v2/write + title: |- + /api/v2/write + InfluxDB OSS API Service + tags: + - api-v2 + - write + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2-write.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2-write.yaml + - path: api/v2 + fields: + name: /api/v2 + describes: + - /api/v2 + title: |- + /api/v2 + 
InfluxDB OSS API Service + tags: + - api + - v2 + source: static/openapi/influxdb-oss-v2/paths/ref-api-v2.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-api-v2.yaml + - path: debug/pprof/all + fields: + name: /debug/pprof/all + describes: + - /debug/pprof/all + title: |- + /debug/pprof/all + InfluxDB OSS API Service + tags: + - debug-pprof + - all + source: static/openapi/influxdb-oss-v2/paths/ref-debug-pprof-all.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-debug-pprof-all.yaml + - path: debug/pprof/allocs + fields: + name: /debug/pprof/allocs + describes: + - /debug/pprof/allocs + title: |- + /debug/pprof/allocs + InfluxDB OSS API Service + tags: + - debug-pprof + - allocs + source: static/openapi/influxdb-oss-v2/paths/ref-debug-pprof-allocs.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-debug-pprof-allocs.yaml + - path: debug/pprof/block + fields: + name: /debug/pprof/block + describes: + - /debug/pprof/block + title: |- + /debug/pprof/block + InfluxDB OSS API Service + tags: + - debug-pprof + - block + source: static/openapi/influxdb-oss-v2/paths/ref-debug-pprof-block.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-debug-pprof-block.yaml + - path: debug/pprof/cmdline + fields: + name: /debug/pprof/cmdline + describes: + - /debug/pprof/cmdline + title: |- + /debug/pprof/cmdline + InfluxDB OSS API Service + tags: + - debug-pprof + - cmdline + source: static/openapi/influxdb-oss-v2/paths/ref-debug-pprof-cmdline.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-debug-pprof-cmdline.yaml + - path: debug/pprof/goroutine + fields: + name: /debug/pprof/goroutine + describes: + - /debug/pprof/goroutine + title: |- + /debug/pprof/goroutine + InfluxDB OSS API Service + tags: + - debug-pprof + - goroutine + source: static/openapi/influxdb-oss-v2/paths/ref-debug-pprof-goroutine.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-debug-pprof-goroutine.yaml + - path: debug/pprof/heap + fields: + name: /debug/pprof/heap 
+ describes: + - /debug/pprof/heap + title: |- + /debug/pprof/heap + InfluxDB OSS API Service + tags: + - debug-pprof + - heap + source: static/openapi/influxdb-oss-v2/paths/ref-debug-pprof-heap.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-debug-pprof-heap.yaml + - path: debug/pprof/mutex + fields: + name: /debug/pprof/mutex + describes: + - /debug/pprof/mutex + title: |- + /debug/pprof/mutex + InfluxDB OSS API Service + tags: + - debug-pprof + - mutex + source: static/openapi/influxdb-oss-v2/paths/ref-debug-pprof-mutex.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-debug-pprof-mutex.yaml + - path: debug/pprof/profile + fields: + name: /debug/pprof/profile + describes: + - /debug/pprof/profile + title: |- + /debug/pprof/profile + InfluxDB OSS API Service + tags: + - debug-pprof + - profile + source: static/openapi/influxdb-oss-v2/paths/ref-debug-pprof-profile.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-debug-pprof-profile.yaml + - path: debug/pprof/threadcreate + fields: + name: /debug/pprof/threadcreate + describes: + - /debug/pprof/threadcreate + title: |- + /debug/pprof/threadcreate + InfluxDB OSS API Service + tags: + - debug-pprof + - threadcreate + source: static/openapi/influxdb-oss-v2/paths/ref-debug-pprof-threadcreate.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-debug-pprof-threadcreate.yaml + - path: debug/pprof/trace + fields: + name: /debug/pprof/trace + describes: + - /debug/pprof/trace + title: |- + /debug/pprof/trace + InfluxDB OSS API Service + tags: + - debug-pprof + - trace + source: static/openapi/influxdb-oss-v2/paths/ref-debug-pprof-trace.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-debug-pprof-trace.yaml + - path: health + fields: + name: /health + describes: + - /health + title: |- + /health + InfluxDB OSS API Service + tags: + - '' + - health + source: static/openapi/influxdb-oss-v2/paths/ref-health.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-health.yaml + - path: 
legacy/authorizations + fields: + name: /legacy/authorizations + describes: + - /legacy/authorizations + - /legacy/authorizations/{authID} + - /legacy/authorizations/{authID}/password + title: |- + /legacy/authorizations + InfluxDB OSS API Service + tags: + - legacy + - authorizations + source: static/openapi/influxdb-oss-v2/paths/ref-legacy-authorizations.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-legacy-authorizations.yaml + - path: metrics + fields: + name: /metrics + describes: + - /metrics + title: |- + /metrics + InfluxDB OSS API Service + tags: + - '' + - metrics + source: static/openapi/influxdb-oss-v2/paths/ref-metrics.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-metrics.yaml + - path: ping + fields: + name: /ping + describes: + - /ping + title: |- + /ping + InfluxDB OSS API Service + tags: + - '' + - ping + source: static/openapi/influxdb-oss-v2/paths/ref-ping.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-ping.yaml + - path: query + fields: + name: /query + describes: + - /query + title: |- + /query + InfluxDB OSS API Service + tags: + - '' + - query + source: static/openapi/influxdb-oss-v2/paths/ref-query.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-query.yaml + - path: ready + fields: + name: /ready + describes: + - /ready + title: |- + /ready + InfluxDB OSS API Service + tags: + - '' + - ready + source: static/openapi/influxdb-oss-v2/paths/ref-ready.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-ready.yaml + - path: write + fields: + name: /write + describes: + - /write + title: |- + /write + InfluxDB OSS API Service + tags: + - '' + - write + source: static/openapi/influxdb-oss-v2/paths/ref-write.yaml + staticFilePath: /openapi/influxdb-oss-v2/paths/ref-write.yaml diff --git a/docs/plans/2024-12-10-standalone-operation-pages-design.md b/docs/plans/2024-12-10-standalone-operation-pages-design.md new file mode 100644 index 0000000000..54d084ddc6 --- /dev/null +++ 
b/docs/plans/2024-12-10-standalone-operation-pages-design.md @@ -0,0 +1,181 @@ +# Standalone API Operation Pages Design + +## Overview + +Create individual pages for each API operation with path-based URLs, rendered using RapiDoc Mini with existing tag-level OpenAPI specs. + +## Goals + +- **SEO/discoverability**: Each operation indexable with its own URL and metadata +- **Deep linking**: Reliable bookmarkable/shareable URLs for specific operations +- **Navigation UX**: Sidebar links navigate to actual pages (not hash fragments) +- **Content customization**: Foundation for adding operation-specific guides and examples + +## URL Structure + +Path-based URLs with HTTP method as the final segment: + +| Operation | API Path | Page URL | +| ------------------ | ----------------------------------- | ------------------------------------ | +| PostV1Write | `POST /write` | `/api/write/post/` | +| PostV2Write | `POST /api/v2/write` | `/api/v2/write/post/` | +| PostWriteLP | `POST /api/v3/write_lp` | `/api/v3/write_lp/post/` | +| GetV1ExecuteQuery | `GET /query` | `/api/query/get/` | +| PostExecuteV1Query | `POST /query` | `/api/query/post/` | +| GetExecuteQuerySQL | `GET /api/v3/query_sql` | `/api/v3/query_sql/get/` | +| GetDatabases | `GET /api/v3/configure/database` | `/api/v3/configure/database/get/` | +| DeleteDatabase | `DELETE /api/v3/configure/database` | `/api/v3/configure/database/delete/` | + +## Architecture + +### Existing Components (unchanged) + +- **Tag pages**: `/api/write-data/`, `/api/query-data/` etc. remain as landing pages +- **Tag-level specs**: `static/openapi/influxdb-{product}/tags/tags/ref-{tag}.yaml` (\~30-50KB each) +- **Sidebar structure**: Tag-based groups with operation summaries as link text + +### New Components + +1. **Operation page content files**: Generated at path-based locations +2. **Operation page template**: Hugo layout using RapiDoc Mini +3. 
**Updated sidebar links**: Point to path-based URLs instead of hash fragments + +## Content File Structure + +Generated operation pages at `content/influxdb3/{product}/api/{path}/{method}/_index.md`: + +```yaml +--- +title: Write line protocol (v1-compatible) +description: Write data using InfluxDB v1-compatible line protocol endpoint +type: api-operation +layout: operation +# RapiDoc Mini configuration +specFile: /openapi/influxdb-influxdb3_core/tags/tags/ref-write-data.yaml +matchPaths: post /write +# Operation metadata +operationId: PostV1Write +method: POST +apiPath: /write +tag: Write data +compatVersion: v1 +# Links +related: + - /influxdb3/core/write-data/http-api/compatibility-apis/ +--- +``` + +## Hugo Template + +New layout `layouts/api-operation/operation.html`: + +```html +{{ define "main" }} +
+
+

{{ .Title }}

+
+ {{ .Params.method }} + {{ .Params.apiPath }} + {{ with .Params.compatVersion }} + {{ . }} + {{ end }} +
+
+ + + + + {{ with .Params.related }} + + {{ end }} +
+{{ end }} +``` + +## Sidebar Navigation Changes + +Update `layouts/partials/sidebar/api-menu-items.html` to generate path-based URLs: + +**Before:** + +```go +{{ $fragment := printf "#operation/%s" .operationId }} +{{ $fullUrl := printf "%s%s" $tagPageUrl $fragment }} +``` + +**After:** + +```go +{{ $apiPath := .path }} +{{ $method := lower .method }} +{{ $pathSlug := $apiPath | replaceRE "^/" "" }} +{{ $operationUrl := printf "/%s/%s/api/%s/%s/" $product $version $pathSlug $method | relURL }} +``` + +## Generator Changes + +Update `api-docs/scripts/openapi-paths-to-hugo-data/index.ts`: + +1. Add new function `generateOperationPages()` that creates content files for each operation +2. Include operation metadata: specFile path, matchPaths filter, tag association +3. Call from `generateHugoDataByTag()` after generating tag-based articles + +## File Generation Summary + +For InfluxDB 3 Core (\~43 operations), this creates: + +- \~43 new content files at `content/influxdb3/core/api/{path}/{method}/_index.md` +- No new spec files (reuses existing tag-level specs) + +## Data Flow + +``` +OpenAPI Spec + ↓ +Generator extracts operations + ↓ +Creates content files with frontmatter (specFile, matchPaths, metadata) + ↓ +Hugo builds pages using api-operation/operation.html template + ↓ +RapiDoc Mini loads tag-level spec, filters to single operation client-side +``` + +## Testing Plan + +1. Generate operation pages for Core product +2. Verify URLs resolve correctly +3. Verify RapiDoc Mini renders single operation +4. Verify sidebar links navigate to operation pages +5. Test deep linking (direct URL access) +6. Check page titles and meta descriptions for SEO + +## Future Improvements + +- Generate operation-level specs for smaller payloads (if performance issues arise) +- Add custom content sections per operation +- Implement operation search/filtering on tag pages + +## Migration Notes + +When migrating other product specs from Redoc to RapiDoc: + +1. 
**Remove `x-tagGroups`**: This is a Redoc-specific extension for sidebar navigation grouping. RapiDoc doesn't use it. The Hugo sidebar uses `data/api_nav_groups.yml` instead. + +2. **Ensure tag consistency**: The sidebar navigation (`api_nav_groups.yml`) must match the tag names in the spec's `tags` section exactly. + +3. **Single-tag operations**: Operations should ideally have a single tag to avoid duplicate rendering. If an operation has multiple tags, the generator restricts it to the primary tag in tag-specific specs. diff --git a/docs/plans/2024-12-12-api-code-review-fixes.md b/docs/plans/2024-12-12-api-code-review-fixes.md new file mode 100644 index 0000000000..6360c9f69e --- /dev/null +++ b/docs/plans/2024-12-12-api-code-review-fixes.md @@ -0,0 +1,779 @@ +# API Code Review Fixes Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Fix code review violations by extracting inline JavaScript from rapidoc.html into a TypeScript component and removing unused Scalar renderer code. + +**Architecture:** Create a new `api-rapidoc.ts` TypeScript component following the established component pattern (same as `rapidoc-mini.ts`). The component handles theme synchronization, shadow DOM manipulation, and MutationObserver setup. Remove the Scalar renderer, api-tabs component, and associated partials since they're no longer used. + +**Tech Stack:** TypeScript, Hugo templates, SCSS, Cypress + +*** + +## Task 1: Create api-rapidoc.ts TypeScript Component + +**Files:** + +- Create: `assets/js/components/api-rapidoc.ts` + +**Step 1: Create the TypeScript component file** + +Create `assets/js/components/api-rapidoc.ts` with the following content: + +```typescript +/** + * RapiDoc API Documentation Component + * + * Initializes the full RapiDoc renderer with theme synchronization. + * This is the component version of the inline JavaScript from rapidoc.html. 
+ * + * Features: + * - Theme detection from Hugo's stylesheet toggle system + * - Automatic theme synchronization when user toggles dark/light mode + * - Shadow DOM manipulation to hide unwanted UI elements + * - CSS custom property injection for styling + * + * Usage: + *
+ * + * The component expects a element to already exist in the container + * (created by Hugo template) or will wait for it to be added. + */ + +import { getPreference } from '../services/local-storage.js'; + +interface ComponentOptions { + component: HTMLElement; +} + +interface ThemeColors { + theme: 'light' | 'dark'; + bgColor: string; + textColor: string; + headerColor: string; + primaryColor: string; + navBgColor: string; + navTextColor: string; + navHoverBgColor: string; + navHoverTextColor: string; + navAccentColor: string; + codeTheme: string; +} + +type CleanupFn = () => void; + +/** + * Get current theme from localStorage (source of truth for Hugo theme system) + */ +function getTheme(): 'dark' | 'light' { + const theme = getPreference('theme'); + return theme === 'dark' ? 'dark' : 'light'; +} + +/** + * Get theme colors matching Hugo SCSS variables + */ +function getThemeColors(isDark: boolean): ThemeColors { + if (isDark) { + return { + theme: 'dark', + bgColor: '#14141F', // $grey10 ($article-bg in dark theme) + textColor: '#D4D7DD', // $g15-platinum + headerColor: '#D4D7DD', + primaryColor: '#a0a0ff', + navBgColor: '#1a1a2a', + navTextColor: '#D4D7DD', + navHoverBgColor: '#252535', + navHoverTextColor: '#ffffff', + navAccentColor: '#a0a0ff', + codeTheme: 'monokai', + }; + } + + return { + theme: 'light', + bgColor: '#ffffff', // $g20-white + textColor: '#2b2b2b', + headerColor: '#020a47', // $br-dark-blue + primaryColor: '#020a47', + navBgColor: '#f7f8fa', + navTextColor: '#2b2b2b', + navHoverBgColor: '#e8e8f0', + navHoverTextColor: '#020a47', + navAccentColor: '#020a47', + codeTheme: 'prism', + }; +} + +/** + * Apply theme to RapiDoc element + */ +function applyTheme(rapiDoc: HTMLElement): void { + const isDark = getTheme() === 'dark'; + const colors = getThemeColors(isDark); + + rapiDoc.setAttribute('theme', colors.theme); + rapiDoc.setAttribute('bg-color', colors.bgColor); + rapiDoc.setAttribute('text-color', colors.textColor); + 
rapiDoc.setAttribute('header-color', colors.headerColor); + rapiDoc.setAttribute('primary-color', colors.primaryColor); + rapiDoc.setAttribute('nav-bg-color', colors.navBgColor); + rapiDoc.setAttribute('nav-text-color', colors.navTextColor); + rapiDoc.setAttribute('nav-hover-bg-color', colors.navHoverBgColor); + rapiDoc.setAttribute('nav-hover-text-color', colors.navHoverTextColor); + rapiDoc.setAttribute('nav-accent-color', colors.navAccentColor); + rapiDoc.setAttribute('code-theme', colors.codeTheme); +} + +/** + * Set custom CSS properties on RapiDoc element + */ +function setInputBorderStyles(rapiDoc: HTMLElement): void { + rapiDoc.style.setProperty('--border-color', '#00A3FF'); +} + +/** + * Hide unwanted elements in RapiDoc shadow DOM + */ +function hideExpandCollapseControls(rapiDoc: HTMLElement): void { + const maxAttempts = 10; + let attempts = 0; + + const tryHide = (): void => { + attempts++; + + try { + const shadowRoot = rapiDoc.shadowRoot; + if (!shadowRoot) { + if (attempts < maxAttempts) { + setTimeout(tryHide, 500); + } + return; + } + + // Find all elements and hide those containing "Expand all" / "Collapse all" + const allElements = shadowRoot.querySelectorAll('*'); + let hiddenCount = 0; + + allElements.forEach((element) => { + const text = element.textContent || ''; + + if (text.includes('Expand all') || text.includes('Collapse all')) { + (element as HTMLElement).style.display = 'none'; + if (element.parentElement) { + element.parentElement.style.display = 'none'; + } + hiddenCount++; + } + }); + + // Hide "Overview" headings + const headings = shadowRoot.querySelectorAll('h1, h2, h3, h4'); + headings.forEach((heading) => { + const text = (heading.textContent || '').trim(); + if (text.includes('Overview')) { + (heading as HTMLElement).style.display = 'none'; + hiddenCount++; + } + }); + + // Inject CSS as backup + const style = document.createElement('style'); + style.textContent = ` + .section-gap.section-tag, + [id*="overview"], + 
.regular-font.section-gap:empty, + h1:empty, h2:empty, h3:empty { + display: none !important; + } + `; + shadowRoot.appendChild(style); + + if (hiddenCount === 0 && attempts < maxAttempts) { + setTimeout(tryHide, 500); + } + } catch (e) { + if (attempts < maxAttempts) { + setTimeout(tryHide, 500); + } + } + }; + + setTimeout(tryHide, 500); +} + +/** + * Watch for theme changes via stylesheet toggle + */ +function watchThemeChanges(rapiDoc: HTMLElement): CleanupFn { + const handleThemeChange = (): void => { + applyTheme(rapiDoc); + }; + + // Watch stylesheet disabled attribute changes (Hugo theme.js toggles this) + const observer = new MutationObserver((mutations) => { + for (const mutation of mutations) { + if ( + mutation.type === 'attributes' && + mutation.target instanceof HTMLLinkElement && + mutation.target.title?.includes('theme') + ) { + handleThemeChange(); + break; + } + // Also watch data-theme changes as fallback + if (mutation.attributeName === 'data-theme') { + handleThemeChange(); + } + } + }); + + // Observe head for stylesheet changes + observer.observe(document.head, { + attributes: true, + attributeFilter: ['disabled'], + subtree: true, + }); + + // Observe documentElement for data-theme changes + observer.observe(document.documentElement, { + attributes: true, + attributeFilter: ['data-theme'], + }); + + return (): void => { + observer.disconnect(); + }; +} + +/** + * Initialize RapiDoc component + */ +export default function ApiRapiDoc({ + component, +}: ComponentOptions): CleanupFn | void { + // Find the rapi-doc element inside the container + const rapiDoc = component.querySelector('rapi-doc') as HTMLElement | null; + + if (!rapiDoc) { + console.warn('[API RapiDoc] No rapi-doc element found in container'); + return; + } + + // Apply initial theme + applyTheme(rapiDoc); + + // Set custom CSS properties + if (customElements && customElements.whenDefined) { + customElements.whenDefined('rapi-doc').then(() => { + setInputBorderStyles(rapiDoc); + 
setTimeout(() => setInputBorderStyles(rapiDoc), 500); + }); + } else { + setInputBorderStyles(rapiDoc); + setTimeout(() => setInputBorderStyles(rapiDoc), 500); + } + + // Hide unwanted UI elements + hideExpandCollapseControls(rapiDoc); + + // Watch for theme changes + return watchThemeChanges(rapiDoc); +} +``` + +**Step 2: Verify the file was created correctly** + +Run: `head -30 assets/js/components/api-rapidoc.ts` +Expected: File header and imports visible + +**Step 3: Commit** + +```bash +git add assets/js/components/api-rapidoc.ts +git commit -m "feat(api): Create api-rapidoc TypeScript component + +Extract inline JavaScript from rapidoc.html into a proper TypeScript +component following the established component pattern." +``` + +*** + +## Task 2: Register api-rapidoc Component in main.js + +**Files:** + +- Modify: `assets/js/main.js:49-88` + +**Step 1: Add import for ApiRapiDoc** + +Add this import after line 52 (after RapiDocMini import): + +```javascript +import ApiRapiDoc from './components/api-rapidoc.ts'; +``` + +**Step 2: Register component in componentRegistry** + +Add this entry in the componentRegistry object (after line 87, the 'rapidoc-mini' entry): + +```javascript + 'api-rapidoc': ApiRapiDoc, +``` + +**Step 3: Verify changes** + +Run: `grep -n "api-rapidoc\|ApiRapiDoc" assets/js/main.js` +Expected: Both the import and registry entry appear + +**Step 4: Commit** + +```bash +git add assets/js/main.js +git commit -m "feat(api): Register api-rapidoc component in main.js" +``` + +*** + +## Task 3: Update rapidoc.html to Use Component Pattern + +**Files:** + +- Modify: `layouts/partials/api/rapidoc.html` + +**Step 1: Replace inline JavaScript with data-component attribute** + +Replace the entire content of `layouts/partials/api/rapidoc.html` with: + +```html +{{/* + RapiDoc API Documentation Renderer + + Primary API documentation renderer using RapiDoc with "Mix your own HTML" slots. 
+ See: https://rapidocweb.com/examples.html + + Required page params: + - staticFilePath: Path to the OpenAPI specification file + + Optional page params: + - operationId: Specific operation to display (renders only that operation) + - tag: Tag to filter operations by + + RapiDoc slots available for custom content: + - slot="header" - Custom header + - slot="footer" - Custom footer + - slot="overview" - Custom overview content + - slot="auth" - Custom authentication section + - slot="nav-logo" - Custom navigation logo +*/}} + +{{ $specPath := .Params.staticFilePath }} +{{ $specPathJSON := replace $specPath ".yaml" ".json" | replace ".yml" ".json" }} +{{ $operationId := .Params.operationId | default "" }} +{{ $tag := .Params.tag | default "" }} + +{{/* Machine-readable links for AI agent discovery */}} +{{ if $specPath }} + + +{{ end }} + +
+ {{/* RapiDoc component with slot-based customization */}} + + {{/* Custom overview slot - Hugo page content */}} + {{ with .Content }} +
+ {{ . }} +
+ {{ end }} + + {{/* Custom examples from frontmatter */}} + {{ with .Params.examples }} +
+

Examples

+ {{ range . }} +
+

{{ .title }}

+ {{ with .description }}

{{ . | markdownify }}

{{ end }} +
{{ .code }}
+
+ {{ end }} +
+ {{ end }} +
+
+ +{{/* Load RapiDoc from CDN */}} + + + +``` + +**Step 2: Verify the inline script is removed** + +Run: `grep -c " + + diff --git a/layouts/partials/api/renderer.html b/layouts/partials/api/renderer.html new file mode 100644 index 0000000000..de3dfe9389 --- /dev/null +++ b/layouts/partials/api/renderer.html @@ -0,0 +1,10 @@ +{{/* + API Renderer + + Renders API documentation using RapiDoc. + + Required page params: + - staticFilePath: Path to the OpenAPI specification file +*/}} + +{{ partial "api/rapidoc.html" . }} diff --git a/layouts/partials/api/section-children.html b/layouts/partials/api/section-children.html new file mode 100644 index 0000000000..a381e8d091 --- /dev/null +++ b/layouts/partials/api/section-children.html @@ -0,0 +1,87 @@ +{{/* + API Section Children + + Renders tag pages from article data as a children list. + Sort order: conceptual tags (traitTags) first, then other tags alphabetically. + + Uses data from: + - data/article_data/influxdb/{product}/articles.yml +*/}} + +{{ $currentPage := . }} + +{{/* Extract product and version from URL */}} +{{ $productPathData := findRE "[^/]+.*?" .RelPermalink }} +{{ $product := index $productPathData 0 }} +{{ $version := index $productPathData 1 }} + +{{/* Build data key for article data lookup */}} +{{ $dataKey := "" }} +{{ if eq $product "influxdb3" }} + {{ $dataKey = print "influxdb3_" $version }} +{{ else if eq $product "influxdb" }} + {{ $dataKey = print $version }} +{{ else }} + {{ $dataKey = $product }} +{{ end }} + +{{/* Get article data for this product */}} +{{ $articles := slice }} +{{ with site.Data.article_data }} + {{ with index . "influxdb" }} + {{ with index . $dataKey }} + {{ with index . "articles" }} + {{ with .articles }} + {{ $articles = . 
}} + {{ end }} + {{ end }} + {{ end }} + {{ end }} +{{ end }} + +{{ if gt (len $articles) 0 }} + {{/* Separate conceptual (traitTag) and non-conceptual articles */}} + {{ $conceptualArticles := slice }} + {{ $operationArticles := slice }} + + {{ range $articles }} + {{ if and (reflect.IsMap .) (isset . "fields") }} + {{ $fields := index . "fields" }} + {{ if reflect.IsMap $fields }} + {{ $isConceptual := false }} + {{ if isset $fields "isConceptual" }} + {{ $isConceptual = index $fields "isConceptual" }} + {{ end }} + {{ if $isConceptual }} + {{ $conceptualArticles = $conceptualArticles | append . }} + {{ else }} + {{ $operationArticles = $operationArticles | append . }} + {{ end }} + {{ end }} + {{ end }} + {{ end }} + + {{/* Sort each group alphabetically by tag name */}} + {{ $conceptualArticles = sort $conceptualArticles "fields.tag" }} + {{ $operationArticles = sort $operationArticles "fields.tag" }} + + {{/* Combine: conceptual first, then operations */}} + {{ $sortedArticles := $conceptualArticles | append $operationArticles }} + +
+ {{ range $sortedArticles }} + {{ $path := index . "path" }} + {{ $fields := index . "fields" }} + {{ $tag := index $fields "tag" }} + {{ $description := index $fields "description" | default "" }} + {{ $tagPageUrl := print "/" $product "/" $version "/" $path "/" | relURL }} + +
+

{{ $tag }}

+ {{ with $description }} +

{{ . }}

+ {{ end }} +
+ {{ end }} +
+{{ end }} diff --git a/layouts/partials/api/security-schemes.html b/layouts/partials/api/security-schemes.html new file mode 100644 index 0000000000..fff9c8fec6 --- /dev/null +++ b/layouts/partials/api/security-schemes.html @@ -0,0 +1,52 @@ +{{/* + Security Schemes Display + + Renders OpenAPI security schemes as styled documentation. + Extracts securitySchemes from the referenced OpenAPI spec file. + + Required page params: + - staticFilePath: Path to the OpenAPI specification file +*/}} + +{{ $specPath := .Params.staticFilePath }} +{{ if $specPath }} + {{/* Load the OpenAPI spec file from static directory */}} + {{ $fullPath := printf "static%s" $specPath }} + {{ $specContent := readFile $fullPath }} + {{ if $specContent }} + {{ $spec := transform.Unmarshal $specContent }} + {{ with $spec.components.securitySchemes }} +
+

Security Schemes

+ {{ range $name, $scheme := . }} +
+

{{ $name }}

+
+
+
Type
+
{{ $scheme.type }}
+ {{ with $scheme.scheme }} +
Scheme
+
{{ . }}
+ {{ end }} + {{ with $scheme.in }} +
In
+
{{ . }}
+ {{ end }} + {{ with $scheme.name }} +
Parameter Name
+
{{ . }}
+ {{ end }} +
+
+ {{ with $scheme.description }} +
+ {{ . | markdownify }} +
+ {{ end }} +
+ {{ end }} +
+ {{ end }} + {{ end }} +{{ end }} diff --git a/layouts/partials/sidebar.html b/layouts/partials/sidebar.html index 85caa0d2f2..bbbb263d02 100644 --- a/layouts/partials/sidebar.html +++ b/layouts/partials/sidebar.html @@ -74,12 +74,12 @@ {{ $platformMenu := .Site.Menus.platform }} - {{ partial "sidebar/nested-menu" (dict "page" $currentPage "menu" $mainMenu) . }} + {{ partial "sidebar/nested-menu" (dict "page" $currentPage "menu" $mainMenu "siteData" .Site.Data) }} {{ if gt (len $refMenu) 0 }}

Reference

- {{ partial "sidebar/nested-menu" (dict "page" $currentPage "menu" $refMenu) . }} + {{ partial "sidebar/nested-menu" (dict "page" $currentPage "menu" $refMenu "siteData" .Site.Data) }} {{ end }} @@ -97,7 +97,7 @@

Flux

{{ $platformWhitelist := `telegraf|chronograf|kapacitor|enterprise_influxdb|influxdb_1` }} {{ if gt (len (findRE $platformWhitelist $menuKey)) 0 }}

InfluxData Platform

- {{ partial "sidebar/nested-menu" (dict "page" $currentPage "menu" $platformMenu) . }} + {{ partial "sidebar/nested-menu" (dict "page" $currentPage "menu" $platformMenu "siteData" .Site.Data) }} {{ end }} diff --git a/layouts/partials/sidebar/api-menu-items.html b/layouts/partials/sidebar/api-menu-items.html new file mode 100644 index 0000000000..b5c8bade13 --- /dev/null +++ b/layouts/partials/sidebar/api-menu-items.html @@ -0,0 +1,203 @@ +{{/* + API Reference Menu Items for Hugo Navigation + + Generates + {{ end }} + + {{/* ALL ENDPOINTS - Collect all operations and sort by method+path */}} + {{ $allOperations := slice }} + {{ range $operationArticles }} + {{ $fields := index . "fields" }} + {{ if isset $fields "operations" }} + {{ range index $fields "operations" }} + {{ $allOperations = $allOperations | append . }} + {{ end }} + {{ end }} + {{ end }} + + {{ if gt (len $allOperations) 0 }} + {{/* Sort operations alphabetically by path, then method */}} + {{ $sortedOps := slice }} + {{ range $allOperations }} + {{ $sortKey := printf "%s %s" .path (upper .method) }} + {{ $sortedOps = $sortedOps | append (dict "sortKey" $sortKey "op" .) 
}} + {{ end }} + {{ $sortedOps = sort $sortedOps "sortKey" }} + + {{/* Check if any operation is active */}} + {{ $anyOpActive := false }} + {{ range $sortedOps }} + {{ $op := .op }} + {{ $opPathSlug := $op.path | replaceRE "^/" "" }} + {{ $opUrl := printf "/%s/%s/api/%s/%s/" $product $version $opPathSlug (lower $op.method) }} + {{ if eq $currentPage.RelPermalink $opUrl }} + {{ $anyOpActive = true }} + {{ end }} + {{ end }} + + + {{ end }} +{{ end }} diff --git a/layouts/partials/sidebar/nested-menu.html b/layouts/partials/sidebar/nested-menu.html index 67cf9a1e00..12dd9eec14 100644 --- a/layouts/partials/sidebar/nested-menu.html +++ b/layouts/partials/sidebar/nested-menu.html @@ -1,22 +1,39 @@ {{ $page := .page }} {{ $menu := .menu }} +{{ $siteData := .siteData }} {{ define "recursiveMenu" }} {{ $menuContext := .menu }} {{ $currentPage := .currentPage }} + {{ $site := .site }} + {{ $siteData := .siteData }} {{ $depth := add .depth 1 }} {{ $navClass := cond (gt $depth 1) "item" "category" }} {{ range $menuContext }} + {{/* Check if this is the InfluxDB HTTP API menu item for InfluxDB 3 products */}} + {{ $isApiParent := and (eq .Name "InfluxDB HTTP API") (or (hasPrefix .URL "/influxdb3/") (hasPrefix .URL "/influxdb/")) }} +